import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
import ntpath
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
import cv2
from pPose_nms import pose_nms, write_json
args = opt
args.dataset = 'coco'
if not args.sp:
torch.multiprocessing.set_start_method('forkserver', force=True)
torch.multiprocessing.set_sharing_strategy('file_system')
if __name__ == "__main__":
videofile = args.video
mode = args.mode
if not os.path.exists(args.outputpath):
os.mkdir(args.outputpath)
if not len(videofile):
raise IOError('Error: --video must be specified')
# Load input video
data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
(fourcc,fps,frameSize) = data_loader.videoinfo()
# Load detection loader
print('Loading YOLO model..')
sys.stdout.flush()
det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
det_processor = DetectionProcessor(det_loader).start()
# Load pose model
pose_dataset = Mscoco()
if args.fast_inference:
print('Using fast inference...')
pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
else:
print('Using slow, more accurate inference...')
pose_model = InferenNet(4 * 1 + 1, pose_dataset)
pose_model.cuda()  # move the pose network to the GPU before switching to eval mode (this script assumes CUDA)
pose_model.eval()
runtime_profile = {
'dt': [],
'pt': [],
'pn': []
}
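# per-frame timings: 'dt' = detection, 'pt' = pose estimation, 'pn' = post-processing/writing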
# Data writer
save_path = os.path.join(args.outputpath, 'AlphaPose_'+ntpath.basename(videofile).split('.')[0]+'.avi')
writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
im_names_desc = tqdm(range(data_loader.length()))
batchSize = args.posebatch
for i in im_names_desc:
start_time = getTime()
with torch.no_grad():
(inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
if orig_img is None:
break
if boxes is None or boxes.nelement() == 0:
writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
continue
ckpt_time, det_time = getTime(start_time)
runtime_profile['dt'].append(det_time)
# Pose Estimation
datalen = inps.size(0)
leftover = 0
if datalen % batchSize:
leftover = 1
num_batches = datalen // batchSize + leftover
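# run the pose network over the cropped detections in chunks of args.posebatch and concatenate the resulting heatmaps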
hm = []
for j in range(num_batches):
inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)]
hm_j = pose_model(inps_j)
hm.append(hm_j)
hm = torch.cat(hm)
ckpt_time, pose_time = getTime(ckpt_time)
runtime_profile['pt'].append(pose_time)
hm = hm.cpu().data
writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
ckpt_time, post_time = getTime(ckpt_time)
runtime_profile['pn'].append(post_time)
if args.profile:
# TQDM
im_names_desc.set_description(
'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
)
print('===========================> Finish Model Running.')
if (args.save_img or args.save_video) and not args.vis_fast:
print('===========================> Rendering remaining images in the queue...')
print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
while(writer.running()):
pass
writer.stop()
final_result = writer.results()
write_json(final_result, args.outputpath)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import decorators
from telemetry.core import util
from telemetry.page.actions import loop
from telemetry.unittest_util import tab_test_case
AUDIO_1_LOOP_CHECK = 'window.__hasEventCompleted("#audio_1", "loop");'
VIDEO_1_LOOP_CHECK = 'window.__hasEventCompleted("#video_1", "loop");'
class LoopActionTest(tab_test_case.TabTestCase):
def setUp(self):
tab_test_case.TabTestCase.setUp(self)
self.Navigate('video_test.html')
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWithNoSelector(self):
"""Tests that with no selector Loop action loops first media element."""
action = loop.LoopAction(loop_count=2, selector='#video_1',
timeout_in_seconds=10)
action.WillRunAction(self._tab)
action.RunAction(self._tab)
# Assert only first video has played.
self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWithAllSelector(self):
"""Tests that Loop action loops all video elements with selector='all'."""
action = loop.LoopAction(loop_count=2, selector='all',
timeout_in_seconds=10)
action.WillRunAction(self._tab)
# Neither media element has looped before running the action.
self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
action.RunAction(self._tab)
# Assert all media elements played.
self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWaitForLoopTimeout(self):
"""Tests that wait_for_loop timeout_in_secondss if video does not loop."""
action = loop.LoopAction(loop_count=2, selector='#video_1',
timeout_in_seconds=1)
action.WillRunAction(self._tab)
self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertRaises(util.TimeoutException, action.RunAction, self._tab)
|
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
import re
__all__ = ['CatalaEnLexer']
class CatalaEnLexer(RegexLexer):
name = 'CatalaEn'
aliases = ['catala_en']
filenames = ['*.catala_en']
flags = re.MULTILINE | re.UNICODE
tokens = {
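# Lexer states: 'root' scans plain (non-code) text, 'code' handles Catala code between /* and */, and 'main__1'/'main__2' consume @@ / @ headings.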
'root': [
(u'(@@)', bygroups(Generic.Heading), 'main__1'),
(u'(@)', bygroups(Generic.Heading), 'main__2'),
(u'([^\\/\\n\\r])', bygroups(Text)),
(u'(\\/\\*)', bygroups(Text), 'code'),
('(\n|\r|\r\n)', Text),
('.', Text),
],
'code': [
(u'(\\*\\/)', bygroups(Text), 'root'),
(u'(\\s*\\#.*$)', bygroups(Comment.Single)),
(u'(context)(\\s+)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)',
bygroups(Keyword.Declaration, Text, Name.Variable)),
(u'\\b(match|with\\s+pattern|fixed|by|decreasing|increasing|varies|with|we\\s+have|in|such\\s+that|exists|for|all|of|if|then|else|initial)\\b', bygroups(Keyword.Reserved)),
(u'\\b(scope|depends\\s+on|declaration|includes|collection|content|optional|structure|enumeration|context|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|label|exception)\\b', bygroups(Keyword.Declaration)),
(u'(\\|[0-9]+/[0-9]+/[0-9]+\\|)', bygroups(Number.Integer)),
(u'\\b(true|false)\\b', bygroups(Keyword.Constant)),
(u'\\b([0-9]+(,[0-9]*|))\\b', bygroups(Number.Integer)),
(u'(\\-\\-|\\;|\\.|\\,|\\:|\\(|\\)|\\[|\\]|\\{|\\})', bygroups(
Operator)),
(u'(\\-\\>|\\+\\.|\\+\\@|\\+\\^|\\+\\$|\\+|\\-\\.|\\-\\@|\\-\\^|\\-\\$|\\-|\\*\\.|\\*\\@|\\*\\^|\\*\\$|\\*|/\\.|/\\@|/\\^|/\\$|/|\\!|>\\.|>=\\.|<=\\.|<\\.|>\\@|>=\\@|<=\\@|<\\@|>\\$|>=\\$|<=\\$|<\\$|>\\^|>=\\^|<=\\^|<\\^|>|>=|<=|<|=|not|or|and|\\$|%|year|month|day)', bygroups(Operator)),
(u'\\b(integer|boolean|date|money|text|decimal|number|sum)\\b',
bygroups(Keyword.Type)),
(u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class, Operator, Name.Variable)),
(u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\'\\.]*)\\b', bygroups(Name.Variable, Operator, Text)),
(u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Variable)),
(u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class)),
('(\n|\r|\r\n)', Text),
('.', Text),
],
'main__1': [
(u'(@@)', bygroups(Generic.Heading), 'root'),
(u'(.)', bygroups(Generic.Heading)),
('(\n|\r|\r\n)', Text),
('.', Text),
],
'main__2': [
(u'(@)', bygroups(Generic.Heading), 'root'),
(u'(.)', bygroups(Generic.Heading)),
('(\n|\r|\r\n)', Text),
('.', Text),
]
}
|
from tensorflow.python import pywrap_tensorflow
checkpoint_path = 'checkpoints/VGGnet_fast_rcnn_iter_50000.ckpt'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
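# var_to_shape_map maps each variable name stored in the checkpoint to its tensor shape;
# reader.get_tensor(key) could be used here as well to load the actual values.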
for key in var_to_shape_map:
print("tensor_name: ", key)
|
#! /usr/bin/env python
import os
import subprocess
import svgwrite
import math
import shutil
########################################################################################################################
def ensure_requisite_folders(path):
folder = os.path.split(path)[0]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
def _png_name(p):
return p.split(".svg")[0]+".png"
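# to_png rasterizes an SVG to PNG by shelling out to the ImageMagick "convert" CLI, which is assumed to be on PATH.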
def to_png(from_path, to_path):
ensure_requisite_folders(to_path)
cmd = \
"""
convert {} {}
""".format(from_path, to_path)
subprocess.call(cmd.split())
def _advance_cursor(c, x, y):
return (c[0]+x, c[1]+y)
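# _kot draws a bold text label (the panel letters "a"-"d") near the cursor position _c, offset by (ox, oy).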
def _kot(dwg, _c, text, ox=-40, oy=50, style="font-size:40;font-family:Arial;font-weight:bold;stroke:black;stroke-width:1;fill:black"):
dwg.add(dwg.text(text, insert=(_c[0]+ox, _c[1]+oy), fill='black', style=style))
def figure_1(args):
_p = "_fig1.svg"#os.path.join(args.output_folder, "fig1.svg")
# diagram is (341,972); plots are 600,600
_1_size = (467, 986)
_2_size = (341, 972)
_size = (_1_size[0]+140+_2_size[0], _1_size[1])
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40,0) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.input_folder_2, "multixcan_illustration.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c = _advance_cursor(_c, _1_size[0] + 90 , 0)
dwg.add(dwg.image(os.path.join(args.input_folder_2, "S-Predixcan-MT-diagram_2.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "fig-multi-tissue-presentation.png")
to_png(_p, t)
os.remove(_p)
def figure_2(args):
_p = "_fig2.svg"#os.path.join(args.output_folder, "fig1.svg")
# diagram is (341,972); plots are 600,600
_1_size = (600, 600)
_size = (_1_size[0]*3+140, _1_size[1])
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (20,0) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "ukb_mt_vs_p_number_significant.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=0, oy=30)
_c = _advance_cursor(_c, _1_size[0] + 50, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "UKB_Cholesterol_significant_bars.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=0, oy=30)
_c =_advance_cursor (_c, _1_size[0]+50, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "UKB_Cholesterol_qq.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=0, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "fig-multi-tissue-ukb-cholesterol.png")
to_png(_p, t)
os.remove(_p)
def figure_3(args):
_p = "_fig3.svg"#os.path.join(args.output_folder, "fig1.svg")
# diagram is (341,972); plots are 600,600; illustration is 455,571
_1_size = (600, 600)
_2_size = (526*600.0/552, 600)
_size = (_1_size[0]*2+80, _1_size[1]*2+40)
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40+math.ceil(_1_size[0]-_2_size[0])/2.0, 0) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.input_folder_2, "smultixcan_illustration.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c =_advance_cursor (_c, _2_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "smt_vs_sp_number_significant.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
_c = (40, _1_size[1]+40) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "PGC_scz2_qq.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[1]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "PGC_scz2_significant_bars.png"), _c, _1_size))
_kot(dwg, _c, "d", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "fig-s-multi-tissue-presentation.png")
to_png(_p, t)
os.remove(_p)
def figure_5(args):
_p = "_fig5.svg"#os.path.join(args.output_folder, "fig1.svg")
_1_size = (800, 800)
_size = (_1_size[0]*2+80, _1_size[1]+40)
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40, 0) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "null_30_qq.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "null_0_qq.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "supp-fig-simulations-null.png")
to_png(_p, t)
os.remove(_p)
def figure_6_d(args):
_p = "_fig6.svg" # os.path.join(args.output_folder, "fig1.svg")
_1_size = (800, 800)
_size = (_1_size[0] * 2 + 80, _1_size[1] *2 + 80)
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40.0, 40) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "single_tissue_bp.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "correlated_tissues_bp.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
_c = (40, _1_size[1]*1+80)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_brain_bp.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_all_bp.png"), _c, _1_size))
_kot(dwg, _c, "d", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "supp-fig-simulations-misc.png")
to_png(_p, t)
os.remove(_p)
def figure_6(args):
_p = "_fig6.svg" # os.path.join(args.output_folder, "fig1.svg")
_1_size = (800, 800)
_size = (_1_size[0] * 3 + 80, _1_size[1])
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40.0, 0) # conceptual cursor
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "single_tissue_bp.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c = _advance_cursor(_c, _1_size[0] + 40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_brain_bp.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_all_bp.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "supp-fig-simulations-misc.png")
to_png(_p, t)
os.remove(_p)
def shove(args):
def _shove(input_folder, output_folder, files, file_prefix=""):
for sf in files:
shutil.copy(os.path.join(input_folder, *sf),
os.path.join(output_folder, file_prefix + sf[len(sf) - 1].replace("_", "-")))
figures = [("ukb","smt_vs_mt_ukb.png",)]
_shove(args.plots_folder, args.output_folder, figures, file_prefix="fig-")
supp_figures = [("ukb", "smt_vs_mt_ukb_supp.png",),
("ukb", "proportion_underestimated_ukb.png",),
("ukb", "UKB_Cholesterol_significant_bars_fdr.png",),
("simulations", "combination_all_tendency.png",),
("simulations", "pc.png"),
("wtccc", "t1d_snp_intersection.png")]
_shove(args.plots_folder, args.output_folder, supp_figures, "supp-fig-")
supp_data =[("gwas_traits.txt",),
("gwas_smultixcan_stats.txt",),
("gwas_smultixcan_significant.txt",),
("gwas_sp_significant.txt",),
("ukb_multixcan_stats.txt",),
("ukb_p_significant.txt",),
("ukb_multixcan_significant.txt",),
("ukb_individual_pm.txt",),
("wtccc_t1d.txt",)]
_shove(args.input_folder, args.output_folder, supp_data, "supp-data-")
images = [("corrplot_pearson_SLC5A6.png",)]
_shove(args.input_folder_2, args.output_folder, images, "supp-fig-")
########################################################################################################################
def run(args):
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder)
shove(args)
figure_1(args)
figure_2(args)
figure_3(args)
#figure_4(args)
figure_5(args)
figure_6(args)
if __name__ == "__main__":
class Dummy(object):
def __init__(self):
self.output_folder = "results/paper_material"
self.plots_folder = "results/plots"
self.input_folder = "results"
self.input_folder_2 = "images"
self.input_folder_3 = "external_data"
args = Dummy()
run(args)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/00_rgbkm.ipynb (unless otherwise specified).
__all__ = ['reflectance']
# Cell
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
import scipy.optimize as optimize
def reflectance(K, S, D, Rg):
'''Calculates reflectance for a single-colorant Kubelka-Munk model.
Based on Nobbs (1997) formulation with a modified Saunderson expression for infinite reflectance.
Works for single-channel, 3-channel RGB, and spectral data/images with multiple wavelength channels.
Parameters:
-----------
K: tuple-like (n channels)
Colorant absorption coefficients for wavelength or RGB channels
S: tuple-like (n channels)
Colorant scattering coefficients for wavelength or RGB channels
D: array (height x width)
Colorant thickness image
Rg: array (height x width x n) or rgb tuple with shape (3,)
Background reflectance image or background color
Returns:
--------
refl: array (height x width x n)
n-channel reflectance image
'''
Rg = np.array(Rg)
shape = Rg.shape
# create uniform background image if Rg is rgb tuple
if len(shape) == 1: # understood as rgb tuple
h, w = D.shape
Rg_img = np.ones([h, w, 3])
Rg_img[:,:] = Rg
Rg = Rg_img
shape = Rg.shape
#print('created uniform rgb background image Rg with shape: {}'.format(shape))
n_channels = shape[-1]
K = np.array(K).reshape(1, n_channels)
S = np.array(S).reshape(1, n_channels)
D = np.array(D).reshape(-1, 1)
Rg = Rg.reshape(-1, n_channels)
# need to return infinity for K <= 0 or S < 0 in optimization code
#pos_S = S >= 0
#pos_K = K > 0 # also non-zero
#ok = pos_S & pos_K
#Rinf = np.zeros([1, n_channels])
Rinf = (S/K) / ((S/K) + 1 + np.sqrt(1 + 2 * (S/K)))
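# Rinf: reflectance of an infinitely thick colorant layer, expressed via the scattering-to-absorption ratio S/K (modified Saunderson form)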
#Rinf[ok] = (S[ok]/K[ok]) / ((S[ok]/K[ok]) + 1 + np.sqrt(1 + 2 * (S[ok]/K[ok])))
#Rinf[~ok] = np.infty
Z = D * np.sqrt(K * (K + 2 * S))
Z = np.clip(Z, a_min=0, a_max=50)
beta = np.exp(2 * Z) - 1
alpha = (1 - Rinf**2) / (1 - Rg * Rinf)
refl = (alpha * Rg + beta * Rinf) / (alpha + beta)
refl = refl.reshape(shape)
return refl
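# Minimal usage sketch (hypothetical coefficients, not taken from any measured data):
#   K = (0.20, 0.40, 0.60)                            # per-RGB-channel absorption
#   S = (0.10, 0.10, 0.10)                            # per-RGB-channel scattering
#   D = np.full((32, 32), 0.5)                        # uniform colorant thickness image
#   refl = reflectance(K, S, D, Rg=(0.9, 0.9, 0.9))   # light background -> (32, 32, 3) reflectance image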
|
#!/bin/env python
# coding=utf-8
import os
import base64
import socket
import numpy
import time
m_serv_ip = '10.230.147.31'
m_serv_port = 9999
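# wire format used below: a 16-byte zero-padded decimal length header (covering name + payload), then the image name, then the base64-encoded image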
def init_socket(serv_ip, serv_port):
""""""
ip_port = (serv_ip, serv_port)
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) # TCP
#sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) # UDP
sk.connect(ip_port)
sk.settimeout(50)
return sk
def send_socket(sk, b64, name):
""""""
len_content = len(b64) + len(name)
sk.sendall(bytes(str(len_content).zfill(16), encoding='utf-8')) # send the 16-byte zero-padded length header
sk.sendall(bytes(name, encoding='utf-8'))
#sk.sendall(str(len_content).zfill(16)) # send header (Python 2 variant)
#sk.sendall(name)
sk.sendall(b64) # send the base64 payload
sk.close()
def img_to_b64(img_path):
"""显示一副图片"""
assert os.path.isfile(img_path)
with open(img_path, 'rb') as f:
img = f.read()
b64 = base64.b64encode(img)
return b64
def get_img_names(img_dir):
""""""
assert os.path.isdir(img_dir)
names_all = os.listdir(img_dir)
names = [name for name in names_all if name.endswith('.jpg')]
print('directory {0}: {1} files in total, {2} images'.format(img_dir, len(names_all), len(names)))
return names
def send_batch(img_dir, img_names, start_idx, batch_num=10):
"""显示指定目录下的所有图片"""
global m_serv_ip
global m_serv_port
t0 = time.clock()
t1 = time.clock()
for cnt, img_name in enumerate(img_names[start_idx: start_idx + batch_num]):
img_path = os.path.join(img_dir, img_name)
b64 = img_to_b64(img_path) # get the base64 encoding
sk = init_socket(m_serv_ip, m_serv_port)
send_socket(sk, b64, os.path.splitext(img_name)[0]) # send data; splitext drops the '.jpg' extension (rstrip would strip characters, not the suffix)
t2 = time.clock()
print('cnt {0} finish, time elapsed: {1}, total elapsed: {2}'.format(cnt, t2 - t1, t2 - t0))
t1 = t2
print('all finished, num send: {0}, time elapsed: {1}'.format(len(img_names), time.clock() - t0))
#sk.close()
def client(img_dir, batch_size, max_batch):
""""""
assert os.path.isdir(img_dir)
t0 = time.clock()
img_names = get_img_names(img_dir)
num_img = len(img_names)
num_finish = 0 # number of images sent so far
start_idx = 0 # start index of the current batch
num_batch = 0
while num_finish < num_img:
max_num = 0
num_left = num_img - num_finish
if num_left < batch_size:
max_num = num_left
else:
max_num = batch_size
send_batch(img_dir, img_names, start_idx, max_num)
start_idx += max_num
num_finish += max_num
num_batch += 1
if num_batch >= max_batch:
break
print('client finish, time elapsed: {0}'.format(time.clock() - t0))
if __name__ == '__main__':
client('../data/problem3/train', batch_size=20, max_batch=10000)
|
"""Implement the SequentialGeometricProgram class"""
from time import time
from collections import OrderedDict
import numpy as np
from ..exceptions import InvalidGPConstraint, Infeasible, UnnecessarySGP
from ..keydict import KeyDict
from ..nomials import Variable
from .gp import GeometricProgram
from ..nomials import PosynomialInequality
from .. import NamedVariables
from .costed import CostedConstraintSet
EPS = 1e-6 # 1 +/- this is used in a few relative differences
# pylint: disable=too-many-instance-attributes
class SequentialGeometricProgram(CostedConstraintSet):
"""Prepares a collection of signomials for a SP solve.
Arguments
---------
cost : Posynomial
Objective to minimize when solving
constraints : list of Constraint or SignomialConstraint objects
Constraints to maintain when solving (implicitly Signomials <= 1)
verbosity : int (optional)
Currently has no effect: SequentialGeometricPrograms don't know
anything new after being created, unlike GeometricPrograms.
Attributes with side effects
----------------------------
`gps` is set during a solve
`result` is set at the end of a solve
Examples
--------
>>> gp = gpkit.geometric_program.SequentialGeometricProgram(
# minimize
x,
[ # subject to
1/x - y/x, # <= 1, implicitly
y/10 # <= 1
])
>>> gp.solve()
"""
gps = solver_outs = _results = result = model = None
_gp = _spvars = _lt_approxs = pccp_penalty = None
with NamedVariables("SGP"):
slack = Variable("PCCPslack")
def __init__(self, cost, model, substitutions, *,
use_pccp=True, pccp_penalty=2e2, **initgpargs):
# pylint: disable=super-init-not-called,non-parent-init-called
if cost.any_nonpositive_cs:
raise UnnecessarySGP("""Sequential GPs need Posynomial objectives.
The equivalent of a Signomial objective can be constructed by constraining
a dummy variable `z` to be greater than the desired Signomial objective `s`
(z >= s) and then minimizing that dummy variable.""")
self.model = model
self._original_cost = cost
self.externalfn_vars = \
frozenset(Variable(v) for v in self.model.varkeys if v.externalfn)
if not self.externalfn_vars:
try:
sgpconstraints = {"SP constraints": [], "GP constraints": []}
self._lt_approxs = []
for cs in model.flat():
try:
if not isinstance(cs, PosynomialInequality):
cs.as_hmapslt1(substitutions) # gp-compatible?
sgpconstraints["GP constraints"].append(cs)
except InvalidGPConstraint:
sgpconstraints["SP constraints"].append(cs)
if use_pccp:
lts = [lt/self.slack for lt in cs.as_approxlts()]
else:
lts = cs.as_approxlts()
self._lt_approxs.append(lts)
if not sgpconstraints["SP constraints"]:
raise UnnecessarySGP("""Model valid as a Geometric Program.
SequentialGeometricPrograms should only be created with Models containing
Signomial Constraints, since Models without Signomials have global
solutions and can be solved with 'Model.solve()'.""")
if use_pccp:
self.pccp_penalty = pccp_penalty
self.cost = cost * self.slack**pccp_penalty
sgpconstraints["GP constraints"].append(self.slack >= 1)
else:
self.cost = cost
self.idxlookup = {k: i for i, k in enumerate(sgpconstraints)}
list.__init__(self, sgpconstraints.values())
self.substitutions = substitutions
self._gp = self.init_gp(**initgpargs)
self.blackboxconstraints = False
return
except AttributeError:
pass # some constraint lacked a required method; fall back to black-box constraint handling below
self.blackboxconstraints = True
self.__bare_init__(cost, model, substitutions)
# pylint: disable=too-many-locals,too-many-branches
# pylint: disable=too-many-arguments
# pylint: disable=too-many-statements
def localsolve(self, solver=None, *, verbosity=1, x0=None, reltol=1e-4,
iteration_limit=50, mutategp=True, **solveargs):
"""Locally solves a SequentialGeometricProgram and returns the solution.
Arguments
---------
solver : str or function (optional)
By default uses one of the solvers found during installation.
If set to "mosek", "mosek_cli", or "cvxopt", uses that solver.
If set to a function, passes that function cs, A, p_idxs, and k.
verbosity : int (optional)
If greater than 0, prints solve time and number of iterations.
Each GP is created and solved with verbosity one less than this, so
if greater than 1, prints solver name and time for each GP.
x0 : dict (optional)
Initial location to approximate signomials about.
reltol : float
Iteration ends when this is greater than the distance between two
consecutive solves' objective values.
iteration_limit : int
Maximum GP iterations allowed.
mutategp: boolean
Prescribes whether to mutate the previously generated GP
or to create a new GP with every solve.
**solveargs :
Passed to solver function.
Returns
-------
result : dict
A dictionary containing the translated solver result.
"""
self.gps, self.solver_outs, self._results = [], [], []
# if there's external functions we can't mutate the GP
mutategp = mutategp and not self.blackboxconstraints
if not mutategp and not x0:
raise ValueError("Solves with arbitrary constraint generators"
" must specify an initial starting point x0.")
if mutategp:
if x0:
self._gp = self.init_gp(x0)
gp = self._gp
starttime = time()
if verbosity > 0:
print("Starting a sequence of GP solves")
if self.externalfn_vars:
print(" for %i variables defined by externalfns"
% len(self.externalfn_vars))
elif mutategp:
print(" for %i free variables" % len(self._spvars))
print(" in %i signomial constraints"
% len(self["SP constraints"]))
print(" and for %i free variables" % len(gp.varlocs))
print(" in %i posynomial inequalities." % len(gp.k))
prevcost, cost, rel_improvement = None, None, None
while rel_improvement is None or rel_improvement > reltol:
prevcost = cost
if len(self.gps) > iteration_limit:
raise Infeasible(
"Unsolved after %s iterations. Check `m.program.results`;"
" if they're converging, try `.localsolve(...,"
" iteration_limit=NEWLIMIT)`." % len(self.gps))
if mutategp:
self.update_gp(x0)
else:
gp = self.gp(x0)
gp.model = self.model
self.gps.append(gp) # NOTE: SIDE EFFECTS
if verbosity > 1:
print("\nGP Solve %i" % len(self.gps))
if verbosity > 2:
print("===============")
solver_out = gp.solve(solver, verbosity=verbosity-1,
gen_result=False, **solveargs)
self.solver_outs.append(solver_out)
cost = float(solver_out["objective"])
x0 = dict(zip(gp.varlocs, np.exp(solver_out["primal"])))
if verbosity > 2 and self._spvars:
result = gp.generate_result(solver_out, verbosity=verbosity-3)
self._results.append(result)
print(result.table(self._spvars))
elif verbosity > 1:
print("Solved cost was %.4g." % cost)
if prevcost is None:
continue
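# symmetric relative difference between consecutive GP costs; the outer loop stops once this drops below reltol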
rel_improvement = (prevcost - cost)/(prevcost + cost)
if cost*(1 - EPS) > prevcost + EPS and verbosity > -1:
print("SGP not convergent: Cost rose by %.2g%% on GP solve %i."
" Details can be found in `m.program.results` or by"
" solving at a higher verbosity. Note that convergence is"
" not guaranteed for models with SignomialEqualities.\n"
% (100*(cost - prevcost)/prevcost, len(self.gps)))
rel_improvement = cost = None
# solved successfully!
self.result = gp.generate_result(solver_out, verbosity=verbosity-3)
self.result["soltime"] = time() - starttime
if verbosity > 1:
print()
if verbosity > 0:
print("Solving took %.3g seconds and %i GP solves."
% (self.result["soltime"], len(self.gps)))
self.model.process_result(self.result)
if self.externalfn_vars:
for v in self.externalfn_vars:
self[0].insert(0, v.key.externalfn) # for constraint senss
if self.slack.key in self.result["variables"]:
excess_slack = self.result["variables"][self.slack.key] - 1
if excess_slack <= EPS:
del self.result["freevariables"][self.slack.key]
del self.result["variables"][self.slack.key]
del self.result["sensitivities"]["variables"][self.slack.key]
slackconstraint = self["GP constraints"][-1]
del self.result["sensitivities"]["constraints"][slackconstraint]
elif verbosity > -1:
print("Final solution let signomial constraints slacken by"
" %.2g%%. Calling .localsolve with a higher"
" `pccp_penalty` (it was %.3g this time) will reduce"
" final slack if the model is solvable with less. If"
" you think it might not be, check by solving with "
"`use_pccp=False, x0=(this model's final solution)`.\n"
% (100*excess_slack, self.pccp_penalty))
return self.result
# pylint: disable=too-many-locals
def localsolveonce(self, solver=None, verbosity=1, x0=None, reltol=1e-4,
iteration_limit=50, mutategp=True, **kwargs):
"""Locally solves a SequentialGeometricProgram ONCE and returns the solution.
Arguments
---------
solver : str or function (optional)
By default uses one of the solvers found during installation.
If set to "mosek", "mosek_cli", or "cvxopt", uses that solver.
If set to a function, passes that function cs, A, p_idxs, and k.
verbosity : int (optional)
If greater than 0, prints solve time and number of iterations.
Each GP is created and solved with verbosity one less than this, so
if greater than 1, prints solver name and time for each GP.
x0 : dict (optional)
Initial location to approximate signomials about.
reltol : float
Iteration ends when this is greater than the distance between two
consecutive solves' objective values.
iteration_limit : int
Maximum GP iterations allowed.
*args, **kwargs :
Passed to solver function.
Returns
-------
result : dict
A dictionary containing the translated solver result.
"""
starttime = time()
if verbosity > 0:
print("Beginning signomial solve.")
self.gps = [] # NOTE: SIDE EFFECTS
self.results = []
if x0 and mutategp:
self._gp = self.init_gp(self.substitutions, x0)
slackvar = Variable()
prevcost, cost, rel_improvement = None, None, None
while (rel_improvement is None or rel_improvement > reltol) and len(self.gps) < iteration_limit:
if len(self.gps) > iteration_limit:
raise RuntimeWarning("""problem unsolved after %s iterations.
The last result is available in Model.program.gps[-1].result. If the gps
appear to be converging, you may wish to increase the iteration limit by
calling .localsolve(..., iteration_limit=NEWLIMIT).""" % len(self.gps))
gp = self.gp(x0, mutategp)
self.gps.append(gp) # NOTE: SIDE EFFECTS
try:
result = gp.solve(solver, verbosity-1,
warn_on_check=True, **kwargs)
self.results.append(result)
except (RuntimeWarning, ValueError):
feas_constrs = ([slackvar >= 1] +
[posy <= slackvar
for posy in gp.posynomials[1:]])
primal_feas = GeometricProgram(slackvar**100 * gp.cost,
feas_constrs, None)
self.gps.append(primal_feas)
result = primal_feas.solve(solver, verbosity-1, **kwargs)
result["cost"] = None # reset the cost-counting
x0 = result["freevariables"]
prevcost, cost = cost, result["cost"]
if prevcost is None or cost is None:
rel_improvement = None
elif prevcost < (1-reltol)*cost:
print("SP is not converging! Last GP iteration had a higher"
" cost (%.2g) than the previous one (%.2g). Results for"
" each iteration are in (Model).program.results. If your"
" model contains SignomialEqualities, note that"
" convergence is not guaranteed: try replacing any"
" SigEqs you can and solving again." % (cost, prevcost))
else:
rel_improvement = abs(prevcost-cost)/(prevcost + cost)
# solved successfully!
soltime = time() - starttime
if verbosity > 0:
print("Solving took %i GP solves" % len(self.gps)
+ " and %.3g seconds." % soltime)
self.process_result(result)
self.result = SolutionArray(result.copy()) # NOTE: SIDE EFFECTS
self.result["soltime"] = soltime
if self.externalfn_vars:
for v in self.externalfn_vars:
self[0].insert(0, v.key.externalfn) # for constraint senss
return self.result
@property
def results(self):
"Creates and caches results from the raw solver_outs"
if not self._results:
self._results = [o["generate_result"]() for o in self.solver_outs]
return self._results
def _fill_x0(self, x0):
"Returns a copy of x0 with subsitutions added."
x0kd = KeyDict()
x0kd.varkeys = self.varkeys
if x0:
x0kd.update(x0) # has to occur after the setting of varkeys
x0kd.update(self.substitutions)
return x0kd
def init_gp(self, x0=None, **initgpargs):
"Generates a simplified GP representation for later modification"
x0 = self._fill_x0(x0)
constraints = OrderedDict({"SP approximations": []})
constraints["GP constraints"] = self["GP constraints"]
self._spvars = set([self.slack])
for cs, lts in zip(self["SP constraints"], self._lt_approxs):
for lt, gt in zip(lts, cs.as_approxgts(x0)):
constraint = (lt <= gt)
constraint.generated_by = cs
constraints["SP approximations"].append(constraint)
self._spvars.update({vk for vk in gt.varkeys
if vk not in self.substitutions})
gp = GeometricProgram(self.cost, constraints, self.substitutions,
**initgpargs)
gp.x0 = x0
return gp
def update_gp(self, x0):
"Update self._gp for x0."
if not self.gps:
return # we've already generated the first gp
gp = self._gp
gp.x0.update({k: v for (k, v) in x0.items() if k in self._spvars})
hmap_idx = 0
for sp_constraint, lts in zip(self["SP constraints"], self._lt_approxs):
for lt, gt in zip(lts, sp_constraint.as_approxgts(gp.x0)):
unsubbed = lt/gt
gp["SP approximations"][hmap_idx].unsubbed = [unsubbed]
hmap = unsubbed.hmap.sub(self.substitutions, unsubbed.varkeys)
hmap.parent = gp["SP approximations"][hmap_idx]
hmap_idx += 1 # here because gp.hmaps[0] is the cost hmap
gp.hmaps[hmap_idx] = hmap
gp.gen()
def gp(self, x0=None, **gpinitargs):
"The GP approximation of this SP at x0."
x0 = self._fill_x0(x0)
constraints = OrderedDict(
{"SP constraints": [c.as_gpconstr(x0) for c in self.model.flat()]})
if self.externalfn_vars:
constraints["Generated by externalfns"] = []
for v in self.externalfn_vars:
constraint = v.key.externalfn(v, x0)
constraint.generated_by = v.key.externalfn
constraints["Generated by externalfns"].append(constraint)
gp = GeometricProgram(self._original_cost,
constraints, self.substitutions, **gpinitargs)
gp.x0 = x0
return gp
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
ALIAS = "os-hypervisors"
authorize = extensions.os_compute_authorizer(ALIAS)
class HypervisorsController(wsgi.Controller):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self):
self.host_api = compute.HostAPI()
self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
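# _view_hypervisor builds the API view of a compute node: id/hostname/state/status, plus resource details when detail=True and a server list when servers are given.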
def _view_hypervisor(self, hypervisor, service, detail, servers=None,
**kwargs):
alive = self.servicegroup_api.service_is_up(service)
hyp_dict = {
'id': hypervisor.id,
'hypervisor_hostname': hypervisor.hypervisor_hostname,
'state': 'up' if alive else 'down',
'status': ('disabled' if service.disabled
else 'enabled'),
}
if detail and not servers:
for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least',
'host_ip'):
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': service.id,
'host': hypervisor.host,
'disabled_reason': service.disabled_reason,
}
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
# Add any additional info
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in compute_nodes])
@extensions.expected_errors(())
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
True)
for hyp in compute_nodes])
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
service = self.host_api.service_get_by_compute_host(
context, hyp.host)
return dict(hypervisor=self._view_hypervisor(hyp, service, True))
@extensions.expected_errors((404, 501))
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
# Get the uptime
try:
host = hyp.host
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
service = self.host_api.service_get_by_compute_host(context, host)
return dict(hypervisor=self._view_hypervisor(hyp, service, False,
uptime=uptime))
@extensions.expected_errors(404)
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in hypervisors])
else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors(404)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node.host)
service = self.host_api.service_get_by_compute_host(
context, compute_node.host)
hyp = self._view_hypervisor(compute_node, service, False,
instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
@extensions.expected_errors(())
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.V3APIExtensionBase):
"""Admin-only hypervisor administration."""
name = "Hypervisors"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension(ALIAS,
HypervisorsController(),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
def get_controller_extensions(self):
return []
|
_BASE_WWW_BILIBILI_COM="https://www.bilibili.com"
_BASE_API_BILIBILI_COM="https://api.bilibili.com"
_BASE_API_BILIBILI_COM_X="https://api.bilibili.com/x"
_BASE_API_BILIBILI_COM_X_V2="%s/v2" % _BASE_API_BILIBILI_COM_X
_BASE_WEB_INTERFACE="%s/web-interface" % _BASE_API_BILIBILI_COM_X
_BASE_API_VC_BILIBILI_COM="http://api.vc.bilibili.com"
_BASE_INTERFACE_BILIBILI_COM="https://interface.bilibili.com"
|
# -*- coding: utf-8 -*
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import json
import os
from flask import Flask
from flask import jsonify
from flask_cors import CORS
app = Flask(__name__)
# To be tested
cors = CORS(app)
#--------------------_Function_--------------
# Setup the Sheets API
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
store = file.Storage('/home/pi/Serveur/credentials.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('/home/pi/Serveur/client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
@app.route('/')
def Welcome():
return "Hello World !"
@app.route('/sheetID/<post_id>')
def getSheetInfo(post_id):
# Call the Sheets API
#1vS3-iv0GOnHQTNMudK9yjl-KYdMQZjb7smJ6CNUa4x8
SPREADSHEET_ID = post_id
# SheetName!Range1:RangeTop
RANGE_NAME = 'Réponses au formulaire 1!A2:P'
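# columns A..P of each returned row are mapped positionally onto the registration fields extracted below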
try:
result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
range=RANGE_NAME).execute()
except Exception as e:
print("Error while reading the spreadsheet:")
print(e)
return jsonify([])
values = result.get('values', [])
resultFin = []
if not values:
print('No data found.')
return ''
else:
print(values)
print("------")
for row in values:
print(row)
horodateur = row[0]
nom = row[1].upper()
prenom = row[2].upper()
nationalite = row[3]
dateNaissance = row[4]
lieuNaissance = row[5]
departement = row[6]
sexe = row[7]
adresse = row[8]
adresse2 = ''
if row[9] != "" :
adresse2 = row[9]
codePostal = row[10]
ville = row[11]
pays = row[12]
telephone = ''
if row[13] != "" :
telephone = '0'+row[13]
portable = ''
if row[14] != "" :
portable = '0'+row[14]
mail = row[15]
commentaire = ''
#if row[16] != None :
# commentaire = row[16]
activite = 'secourisme'
result = {'Horodateur':horodateur,'Nom':nom,'Prenom':prenom,'Nationalite':nationalite,'Portable':portable,'Departement':departement,'Pays':pays,'Sexe':sexe,'Mail':mail,'DateNaissance':dateNaissance,'LieuNaissance':lieuNaissance,'Adresse':adresse,'Adresse2':adresse2,'CodePostal':codePostal,'Ville':ville,'Telephone':telephone,'Activite':activite}
resultFin.append(result)
print(resultFin)
print("-----")
return jsonify(resultFin)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
import sys
from math import floor
from time import time, localtime
from shutil import get_terminal_size
from platform import system
from typing import Union
from .func import get_stdout
class ProgressBar:
def __init__(self, bar_type: str) -> None:
self.machine = False
self.hide = False
self.icon = '⏳'
self.chars = [' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█']
self.brackets = ('|', '|')
if bar_type == 'classic':
self.icon = '⏳'
self.chars = ['░', '█']
self.brackets = ('[', ']')
if bar_type == 'ascii':
self.icon = '& '
self.chars = ['-', '#']
self.brackets = ('[', ']')
if bar_type == 'machine':
self.machine = True
if bar_type == 'none':
self.hide = True
self.part_width = len(self.chars) - 1
self.ampm = True
if system() == 'Darwin' and bar_type in ('default', 'classic'):
try:
date_format = get_stdout(
['defaults', 'read', 'com.apple.menuextra.clock', 'DateFormat']
)
self.ampm = 'a' in date_format
except FileNotFoundError:
pass
@staticmethod
def pretty_time(my_time: float, ampm: bool) -> str:
new_time = localtime(my_time)
hours = new_time.tm_hour
minutes = new_time.tm_min
if ampm:
if hours == 0:
hours = 12
if hours > 12:
hours -= 12
ampm_marker = 'PM' if new_time.tm_hour >= 12 else 'AM'
return '{:02}:{:02} {}'.format(hours, minutes, ampm_marker)
return '{:02}:{:02}'.format(hours, minutes)
def tick(self, index: Union[int, float]) -> None:
if self.hide:
return
progress = min(1, max(0, index / self.total))
if progress == 0:
progress_rate = 0.0
else:
progress_rate = (time() - self.begin_time) / progress
if self.machine:
index = min(index, self.total)
raw = int(self.begin_time + progress_rate)
print('{}~{}~{}~{}~{}'.format(
self.title, index, self.total, self.begin_time, raw),
end='\r', flush=True)
return
new_time = self.pretty_time(self.begin_time + progress_rate, self.ampm)
percent = round(progress * 100, 1)
p_pad = " " * (4 - len(str(percent)))
columns = get_terminal_size().columns
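# leave roughly 32 columns for the icon, title, percentage and ETA text that surround the bar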
bar_len = max(1, columns - (self.len_title + 32))
progress_bar_str = self.progress_bar_str(progress, bar_len)
bar = f' {self.icon}{self.title} {progress_bar_str} {p_pad}{percent}% ETA {new_time}'
if len(bar) > columns - 2:
bar = bar[:columns - 2]
else:
bar += ' ' * (columns - len(bar) - 4)
sys.stdout.write(bar + '\r')
try:
sys.stdout.flush()
except AttributeError:
pass
def start(self, total: Union[int, float], title: str='Please wait') -> None:
self.title = title
self.len_title = len(title)
self.total = total
self.begin_time = time()
try:
self.tick(0)
except UnicodeEncodeError:
self.icon = '& '
self.chars = ['-', '#']
self.brackets = ('[', ']')
self.part_width = 1
def progress_bar_str(self, progress: float, width: int) -> str:
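# Full cells use the last character in self.chars; the fractional remainder selects a partial-fill character for sub-cell resolution.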
whole_width = floor(progress * width)
remainder_width = (progress * width) % 1
part_width = floor(remainder_width * self.part_width)
part_char = self.chars[part_width]
if width - whole_width - 1 < 0:
part_char = ''
line = (
self.brackets[0]
+ self.chars[-1] * whole_width
+ part_char
+ self.chars[0] * (width - whole_width - 1)
+ self.brackets[1]
)
return line
@staticmethod
def end() -> None:
sys.stdout.write(' ' * (get_terminal_size().columns - 2) + '\r')
|
# Copyright 2014 Muchos authors (see AUTHORS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from muchos.config import DeployConfig
def test_defaults():
c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
'../conf/checksums', 'mycluster')
assert c.checksum_ver('accumulo', '1.9.0') == 'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
assert c.checksum('accumulo') == 'baa5e0929248ff0d96355bc7fb42a5b75d183a83364519296e07b0adbb089180'
assert c.get('ec2', 'default_instance_type') == 'm5d.large'
assert c.get('ec2', 'worker_instance_type') == 'm5d.large'
assert c.get('ec2', 'aws_ami') == 'ami-9887c6e7'
assert c.max_ephemeral() == 1
assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
assert c.node_type_map() == {'default': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]},
'worker': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]}}
assert c.node_type('worker1') == 'worker'
assert c.node_type('leader1') == 'default'
assert not c.has_option('ec2', 'vpc_id')
assert not c.has_option('ec2', 'subnet_id')
assert c.get('ec2', 'key_name') == 'my_aws_key'
assert c.instance_tags() == {}
assert len(c.nodes()) == 6
assert c.get_node('leader1') == ['namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
assert c.get_node('worker1') == ['worker']
assert c.get_node('worker2') == ['worker']
assert c.get_node('worker3') == ['worker']
assert c.has_service('accumulomaster')
assert not c.has_service('fluo')
assert c.get_service_hostnames('worker') == ['worker1', 'worker2', 'worker3', 'worker4']
assert c.get_service_hostnames('zookeeper') == ['leader1']
assert c.get_hosts() == {'leader2': ('10.0.0.1', None), 'leader1': ('10.0.0.0', '23.0.0.0'), 'worker1': ('10.0.0.2', None), 'worker3': ('10.0.0.4', None), 'worker2': ('10.0.0.3', None), 'worker4': ('10.0.0.5', None)}
assert c.get_public_ip('leader1') == '23.0.0.0'
assert c.get_private_ip('leader1') == '10.0.0.0'
assert c.cluster_name == 'mycluster'
assert c.version("accumulo").startswith('2.')
assert c.version("fluo").startswith('1.')
assert c.version("hadoop").startswith('3.')
assert c.version("zookeeper").startswith('3.')
assert c.get_service_private_ips("worker") == ['10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
assert c.get('general', 'proxy_hostname') == "leader1"
assert c.proxy_public_ip() == "23.0.0.0"
assert c.proxy_private_ip() == "10.0.0.0"
assert c.get('general', 'cluster_basedir') == "/home/centos"
assert c.get('general', 'cluster_user') == "centos"
assert c.get_non_proxy() == [('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'), ('10.0.0.3', 'worker2'),
('10.0.0.4', 'worker3'), ('10.0.0.5', 'worker4')]
assert c.get_host_services() == [('leader1', 'namenode resourcemanager accumulomaster zookeeper'), ('leader2', 'metrics'),
('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
def test_case_sensitive():
c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
'../conf/checksums', 'mycluster')
assert c.has_option('ec2', 'default_instance_type')
assert not c.has_option('ec2', 'Default_instance_type')
c.set('nodes', 'CamelCaseWorker', 'worker,fluo')
c.init_nodes()
assert c.get_node('CamelCaseWorker') == ['worker', 'fluo']
|
import ast
import inspect
import keyword
import sys
import traceback
from clikit.api.io import IO
class ExceptionTrace(object):
"""
Renders the trace of an exception.
"""
THEME = {
"comment": "<fg=black;options=bold>",
"keyword": "<fg=yellow>",
"builtin": "<fg=blue>",
"literal": "<fg=magenta>",
}
AST_ELEMENTS = {
"builtins": __builtins__.keys()
if type(__builtins__) is dict
else dir(__builtins__),
"keywords": [
getattr(ast, cls)
for cls in dir(ast)
if keyword.iskeyword(cls.lower())
and inspect.isclass(getattr(ast, cls))
and issubclass(getattr(ast, cls), ast.AST)
],
}
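# AST_ELEMENTS collects builtin names and keyword AST node classes; _format_tree uses them to choose a highlight theme for tokens on the offending source line.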
def __init__(self, exception): # type: (Exception) -> None
self._exception = exception
self._exc_info = sys.exc_info()
def render(self, io, simple=False): # type: (IO, bool) -> None
if hasattr(self._exception, "__traceback__"):
tb = self._exception.__traceback__
else:
tb = self._exc_info[2]
title = ""
if not simple:
title += "\n[<error>{}</error>]\n".format(
self._exception.__class__.__name__
)
title += "<error>{}</error>".format(str(self._exception))
io.write_line(title)
if not simple and io.is_verbose():
io.write_line("")
self._render_traceback(io, tb)
def _render_traceback(self, io, tb): # type: (IO, ...) -> None
frames = []
while tb:
frames.append(self._format_traceback_frame(io, tb))
tb = tb.tb_next
io.write_line("<b>Traceback (most recent call last):</b>")
io.write_line("".join(traceback.format_list(frames)))
def _format_traceback_frame(self, io, tb): # type: (IO, ...) -> Tuple[Any]
frame_info = inspect.getframeinfo(tb)
filename = frame_info.filename
lineno = frame_info.lineno
function = frame_info.function
line = frame_info.code_context[0]
stripped_line = line.lstrip(" ")
try:
tree = ast.parse(stripped_line, mode="exec")
formatted = self._format_tree(tree, stripped_line, io)
formatted = (len(line) - len(stripped_line)) * " " + formatted
except SyntaxError:
formatted = line
return (
io.format("<c1>{}</c1>".format(filename)),
"<fg=blue;options=bold>{}</>".format(lineno),
"<b>{}</b>".format(function),
formatted,
)
def _format_tree(self, tree, source, io):
offset = 0
chunks = []
nodes = [n for n in ast.walk(tree)]
displayed_nodes = []
for node in nodes:
nodecls = node.__class__
nodename = nodecls.__name__
if "col_offset" not in dir(node):
continue
if nodecls in self.AST_ELEMENTS["keywords"]:
displayed_nodes.append((node, nodename.lower(), "keyword"))
elif nodecls == ast.Name and node.id in self.AST_ELEMENTS["builtins"]:
displayed_nodes.append((node, node.id, "builtin"))
elif nodecls == ast.Str:
displayed_nodes.append((node, "'{}'".format(node.s), "literal"))
elif nodecls == ast.Num:
displayed_nodes.append((node, str(node.n), "literal"))
displayed_nodes.sort(key=lambda elem: elem[0].col_offset)
for dn in displayed_nodes:
node = dn[0]
s = dn[1]
theme = dn[2]
begin_col = node.col_offset
src_chunk = source[offset:begin_col]
chunks.append(src_chunk)
chunks.append(io.format("{}{}</>".format(self.THEME[theme], s)))
offset = begin_col + len(s)
chunks.append(source[offset:])
return "".join(chunks)
|
"""
WSGI config for lithography project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lithography.settings')
application = get_wsgi_application()
|
# -*- coding: utf-8 -*-
from dotrunner.version import VERSION
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='dotrunner',
version=VERSION,
description='Links dotfiles',
long_description=readme,
author='Dotan Nahum',
data_files=[('', ['LICENSE', 'README.md', 'README.rst'])],
author_email='jondotan@gmail.com',
url='https://github.com/jondot/dotrunner',
license=license,
packages=find_packages(exclude=('tests', 'docs', 'jest-pytest')),
entry_points='''
[console_scripts]
dotrunner=dotrunner.dotrunner:main
''',
install_requires=[
'toolz', 'docopt', 'networkx', 'pyyaml', 'delegator.py', 'colorama',
'pyspin'
])
|
from cuor.organizations.api import OrganizationRecord
import traceback
def remove_nulls(d):
return {k: v for k, v in d.items() if v is not None}
def _assing_if_exist(data, record, field):
if field in record:
data[field] = record[field]
def insert_in_cuor(data, inst):
# try:
OrganizationRecord.create_or_update(None, data, dbcommit=True, reindex=True)
# except Exception as e:
# print(e)
# print("------------")
#print(data)
#print("------------")
#print(inst)
#print("------------")
#print(traceback.format_exc())
|
'''
Encoder functions for all standard FieldTypes.
Encoders are responsible for converting a python object into bytes.*
*Not all encoders return bytes objects.
FieldTypes that operate on the bit level can't be expected to return
even byte-sized amounts of bits, so they operate differently.
A FieldType's serializer and encoder simply need to
work with the same parameter and return data types.
'''
__all__ = [
# basic encoders
'encode_numeric', 'encode_string', 'no_encode',
'encode_big_int', 'encode_bit_int',
# specialized encoders
'encode_24bit_numeric', 'encode_decimal', 'encode_bit', 'encode_raw_string',
'encode_int_timestamp', 'encode_float_timestamp', 'encode_string_hex',
# wrapper functions
'encoder_wrapper',
]
from decimal import Decimal
from struct import pack
from time import mktime, strptime
from supyr_struct.defs.constants import ATTR_OFFS
def encoder_wrapper(en):
'''
This function is for wrapping encoders in functions which properly
work with FieldTypes where is_block and is_data are both True.
This is because the node will be a Block with some attribute
that stores the "data" of the node.
'''
def wrapped_encoder(
self, node, parent=None, attr_index=None, _encode=en):
return _encode(self, node.data, parent, attr_index)
return wrapped_encoder
def no_encode(self, node, parent=None, attr_index=None):
'''
Does not encode and just returns the node.
'''
return node
def encode_numeric(self, node, parent=None, attr_index=None):
'''
Encodes a python int into a bytes representation.
Encoding is done using struct.pack
    Returns a bytes object encoded representation of the "node" argument.
'''
return self.struct_packer(node)
def encode_decimal(self, node, parent=None, attr_index=None):
'''
Encodes a python Decimal into a bytes representation.
    Returns a bytes object encoded representation of the "node" argument.
'''
raise NotImplementedError('Encoding Decimal objects is not supported yet.')
def encode_24bit_numeric(self, node, parent=None, attr_index=None):
'''
Encodes a python int to a signed or unsigned 24-bit bytes representation.
    Returns a bytes object encoded representation of the "node" argument.
'''
if self.enc[1] == 't':
# int can be signed
assert node >= -0x800000 and node <= 0x7fffff, (
'%s is too large to pack as a 24bit signed int.' % node)
if node < 0:
# int IS signed
node += 0x1000000
else:
assert node >= 0 and node <= 0xffffff, (
'%s is too large to pack as a 24bit unsigned int.' % node)
# pack and return the int
if self.endian == '<':
return pack('<I', node)[0:3]
return pack('>I', node)[1:4]
def encode_int_timestamp(self, node, parent=None, attr_index=None):
    '''
    Encodes a python timestamp string (in time.asctime format) into a packed int.
    The string is parsed with time.strptime and converted using time.mktime.
    Returns a bytes object encoded representation of the "node" argument.
    '''
return self.struct_packer(int(mktime(strptime(node))))
def encode_float_timestamp(self, node, parent=None, attr_index=None):
    '''
    Encodes a python timestamp string (in time.asctime format) into a packed float.
    The string is parsed with time.strptime and converted using time.mktime.
    Returns a bytes object encoded representation of the "node" argument.
    '''
return self.struct_packer(float(mktime(strptime(node))))
def encode_string(self, node, parent=None, attr_index=None):
'''
Encodes a python string into a bytes representation,
making sure there is a delimiter character on the end.
Encoding is done using str.encode
    Returns a bytes object encoded representation of the "node" argument.
'''
if not node.endswith(self.str_delimiter):
return (node + self.str_delimiter).encode(self.enc)
return node.encode(self.enc)
def encode_raw_string(self, node, parent=None, attr_index=None):
'''
Encodes a python string into a bytes representation.
Encoding is done using str.encode
    Returns a bytes object encoded representation of the "node" argument.
'''
return node.encode(self.enc)
def encode_string_hex(self, node, parent=None, attr_index=None):
'''
Encodes a python string formatted as a hex string into a bytes object.
    Returns a bytes object encoded representation of the "node" argument.
'''
return int(node, 16).to_bytes((len(node) + 1)//2, 'big')
def encode_big_int(self, node, parent=None, attr_index=None):
'''
    Encodes arbitrarily sized signed or unsigned integers
    on the byte level in either ones' or twos' complement.
    Encoding is done using int.to_bytes
    Returns a bytes object encoded representation of the "node" argument.
'''
bytecount = parent.get_size(attr_index)
if not bytecount:
return b''
if self.endian == '<':
endian = 'little'
else:
endian = 'big'
if self.enc[-1] == 'S':
        # twos' complement
return node.to_bytes(bytecount, endian, signed=True)
elif self.enc[-1] == 's':
        # ones' complement
if node < 0:
return (node-1).to_bytes(bytecount, endian, signed=True)
return node.to_bytes(bytecount, endian, signed=False)
return node.to_bytes(bytecount, endian)
def encode_bit(self, node, parent=None, attr_index=None):
'''
Encodes an int to a single bit.
Returns the encoded int, the offset it should be
shifted to, and a mask that covers its range.
'''
# return the int with the bit offset and a mask of 1
return(node, parent.ATTR_OFFS[attr_index], 1)
def encode_bit_int(self, node, parent=None, attr_index=None):
'''
Encodes arbitrarily sized signed or unsigned integers
    on the bit level in either ones' or twos' complement.
Returns the encoded int, the offset it should be
shifted to, and a mask that covers its range.
'''
bitcount = parent.get_size(attr_index)
offset = parent.ATTR_OFFS[attr_index]
mask = (1 << bitcount) - 1
# if the number is signed
if node < 0:
signmask = 1 << (bitcount - 1)
if self.enc == 'S':
# twos signed
return(2*signmask + node, offset, mask)
# ones signed
return(2*signmask + (node-1), offset, mask)
return(node, offset, mask)
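# --- Minimal usage sketch (not part of supyr_struct). The stand-in class below only
# provides the attributes encode_24bit_numeric actually reads ('enc' and 'endian');
# the '<t' encoding string is a hypothetical value whose second character marks the
# int as signed, matching the enc[1] == 't' check above. ---
if __name__ == '__main__':
    class _Fake24BitFieldType:
        enc = '<t'    # assumed encoding string; enc[1] == 't' -> may be signed
        endian = '<'  # little endian
    ft = _Fake24BitFieldType()
    # 0x123456 packs little endian into exactly 3 bytes
    assert encode_24bit_numeric(ft, 0x123456) == b'\x56\x34\x12'
    # -1 is wrapped to 0xffffff before packing
    assert encode_24bit_numeric(ft, -1) == b'\xff\xff\xff'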
|
import tensorflow as tf
from networks.network import Network
from ..fast_rcnn.config import cfg
import pdb
n_classes = 21
_feat_stride = [16,]
anchor_scales = [2,4,8,16,32]
class Resnet50_test(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
self.scene = tf.placeholder(tf.float32, shape=[1, 205])
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data':self.data, 'im_info':self.im_info})
self.trainable = trainable
self.setup()
def setup(self):
n_classes = cfg.NCLASSES
#anchor_scales = cfg.ANCHOR_SCALES
_feat_stride = [16, ]
(self.feed('data')
.conv(7, 7, 64, 2, 2, relu=False, name='conv1')
.batch_normalization(relu=True, name='bn_conv1',is_training=False)
.max_pool(3, 3, 2, 2, padding='VALID',name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(relu=True, name='bn2a_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(relu=True, name='bn2a_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(name='bn2a_branch2c',is_training=False,relu=False))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(relu=True, name='bn2b_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(relu=True, name='bn2b_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(name='bn2b_branch2c',is_training=False,relu=False))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(relu=True, name='bn2c_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(relu=True, name='bn2c_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(name='bn2c_branch2c',is_training=False,relu=False))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID')
.batch_normalization(name='bn3a_branch1',is_training=False,relu=False))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn3a_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(relu=True, name='bn3a_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(name='bn3a_branch2c',is_training=False,relu=False))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
.batch_normalization(relu=True, name='bn3b_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
.batch_normalization(relu=True, name='bn3b_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
.batch_normalization(name='bn3b_branch2c',is_training=False,relu=False))
(self.feed('res3a_relu',
'bn3b_branch2c')
.add(name='res3b')
.relu(name='res3b_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
.batch_normalization(relu=True, name='bn3c_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
.batch_normalization(relu=True, name='bn3c_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
.batch_normalization(name='bn3c_branch2c',is_training=False,relu=False))
(self.feed('res3b_relu',
'bn3c_branch2c')
.add(name='res3c')
.relu(name='res3c_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
.batch_normalization(relu=True, name='bn3d_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
.batch_normalization(relu=True, name='bn3d_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
.batch_normalization(name='bn3d_branch2c',is_training=False,relu=False))
(self.feed('res3c_relu',
'bn3d_branch2c')
.add(name='res3d')
.relu(name='res3d_relu')
.conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID')
.batch_normalization(name='bn4a_branch1',is_training=False,relu=False))
(self.feed('res3d_relu')
.conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn4a_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(relu=True, name='bn4a_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(name='bn4a_branch2c',is_training=False,relu=False))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
.batch_normalization(relu=True, name='bn4b_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
.batch_normalization(relu=True, name='bn4b_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
.batch_normalization(name='bn4b_branch2c',is_training=False,relu=False))
(self.feed('res4a_relu',
'bn4b_branch2c')
.add(name='res4b')
.relu(name='res4b_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
.batch_normalization(relu=True, name='bn4c_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
.batch_normalization(relu=True, name='bn4c_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
.batch_normalization(name='bn4c_branch2c',is_training=False,relu=False))
(self.feed('res4b_relu',
'bn4c_branch2c')
.add(name='res4c')
.relu(name='res4c_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
.batch_normalization(relu=True, name='bn4d_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
.batch_normalization(relu=True, name='bn4d_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
.batch_normalization(name='bn4d_branch2c',is_training=False,relu=False))
(self.feed('res4c_relu',
'bn4d_branch2c')
.add(name='res4d')
.relu(name='res4d_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
.batch_normalization(relu=True, name='bn4e_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
.batch_normalization(relu=True, name='bn4e_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
.batch_normalization(name='bn4e_branch2c',is_training=False,relu=False))
(self.feed('res4d_relu',
'bn4e_branch2c')
.add(name='res4e')
.relu(name='res4e_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
.batch_normalization(relu=True, name='bn4f_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
.batch_normalization(relu=True, name='bn4f_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
.batch_normalization(name='bn4f_branch2c',is_training=False,relu=False))
(self.feed('res4e_relu',
'bn4f_branch2c')
.add(name='res4f')
.relu(name='res4f_relu'))
#========= RPN ============
(self.feed('res4f_relu')
.conv(3,3,512,1,1,name='rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))
(self.feed('rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
(self.feed('rpn_cls_score')
.spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')
.spatial_softmax(name='rpn_cls_prob'))
(self.feed('rpn_cls_prob')
.spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape'))
(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
.proposal_layer(_feat_stride, anchor_scales, 'TEST',name = 'rois'))
(self.feed('rois', 'im_info')
.union_box_layer(name='whole_box'))
(self.feed('conv5_3', 'whole_box')
.roi_pool(7, 7, 1.0/16, name='whole_pool'))
#==========================================#
(self.feed('res4f_relu', 'rois')
.roi_pool(7, 7, 1.0/16, name='pool_5'))
(self.feed('pool_5','whole_pool')
.concat(axis=0, name='concat')
.fc(4096, name='fc6'))
(self.feed('rois', 'fc6')
.edge_box_layer(n_boxes=256,fc_dim=64,feat_dim=4096,dim=(4096, 4096, 4096),group=64, index=1,name='edges'))
(self.feed('fc6', 'edges')
.structure_inference_spmm(boxes=256, name='inference')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
(self.feed('inference')
.fc(n_classes*4, relu=False, name='bbox_pred'))
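# Illustrative construction (assumes the surrounding Faster R-CNN framework supplies the
# Network base class and cfg imported above; the TF1 graph is built inside __init__):
#   net = Resnet50_test(trainable=False)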
|
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import yaml
from charmhelpers.core.host import service_pause, service_resume
from charmhelpers.core.hookenv import action_fail
from charmhelpers.core.unitdata import HookData, kv
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package,
set_os_workload_status,
)
from lib.swift_storage_utils import (
assess_status,
REQUIRED_INTERFACES,
SWIFT_SVCS,
)
from hooks.swift_storage_hooks import (
CONFIGS,
)
def _get_services():
"""Return a list of services that need to be (un)paused."""
services = SWIFT_SVCS[:]
# Before Icehouse there was no swift-container-sync
if get_os_codename_package("swift-container") < "icehouse":
services.remove("swift-container-sync")
return services
def get_action_parser(actions_yaml_path, action_name,
get_services=_get_services):
"""Make an argparse.ArgumentParser seeded from actions.yaml definitions."""
with open(actions_yaml_path) as fh:
        doc = yaml.safe_load(fh)[action_name]["description"]
parser = argparse.ArgumentParser(description=doc)
parser.add_argument("--services", default=get_services())
# TODO: Add arguments for params defined in the actions.yaml
return parser
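# Illustrative actions.yaml entries these handlers are driven by (the parser above only
# reads the "description" field of the invoked action; the wording here is hypothetical):
#   pause:
#     description: Pause the swift-storage unit's services.
#   resume:
#     description: Resume the swift-storage unit's services.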
def pause(args):
"""Pause all the swift services.
@raises Exception if any services fail to stop
"""
for service in args.services:
stopped = service_pause(service)
if not stopped:
raise Exception("{} didn't stop cleanly.".format(service))
with HookData()():
kv().set('unit-paused', True)
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
charm_func=assess_status)
def resume(args):
"""Resume all the swift services.
@raises Exception if any services fail to start
"""
for service in args.services:
started = service_resume(service)
if not started:
raise Exception("{} didn't start cleanly.".format(service))
with HookData()():
kv().set('unit-paused', False)
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
charm_func=assess_status)
# A dictionary of all the defined actions to callables (which take
# parsed arguments).
ACTIONS = {"pause": pause, "resume": resume}
def main(argv):
action_name = _get_action_name()
actions_yaml_path = _get_actions_yaml_path()
parser = get_action_parser(actions_yaml_path, action_name)
args = parser.parse_args(argv)
try:
action = ACTIONS[action_name]
except KeyError:
return "Action %s undefined" % action_name
else:
try:
action(args)
except Exception as e:
action_fail(str(e))
def _get_action_name():
"""Return the name of the action."""
return os.path.basename(__file__)
def _get_actions_yaml_path():
"""Return the path to actions.yaml"""
cwd = os.path.dirname(__file__)
return os.path.join(cwd, "..", "actions.yaml")
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
import mysql
import pickle
import hashlib
import mysql.connector
from mysql.connector import pooling
import settings
import datetime
from time import sleep
def initDatabase():
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("SET sql_notes = 0; ")
cursor.execute("create database IF NOT EXISTS youtubebot")
cursor.execute("USE youtubebot;")
cursor.execute("SET sql_notes = 0; ")
cursor.execute("set global max_allowed_packet=67108864;")
cursor.execute("create table IF NOT EXISTS users (username varchar(70),password varchar(80), status varchar(80));")
cursor.execute("create table IF NOT EXISTS videogenerators (generatorname varchar(70),password varchar(80), status varchar(80));")
# youtube account, estimated length, actual length
cursor.execute("create table IF NOT EXISTS scripts (scriptno int NOT NULL AUTO_INCREMENT, PRIMARY KEY (scriptno), submission_id varchar(70), subredditid varchar(70), subreddit varchar(70), url varchar(2083), timecreated DATETIME,"
"status varchar(70), editedby varchar(70), scripttitle varchar(2083), scriptauthor varchar(70), ups int, downs int, num_comments int, timegathered DATETIME, timeuploaded DATETIME, sceduledupload DATETIME, esttime time, actualtime time, rawscript MEDIUMBLOB, "
"finalscript MEDIUMBLOB);")
cursor.execute("SET sql_notes = 1; ")
connection_pool = None
def login(username, password):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT count(*) FROM users WHERE username = %s AND password = %s;"%(repr(username), repr(password))
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection_object.close()
flag = (result[0][0])
if flag == 0:
return False
else:
return True
def getScriptEditInformation():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, status, editedby FROM scripts WHERE status = 'EDITING' AND editedby IS NOT NULL;"
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res)
cursor.close()
connection_object.close()
return results
def completeUpload(scriptno, timeuploaded, scedualedrelease):
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "UPDATE scripts " \
"SET status = 'SUCCESSUPLOAD', timeuploaded = %s, sceduledupload = %s WHERE scriptno = %s;"
args = (timeuploaded, scedualedrelease, scriptno)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def getLastUploadedScripts():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
now = datetime.datetime.now()
cursor.execute("USE youtubebot;")
query = "SELECT timeuploaded "\
"from scripts "\
"WHERE timeuploaded <= '%s' "\
"ORDER BY timeuploaded DESC "\
"LIMIT 6;" % (now.strftime('%Y-%m-%d %H:%M:%S'))
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res)
cursor.close()
connection_object.close()
return results
def getCompletedScripts():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, status, editedby FROM scripts WHERE status = 'COMPLETE' AND editedby IS NOT NULL;"
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res)
cursor.close()
connection_object.close()
return results
def getOnlineUsers():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT username FROM users WHERE status = 'ONLINE';"
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res[0])
cursor.close()
connection_object.close()
return results
def updateScriptStatus(status, user, scriptid):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
if user is None:
user = "NULL"
else:
user = user
cursor.execute("USE youtubebot;")
query = "UPDATE scripts " \
"SET status = %s, editedby = %s WHERE scriptno = %s;"
args = (status, user, scriptid)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def updateScriptStatusById(status, user, scriptid):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
if user is None:
user = "NULL"
else:
user = user
cursor.execute("USE youtubebot;")
query = "UPDATE scripts " \
"SET status = %s, editedby = %s WHERE submission_id = %s;"
args = (status, user, scriptid)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def updateUserStatus(user, status):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
if status is None:
status = "NULL"
else:
status = repr(status)
cursor.execute("USE youtubebot;")
query = "UPDATE users " \
"SET status = %s WHERE username = %s;"
args = (status, user)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def getScriptStatus(scriptno):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT status " \
"FROM scripts WHERE scriptno = %s;"%(scriptno)
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection_object.close()
return result[0][0]
def getScriptIds():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, submission_id, status " \
"FROM scripts;"
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection_object.close()
return result
def getCompletedScripts(back):
global connection_pool
try:
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, scripttitle, scriptauthor, ups, finalscript " \
"FROM scripts WHERE status = 'COMPLETE' AND finalscript IS NOT NULL ORDER BY ups DESC " \
"LIMIT %s;"%back
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
scriptno = res[0]
scripttitle = res[1]
author = res[2]
ups = res[3]
scriptpayload = pickle.loads(res[4])
load = (scriptno, scripttitle, author, ups, scriptpayload)
results.append(load)
cursor.close()
connection_object.close()
return results
except Exception as e:
print("Mysql Error with downloading completed scripts")
print(e)
pass
def getScripts(back, filter):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, subreddit, scripttitle, scriptauthor, ups, downs, rawscript, submission_id, status, editedby, num_comments " \
"FROM scripts WHERE status = 'RAW' or status = 'EDITING' ORDER BY %s DESC " \
"LIMIT %s;"%(filter, back)
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
scriptno = res[0]
subreddit = res[1]
title = res[2]
author = res[3]
ups = res[4]
downs = res[5]
rawscript = pickle.loads(res[6])
sub_id = res[7]
status = res[8]
editedby = res[9]
num_comments = res[10]
load = (scriptno, subreddit, title, author, ups, downs, rawscript, sub_id, status, editedby, num_comments)
results.append(load)
cursor.close()
connection_object.close()
return results
def addUser(username, password):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "INSERT INTO users(username, password) " \
"VALUES(%s, %s)"
args = (username, hashlib.md5(password.encode()).hexdigest())
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def addVideoGenerator(name, password):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "INSERT INTO videogenerators(generatorname, password) " \
"VALUES(%s, %s)"
args = (name, hashlib.md5(password.encode()).hexdigest())
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def beginDataBaseConnection():
global connection_pool
connection_pool = pooling.MySQLConnectionPool(
pool_size=32,
pool_reset_session=True,
host=settings.database_host,
user=settings.database_user,
passwd=settings.database_password,
auth_plugin='mysql_native_password'
)
print("Started database connection")
def uploadVid(payload, scriptno):
global connection_pool
try:
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
cursor.execute("set global max_allowed_packet=67108864;")
connection_object.commit()
load = pickle.dumps(payload)
print("%s SERVER attempting to upload script no %s (%s) to database" % (datetime.datetime.now(), scriptno, str((len(load) / 1000000)) + "MB"))
query = "UPDATE scripts SET finalscript = %s WHERE scriptno = %s " \
""
args = (load, scriptno)
cursor.execute(query, args)
connection_object.commit()
except Exception as e:
print("Error while connecting to MySQL using Connection pool ", e)
return False
finally:
if (connection_object.is_connected()):
cursor.close()
connection_object.close()
return True
def updateSubmission(submission):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
rawscript = pickle.dumps(submission.comments)
query = "UPDATE scripts " \
"SET scripttitle = %s, rawscript = %s, ups = %s, downs = %s, num_comments = %s, timecreated = %s, timegathered = %s WHERE submission_id = %s"
args = (submission.title, (rawscript), submission.upvotes, submission.downvotes, submission.amountcomments,
submission.timecreated, submission.timegathered, submission.submission_id)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def addSubmission(submission):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
rawscript = pickle.dumps(submission.comments)
query = "INSERT INTO scripts(subredditid, submission_id, subreddit, url, timecreated, status, scripttitle, scriptauthor, timegathered, rawscript, ups, downs, num_comments) " \
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
args = ((submission.subredditid), (submission.submission_id),
(submission.subreddit), (submission.link), (submission.timecreated),
("RAW"), submission.title, (submission.author), (submission.timegathered), rawscript,
submission.upvotes, submission.downvotes, submission.amountcomments)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def checkValueExists(column, value):
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    query = "SELECT count(*) FROM scripts WHERE %s = %s;"%(column, repr(value))
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    flag = (result[0][0])
    if flag == 0:
        return False
    else:
        return True
def getVideoCountFromStatus(status):
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    query = "SELECT count(*) FROM scripts WHERE status=%s;"%(repr(status))
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return (result[0][0])
def getRowCount(tablename):
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    cursor.execute("select count(*) from %s"%tablename)
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return (result[0][0])
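# Usage sketch (assumes a reachable MySQL server whose host/user/password are set in
# settings.py; the username and password below are placeholders):
if __name__ == "__main__":
    beginDataBaseConnection()
    initDatabase()
    addUser("editor1", "changeme")
    print(getOnlineUsers())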
|
#!/usr/bin/env python
import argparse
from datetime import date
import hashlib
import logging
import sys
import textwrap
from classes.resource import Resource
from classes.dbmanager import ResourceStorage
from classes.reporter import HtmlReport
import helpers
def get_reports_path(path):
today = date.today()
return "{0}/{1}/{2}/".format(path, today.month, today.day)
def check_differences(resources, report):
report.add_urls(resources)
changed_resources = []
for resource in resources:
actual_content = helpers.fetch_resource(resource.url)
if actual_content:
if (hashlib.sha256(actual_content).hexdigest() != resource.content.hash):
report.add(resource, actual_content)
resource.update(actual_content)
changed_resources.append(resource)
report.save()
return changed_resources
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="diffcheck.py",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Resource Difference Checker
See https://github.com/bayotop/resdiffcheck for more information.
"""))
parser.add_argument("db", help="database with resources to check")
parser.add_argument("report_dir", help="target directory for reports (without trailing /)")
parser.add_argument("-l", "--logfile", default="process.log", help="default ./process.log")
args = parser.parse_args()
logging.basicConfig(filename=args.logfile,level=logging.DEBUG)
storage = ResourceStorage(args.db)
if not storage.load():
sys.exit()
report = HtmlReport(get_reports_path(args.report_dir), "diff.html")
changed_resources = check_differences(storage.getall(), report)
if changed_resources:
storage.add_multiple(changed_resources)
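    # Example invocation (illustrative paths):
    #   python diffcheck.py resources.db /var/www/reports --logfile diff.log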
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from cStringIO import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print >> fp, tab + msg.get_content_type(),
if include_default:
print >> fp, '[%s]' % msg.get_default_type()
else:
print >> fp
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
|
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# The snippet assumes an existing Dash app object; create a minimal one here so the
# @app.callback decorator below has an app to register against.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
popover = html.Div(
[
dbc.Button(
"Click to toggle popover", id="popover-target", color="danger"
),
dbc.Popover(
[
dbc.PopoverHeader("Popover header"),
dbc.PopoverBody("And here's some amazing content. Cool!"),
],
id="popover",
is_open=False,
target="popover-target",
),
]
)
@app.callback(
Output("popover", "is_open"),
[Input("popover-target", "n_clicks")],
[State("popover", "is_open")],
)
def toggle_popover(n, is_open):
if n:
return not is_open
return is_open
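# Minimal run sketch (assumption: executing this snippet directly; the layout wrapper
# and debug flag are illustrative additions, not part of the original example):
if __name__ == "__main__":
    app.layout = html.Div([popover])
    app.run_server(debug=True)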
|
from datetime import datetime
from matplotlib import pylab as plt
from requests_cache import CachedSession
CACHE_EXPIRATION_SECS = 3600*24*365
YEAR_RANGE = range(2018, 2022)
MARKERS = ["o", "s", "d", "+", "*"]
RIRS = {
'AFRINIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/',
'marker': 'o',
},
'APNIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/',
'marker': 's',
},
'ARIN': {
'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/',
'marker': 'd'
},
'LACNIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/',
'marker': '+',
},
'RIPE': {
'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/',
'marker': '*',
}
}
session = CachedSession(expire_after=CACHE_EXPIRATION_SECS)
plt.figure(figsize=(7,4))
for rir, rir_info in RIRS.items():
x = []
y = []
for year in YEAR_RANGE:
for month in range(1,13):
roa_count = -1 # skip the header
parsed_url = f'{rir_info["url"]}/{year}/{month:02d}/15/roas.csv'
csv = session.get( parsed_url )
if csv.status_code != 200:
print(parsed_url)
print(csv.status_code)
continue
for line in csv.iter_lines(decode_unicode=True):
roa_count += 1
if roa_count > 0:
x.append( datetime(year, month, 15) )
y.append( roa_count )
plt.plot(x, y, label=rir, marker=rir_info['marker'])
plt.grid( True )
plt.legend()
plt.ylabel('Number of ROAs')
plt.xticks(rotation=45)
plt.tight_layout()
plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png')
plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')
|
# -*- coding: utf-8 -*-
"""This module contains an adapter for reading Modbus data and sending it to Procem RTL worker."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): Ville Heikkila, Otto Hylli, Pekka Itavuo,
# Teemu Laukkarinen ja Ulla-Talvikki Virta
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import datetime
import queue
import socket
import threading
import time
import sys
try:
import adapters.common_utils as common_utils
import adapters.modbus_generic_model as modbus_generic_model
import adapters.modbus_utils as modbus_utils
except ImportError:
# used when running the module directly
import common_utils
import modbus_generic_model
import modbus_utils
PROCEM_SERVER_IP = common_utils.PROCEM_SERVER_IP
PROCEM_SERVER_PORT = common_utils.PROCEM_SERVER_PORT
# maximum size for UDP payload. Current value taken from mxelectrix_adapter
UDP_MAX_SIZE = common_utils.UDP_MAX_SIZE
# To reduce UDP traffic buffer the data sending to procem_rtl using this global queue
data_queue = queue.Queue()
# The default names of the configuration files from where the data model information is read
CONFIG_SCHEME_FILE_NAME = "modbus_solarplant_config.json"
MEASUREMENT_ID_FILE_NAME = "Solar_Plant_measurement_IDs_v3.csv"
# The default name of the model file
MODEL_NAME = "modbus_generic_model"
# The supported register types and their reading functions
SUPPORTED_REGISTER_TYPES = {
"input": "read_input_registers",
"holding": "read_holding_registers"
}
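# For example, a register group of type "input" is read below via
# getattr(client, "read_input_registers")(start_register, register_count, unit=unit_id),
# i.e. the dictionary maps a configured register type to a pymodbus client method name.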
def readRegisterGroups(client, device, register_groups, measurement_queue):
"""Reads several groups of Modbus registers and sends the data to Procem RTL."""
delay = device.delay / 1000 # time interval between reading different registers in seconds
unitid = device.unit_id
for register_group in register_groups:
start_register = register_group.start_register
register_count = register_group.register_count
register_type = register_group.type
if register_type not in SUPPORTED_REGISTER_TYPES:
print(common_utils.getTimeString(), "ERROR: Register type", register_type, "not supported")
continue
try:
# time1 = time.time()
current_start_register = start_register
current_register_count_max = register_count
current_register_count = current_register_count_max
received_registers = []
timestamps = []
while len(received_registers) < register_count and current_register_count > 0:
# time1 = time.time()
resp = getattr(client, SUPPORTED_REGISTER_TYPES[register_type])(
current_start_register, current_register_count, unit=unitid)
tm = int(round(time.time() * 1000))
# time2 = time.time()
if getattr(resp, "registers", None) is None:
# print("failed: ", unitid, ": ", current_start_register, "-",
# current_start_register + current_register_count - 1,
# " (", current_register_count, "), read time: ", time2 - time1, sep="")
current_register_count_max //= 2
else:
# print("success: ", unitid, ": ", current_start_register, "-",
# current_start_register + current_register_count - 1,
# " (", current_register_count, "), read time: ", time2 - time1, sep="")
received_registers += resp.registers
timestamps += [tm] * len(resp.registers)
current_start_register += len(resp.registers)
if len(received_registers) < register_count:
old_end_register = current_start_register - 1
(current_start_register, current_end_register) = register_group.getPart(
current_start_register, current_register_count_max)
if current_start_register is None or current_end_register is None:
current_register_count = 0
else:
skipped_registers = current_start_register - old_end_register - 1
if skipped_registers > 0:
received_registers += [0] * skipped_registers
timestamps += [tm] * skipped_registers
current_register_count = current_end_register - current_start_register + 1
if len(received_registers) < register_count and current_register_count > 0:
time.sleep(delay / 10)
if len(received_registers) >= register_count:
# time2 = time.time()
# print("success: ", unitid, ": registers: ", start_register, "-",
# start_register + register_count - 1, ", read time: ", time2 - time1, sep="")
measurement_queue.put({
"register_group": register_group,
"response_data": received_registers,
"timestamps": timestamps})
else:
print(common_utils.getTimeString(), " ERROR: (", device.ip, ", ", unitid,
"): Failure to read registers: ", start_register, "-", start_register + register_count - 1,
" (", resp, ")", sep="")
except Exception as error:
print(common_utils.getTimeString(), " ERROR: could not read registers ", start_register,
"-", start_register + register_count - 1, " from (", device.ip, ", ", unitid, ")", sep="")
print(error)
# Sleep for a little bit before reading the next register group
time.sleep(delay)
def sendMeasurementsToProcem(device, measurement_queue):
"""Sends a collection of measurements to Procem RTL."""
while True:
measurement = measurement_queue.get()
if measurement is None:
break
register_group = measurement["register_group"]
response_data = measurement["response_data"]
timestamps = measurement["timestamps"]
for register_id, count in register_group.registers:
index = register_id - register_group.start_register
register_type = device.registers[register_id]
register_values = response_data[index:index + count]
timestamp = timestamps[index + count - 1]
# parse the data and create a Procem packet and put it in the data queue
new_pkt = modbus_utils.getProcemRTLpkt(register_values, register_type, timestamp)
data_queue.put(new_pkt)
def ModBusWorker(device):
"""Reads registers periodically from a Modbus device and sends the data to Procem RTL."""
ip = device.ip
port = device.port
source_ip = device.source_ip
source_port = device.source_port
interval = device.interval / 1000 # time interval between reading the same register in seconds
start_time = time.time()
# start the measurement handling thread
measurement_queue = queue.Queue()
threading.Thread(
target=sendMeasurementsToProcem,
kwargs={"device": device, "measurement_queue": measurement_queue},
daemon=True).start()
kwargs = {"host": ip}
if port is not None:
kwargs["port"] = port
if source_ip is not None:
kwargs["source_address"] = (source_ip, source_port)
kwargs["timeout"] = 300
client = ModbusClient(**kwargs)
client.connect()
print(common_utils.getTimeString(), "INFO: Connected ModBus server at address " + ip)
try:
# First handle the read once registers
readRegisterGroups(client, device, device.read_once_groups, measurement_queue)
loop_count = 0
day = datetime.date.today().day
start_time = time.time()
while True:
current_day = datetime.date.today().day
# Handle the read once register again if it is a new day
if current_day != day:
print(common_utils.getTimeString(), loop_count, "packages sent from", ip, "on day", day)
readRegisterGroups(client, device, device.read_once_groups, measurement_queue)
day = current_day
start_time += loop_count * interval
loop_count = 0
current_time = time.time()
sleep_time = loop_count * interval - (current_time - start_time)
if sleep_time > 0:
time.sleep(sleep_time)
readRegisterGroups(client, device, device.groups, measurement_queue)
loop_count += 1
# Print the send information once in an hour.
if loop_count % 3600 == 0:
print(common_utils.getTimeString(), loop_count, "packages sent from", ip, "on day", day)
except OSError as err:
print(common_utils.getTimeString(), "ERROR: unexpected behavior in thread of IP", ip, "error was", err)
finally:
# sleep for safety?
client.close()
print(common_utils.getTimeString(), "INFO: Closing the connection to ModBus server")
def startModBusAdapter(data_model):
"""Starts separate worker thread for each Modbus device in the data model."""
devices = data_model.devices
for device_id, device in devices.items():
print(common_utils.getTimeString(), "Starting thread for device", device_id)
threading.Thread(target=ModBusWorker, kwargs={"device": device}, daemon=True).start()
if __name__ == "__main__":
if len(sys.argv) == 3:
CONFIG_SCHEME_FILE_NAME = sys.argv[1]
MEASUREMENT_ID_FILE_NAME = sys.argv[2]
elif len(sys.argv) != 1:
print("Start this adapter with 'python3", sys.argv[0], "config_scheme.json measurement_ids.csv' command")
print("or use 'python3 ", sys.argv[0], "' to use the default configuration.", sep="")
quit()
print(common_utils.getTimeString(), "Reading modbus configurations from",
CONFIG_SCHEME_FILE_NAME, "and", MEASUREMENT_ID_FILE_NAME)
# Read the model name from the configuration file
config = common_utils.readConfig(CONFIG_SCHEME_FILE_NAME)
MODEL_NAME = config.get("model_name", MODEL_NAME)
# import the correct model code and load the model information from the configuration files
model = __import__(MODEL_NAME)
field_info_class = getattr(model, "getFieldStorage")
create_measurement_function = getattr(model, "getCreateFunction")
data_model = modbus_generic_model.loadModel(
config_filename=CONFIG_SCHEME_FILE_NAME,
csv_filename=MEASUREMENT_ID_FILE_NAME,
field_model=field_info_class(),
create_measurement=create_measurement_function())
# start the Procem send worker that takes the values from data_queue and sends them to procem_rtl
threading.Thread(target=common_utils.procemSendWorker, kwargs={"data_queue": data_queue}).start()
startModBusAdapter(data_model)
while True:
txt = input("Press enter key to end:\n\r")
if not txt:
data_queue.put(None)
break
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is an auto-generated file created by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.embed
from .x_hatch_window_factory import XHatchWindowFactory as XHatchWindowFactory_167d0e95
class HatchWindowFactory(XHatchWindowFactory_167d0e95):
"""
Service Class
**since**
LibreOffice 4.1
See Also:
`API HatchWindowFactory <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1embed_1_1HatchWindowFactory.html>`_
"""
__ooo_ns__: str = 'com.sun.star.embed'
__ooo_full_ns__: str = 'com.sun.star.embed.HatchWindowFactory'
__ooo_type_name__: str = 'service'
__all__ = ['HatchWindowFactory']
|
import os
import re
class ConfigBase(object):
def __init__(self, path):
self.path = path
@classmethod
def squash_int_range(cls, ilist):
"""Takes a list of integers and squashes consecutive values into a
string range. Returned list contains mix of strings and ints.
"""
irange = []
rstart = None
rprev = None
sorted(ilist)
for i, value in enumerate(ilist):
if rstart is None:
if i == (len(ilist) - 1):
irange.append(value)
break
rstart = value
if rprev is not None:
if rprev != (value - 1):
if rstart == rprev:
irange.append(rstart)
else:
irange.append("{}-{}".format(rstart, rprev))
if i == (len(ilist) - 1):
irange.append(value)
rstart = value
elif i == (len(ilist) - 1):
irange.append("{}-{}".format(rstart, value))
break
rprev = value
        return ','.join(str(i) for i in irange)
@classmethod
def expand_value_ranges(cls, ranges):
"""
Takes a string containing ranges of values such as 1-3 and 4,5,6,7 and
expands them into a single list.
"""
if not ranges:
return ranges
expanded = []
ranges = ranges.split(',')
for subrange in ranges:
# expand ranges
subrange = subrange.partition('-')
if subrange[1] == '-':
expanded += range(int(subrange[0]), int(subrange[2]) + 1)
else:
for val in subrange[0].split():
expanded.append(int(val))
return sorted(expanded)
@property
def exists(self):
if os.path.exists(self.path):
return True
return False
def get(self, key, section=None, expand_to_list=False):
raise NotImplementedError
class SectionalConfigBase(ConfigBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sections = {}
# this provides an easy sectionless lookup but is prone to collisions.
# always returns the last value for key found in config file.
self._flattened_config = {}
self._load()
@staticmethod
def bool_str(val):
if val.lower() == "true":
return True
elif val.lower() == "false":
return False
return val
@property
def all(self):
return self._sections
def get(self, key, section=None, expand_to_list=False):
""" If section is None use flattened """
if section is None:
value = self._flattened_config.get(key)
else:
value = self._sections.get(section, {}).get(key)
if expand_to_list:
return self.expand_value_ranges(value)
return value
@property
def dump(self):
with open(self.path) as fd:
return fd.read()
def _load(self):
if not self.exists:
return
current_section = None
with open(self.path) as fd:
for line in fd:
if re.compile(r"^\s*#").search(line):
continue
# section names are not expected to contain whitespace
ret = re.compile(r"^\s*\[(\S+)].*").search(line)
if ret:
current_section = ret.group(1)
self._sections[current_section] = {}
continue
if current_section is None:
continue
# key names may contain whitespace
# values may contain whitespace
expr = r"^\s*(\S+(?:\s+\S+)?)\s*=\s*(.+)\s*"
ret = re.compile(expr).search(line)
if ret:
key = ret.group(1)
val = self.bool_str(ret.group(2))
if type(val) == str:
val = val.strip()
for char in ["'", '"']:
val = val.strip(char)
self._sections[current_section][key] = val
self._flattened_config[key] = val
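# Minimal sanity check (illustrative; exercises only the class-level range helpers,
# so no config file is required):
if __name__ == '__main__':
    assert ConfigBase.squash_int_range([1, 2, 3, 4]) == '1-4'
    assert ConfigBase.squash_int_range([1, 2, 3, 5]) == '1-3,5'
    assert ConfigBase.expand_value_ranges('1-3,5') == [1, 2, 3, 5]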
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.monitoring_v3.services.alert_policy_service import pagers
from google.cloud.monitoring_v3.types import alert
from google.cloud.monitoring_v3.types import alert_service
from google.cloud.monitoring_v3.types import mutation_record
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AlertPolicyServiceGrpcTransport
from .transports.grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport
class AlertPolicyServiceClientMeta(type):
"""Metaclass for the AlertPolicyService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AlertPolicyServiceTransport]]
_transport_registry["grpc"] = AlertPolicyServiceGrpcTransport
_transport_registry["grpc_asyncio"] = AlertPolicyServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AlertPolicyServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AlertPolicyServiceClient(metaclass=AlertPolicyServiceClientMeta):
"""The AlertPolicyService API is used to manage (list, create, delete,
edit) alert policies in Stackdriver Monitoring. An alerting policy
is a description of the conditions under which some aspect of your
system is considered to be "unhealthy" and the ways to notify people
or services about this state. In addition to using this API, alert
policies can also be managed through `Stackdriver
Monitoring <https://cloud.google.com/monitoring/docs/>`__, which can
be reached by clicking the "Monitoring" tab in `Cloud
Console <https://console.cloud.google.com/>`__.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "monitoring.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AlertPolicyServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AlertPolicyServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AlertPolicyServiceTransport:
"""Returns the transport used by the client instance.
Returns:
AlertPolicyServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def alert_policy_path(project: str, alert_policy: str,) -> str:
"""Returns a fully-qualified alert_policy string."""
return "projects/{project}/alertPolicies/{alert_policy}".format(
project=project, alert_policy=alert_policy,
)
@staticmethod
def parse_alert_policy_path(path: str) -> Dict[str, str]:
"""Parses a alert_policy path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/alertPolicies/(?P<alert_policy>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def alert_policy_condition_path(
project: str, alert_policy: str, condition: str,
) -> str:
"""Returns a fully-qualified alert_policy_condition string."""
return "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}".format(
project=project, alert_policy=alert_policy, condition=condition,
)
@staticmethod
def parse_alert_policy_condition_path(path: str) -> Dict[str, str]:
"""Parses a alert_policy_condition path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/alertPolicies/(?P<alert_policy>.+?)/conditions/(?P<condition>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AlertPolicyServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the alert policy service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, AlertPolicyServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AlertPolicyServiceTransport):
# transport is an AlertPolicyServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def list_alert_policies(
self,
request: alert_service.ListAlertPoliciesRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAlertPoliciesPager:
r"""Lists the existing alerting policies for the
workspace.
Args:
request (google.cloud.monitoring_v3.types.ListAlertPoliciesRequest):
The request object. The protocol for the
`ListAlertPolicies` request.
name (str):
Required. The
`project <https://cloud.google.com/monitoring/api/v3#project_name>`__
whose alert policies are to be listed. The format is:
::
projects/[PROJECT_ID_OR_NUMBER]
Note that this field names the parent container in which
the alerting policies to be listed are stored. To
retrieve a single alerting policy by name, use the
[GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
operation, instead.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.monitoring_v3.services.alert_policy_service.pagers.ListAlertPoliciesPager:
The protocol for the ListAlertPolicies response.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an alert_service.ListAlertPoliciesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, alert_service.ListAlertPoliciesRequest):
request = alert_service.ListAlertPoliciesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_alert_policies]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListAlertPoliciesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_alert_policy(
self,
request: alert_service.GetAlertPolicyRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> alert.AlertPolicy:
r"""Gets a single alerting policy.
Args:
request (google.cloud.monitoring_v3.types.GetAlertPolicyRequest):
The request object. The protocol for the
`GetAlertPolicy` request.
name (str):
Required. The alerting policy to retrieve. The format
is:
::
projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.monitoring_v3.types.AlertPolicy:
A description of the conditions under which some aspect of your system is
considered to be "unhealthy" and the ways to notify
people or services about this state. For an overview
of alert policies, see [Introduction to
Alerting](\ https://cloud.google.com/monitoring/alerts/).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an alert_service.GetAlertPolicyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, alert_service.GetAlertPolicyRequest):
request = alert_service.GetAlertPolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_alert_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_alert_policy(
self,
request: alert_service.CreateAlertPolicyRequest = None,
*,
name: str = None,
alert_policy: alert.AlertPolicy = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> alert.AlertPolicy:
r"""Creates a new alerting policy.
Args:
request (google.cloud.monitoring_v3.types.CreateAlertPolicyRequest):
The request object. The protocol for the
`CreateAlertPolicy` request.
name (str):
Required. The
`project <https://cloud.google.com/monitoring/api/v3#project_name>`__
in which to create the alerting policy. The format is:
::
projects/[PROJECT_ID_OR_NUMBER]
Note that this field names the parent container in which
the alerting policy will be written, not the name of the
created policy. ``name`` must be a host project of a
workspace; otherwise an INVALID_ARGUMENT error is returned.
The alerting policy that is returned will have a name
that contains a normalized representation of this name
as a prefix but adds a suffix of the form
``/alertPolicies/[ALERT_POLICY_ID]``, identifying the
policy in the container.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
alert_policy (google.cloud.monitoring_v3.types.AlertPolicy):
Required. The requested alerting policy. You should omit
the ``name`` field in this policy. The name will be
returned in the new policy, including a new
``[ALERT_POLICY_ID]`` value.
This corresponds to the ``alert_policy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.monitoring_v3.types.AlertPolicy:
A description of the conditions under which some aspect of your system is
considered to be "unhealthy" and the ways to notify
people or services about this state. For an overview
of alert policies, see [Introduction to
Alerting](\ https://cloud.google.com/monitoring/alerts/).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, alert_policy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an alert_service.CreateAlertPolicyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, alert_service.CreateAlertPolicyRequest):
request = alert_service.CreateAlertPolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if alert_policy is not None:
request.alert_policy = alert_policy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_alert_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_alert_policy(
self,
request: alert_service.DeleteAlertPolicyRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an alerting policy.
Args:
request (google.cloud.monitoring_v3.types.DeleteAlertPolicyRequest):
The request object. The protocol for the
`DeleteAlertPolicy` request.
name (str):
Required. The alerting policy to delete. The format is:
::
projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
For more information, see
[AlertPolicy][google.monitoring.v3.AlertPolicy].
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an alert_service.DeleteAlertPolicyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, alert_service.DeleteAlertPolicyRequest):
request = alert_service.DeleteAlertPolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_alert_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def update_alert_policy(
self,
request: alert_service.UpdateAlertPolicyRequest = None,
*,
update_mask: field_mask_pb2.FieldMask = None,
alert_policy: alert.AlertPolicy = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> alert.AlertPolicy:
r"""Updates an alerting policy. You can either replace the entire
policy with a new one or replace only certain fields in the
current alerting policy by specifying the fields to be updated
via ``updateMask``. Returns the updated alerting policy.
Args:
request (google.cloud.monitoring_v3.types.UpdateAlertPolicyRequest):
The request object. The protocol for the
`UpdateAlertPolicy` request.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. A list of alerting policy field names. If this
field is not empty, each listed field in the existing
alerting policy is set to the value of the corresponding
field in the supplied policy (``alert_policy``), or to
the field's default value if the field is not in the
supplied alerting policy. Fields not listed retain their
previous value.
Examples of valid field masks include ``display_name``,
``documentation``, ``documentation.content``,
``documentation.mime_type``, ``user_labels``,
``user_label.nameofkey``, ``enabled``, ``conditions``,
``combiner``, etc.
If this field is empty, then the supplied alerting
policy replaces the existing policy. It is the same as
deleting the existing policy and adding the supplied
policy, except for the following:
- The new policy will have the same
``[ALERT_POLICY_ID]`` as the former policy. This
gives you continuity with the former policy in your
notifications and incidents.
- Conditions in the new policy will keep their former
``[CONDITION_ID]`` if the supplied condition includes
the ``name`` field with that ``[CONDITION_ID]``. If
the supplied condition omits the ``name`` field, then
a new ``[CONDITION_ID]`` is created.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
alert_policy (google.cloud.monitoring_v3.types.AlertPolicy):
Required. The updated alerting policy or the updated
values for the fields listed in ``update_mask``. If
``update_mask`` is not empty, any fields in this policy
that are not in ``update_mask`` are ignored.
This corresponds to the ``alert_policy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.monitoring_v3.types.AlertPolicy:
A description of the conditions under which some aspect of your system is
considered to be "unhealthy" and the ways to notify
people or services about this state. For an overview
of alert policies, see [Introduction to
Alerting](\ https://cloud.google.com/monitoring/alerts/).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([update_mask, alert_policy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an alert_service.UpdateAlertPolicyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, alert_service.UpdateAlertPolicyRequest):
request = alert_service.UpdateAlertPolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if update_mask is not None:
request.update_mask = update_mask
if alert_policy is not None:
request.alert_policy = alert_policy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_alert_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("alert_policy.name", request.alert_policy.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-monitoring",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("AlertPolicyServiceClient",)
|
from .policy import DDPG
from .trainer import DDPGTrainer
from .loss import DDPGLoss
NAME = "DDPG"
LOSS = DDPGLoss
TRAINER = DDPGTrainer
POLICY = DDPG
CONFIG = {
"training": {
"update_interval": 1,
"batch_size": 1024,
"tau": 0.01,
"optimizer": "Adam",
"actor_lr": 1e-2,
"critic_lr": 1e-2,
"grad_norm_clipping": 0.5,
},
"policy": {},
}
|
import glob
import os
import tarfile
from subprocess import check_call
import modules.config as c
import modules.functions as f
def run_task_build_pdfium():
f.debug("Building PDFium...")
target = "android"
build_dir = os.path.join("build", target)
f.create_dir(build_dir)
target_dir = os.path.join(build_dir, "pdfium")
f.remove_dir(target_dir)
cwd = build_dir
command = " ".join(
[
"gclient",
"config",
"--unmanaged",
"https://pdfium.googlesource.com/pdfium.git",
]
)
check_call(command, cwd=cwd, shell=True)
gclient_file = os.path.join(build_dir, ".gclient")
f.append_to_file(gclient_file, "target_os = [ 'android' ]")
cwd = build_dir
command = " ".join(["gclient", "sync"])
check_call(command, cwd=cwd, shell=True)
cwd = target_dir
command = " ".join(["git", "checkout", c.pdfium_git_commit])
check_call(command, cwd=cwd, shell=True)
def run_task_patch():
f.debug("Patching...")
source_dir = os.path.join("build", "android", "pdfium")
# build gn
source_file = os.path.join(
source_dir,
"BUILD.gn",
)
if f.file_line_has_content(source_file, 25, " ]\n"):
f.replace_line_in_file(source_file, 25, ' "FPDFSDK_EXPORTS",\n ]\n')
f.debug("Applied: Build GN")
else:
f.debug("Skipped: Build GN")
# build gn flags
source_file = os.path.join(
source_dir,
"BUILD.gn",
)
if f.file_line_has_content(source_file, 19, " cflags = []\n"):
f.replace_line_in_file(
source_file, 19, ' cflags = [ "-fvisibility=default" ]\n'
)
f.debug("Applied: Build GN Flags")
else:
f.debug("Skipped: Build GN Flags")
def run_task_build():
f.debug("Building libraries...")
current_dir = os.getcwd()
# configs
for config in c.configurations_android:
# targets
for target in c.targets_android:
main_dir = os.path.join(
"build",
target["target_os"],
"pdfium",
"out",
"{0}-{1}-{2}".format(target["target_os"], target["target_cpu"], config),
)
f.remove_dir(main_dir)
f.create_dir(main_dir)
os.chdir(
os.path.join(
"build",
target["target_os"],
"pdfium",
)
)
# generating files...
f.debug(
'Generating files to arch "{0}" and configuration "{1}"...'.format(
target["target_cpu"], config
)
)
arg_is_debug = "true" if config == "debug" else "false"
args = []
args.append('target_os="{0}"'.format(target["pdfium_os"]))
args.append('target_cpu="{0}"'.format(target["target_cpu"]))
args.append("use_goma=false")
args.append("is_debug={0}".format(arg_is_debug))
args.append("pdf_use_skia=false")
args.append("pdf_use_skia_paths=false")
args.append("pdf_enable_xfa=false")
args.append("pdf_enable_v8=false")
args.append("is_component_build=true")
args.append("pdf_is_standalone=true")
args.append("pdf_bundle_freetype=true")
if config == "release":
args.append("symbol_level=0")
args_str = " ".join(args)
command = " ".join(
[
"gn",
"gen",
"out/{0}-{1}-{2}".format(
target["target_os"], target["target_cpu"], config
),
"--args='{0}'".format(args_str),
]
)
check_call(command, shell=True)
# compiling...
f.debug(
'Compiling to arch "{0}" and configuration "{1}"...'.format(
target["target_cpu"], config
)
)
command = " ".join(
[
"ninja",
"-C",
"out/{0}-{1}-{2}".format(
target["target_os"], target["target_cpu"], config
),
"pdfium",
"-v",
]
)
check_call(command, shell=True)
os.chdir(current_dir)
def run_task_install():
f.debug("Installing libraries...")
# configs
for config in c.configurations_android:
f.remove_dir(os.path.join("build", "android", config))
f.create_dir(os.path.join("build", "android", config))
# targets
for target in c.targets_android:
out_dir = "{0}-{1}-{2}".format(
target["target_os"], target["target_cpu"], config
)
source_lib_dir = os.path.join("build", "android", "pdfium", "out", out_dir)
lib_dir = os.path.join("build", "android", config, "lib")
target_dir = os.path.join(lib_dir, target["android_cpu"])
f.remove_dir(target_dir)
f.create_dir(target_dir)
for basename in os.listdir(source_lib_dir):
if basename.endswith(".so"):
pathname = os.path.join(source_lib_dir, basename)
if os.path.isfile(pathname):
f.copy_file2(pathname, target_dir)
# include
include_dir = os.path.join("build", "android", "pdfium", "public")
target_include_dir = os.path.join("build", "android", config, "include")
f.remove_dir(target_include_dir)
f.create_dir(target_include_dir)
for basename in os.listdir(include_dir):
if basename.endswith(".h"):
pathname = os.path.join(include_dir, basename)
if os.path.isfile(pathname):
f.copy_file2(pathname, target_include_dir)
def run_task_test():
f.debug("Testing...")
for config in c.configurations_android:
for target in c.targets_android:
lib_dir = os.path.join(
"build", "android", config, "lib", target["android_cpu"]
)
command = " ".join(["file", os.path.join(lib_dir, "libpdfium.so")])
check_call(command, shell=True)
def run_task_archive():
f.debug("Archiving...")
current_dir = os.getcwd()
lib_dir = os.path.join(current_dir, "build", "android")
output_filename = os.path.join(current_dir, "android.tgz")
tar = tarfile.open(output_filename, "w:gz")
for configuration in c.configurations_android:
tar.add(
name=os.path.join(lib_dir, configuration),
arcname=os.path.basename(os.path.join(lib_dir, configuration)),
filter=lambda x: (
None
if "_" in x.name
and not x.name.endswith(".h")
and not x.name.endswith(".so")
and os.path.isfile(x.name)
else x
),
)
tar.close()
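# Hypothetical driver (a sketch only; the original project is assumed to dispatch these
# tasks from an external runner, so the call order below is illustrative):
#
#   run_task_build_pdfium()
#   run_task_patch()
#   run_task_build()
#   run_task_install()
#   run_task_test()
#   run_task_archive()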
|
"""
General networks for pytorch.
Algorithm-specific networks should go else-where.
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
from rlkit.policies.base import Policy
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.core import PyTorchModule
from rlkit.torch.data_management.normalizer import TorchFixedNormalizer
from rlkit.torch.modules import LayerNorm
import math
def identity(x):
return x
class Mlp(PyTorchModule):
def __init__(
self,
hidden_sizes,
output_size,
input_size,
init_w=3e-3,
hidden_activation=F.relu,
output_activation=identity,
hidden_init=ptu.fanin_init,
b_init_value=0.1,
layer_norm=False,
layer_norm_kwargs=None,
):
self.save_init_params(locals())
super().__init__()
if layer_norm_kwargs is None:
layer_norm_kwargs = dict()
self.input_size = input_size
self.output_size = output_size
self.hidden_sizes = hidden_sizes
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.layer_norm = layer_norm
self.fcs = []
self.layer_norms = []
in_size = input_size
for i, next_size in enumerate(hidden_sizes):
fc = nn.Linear(in_size, next_size)
in_size = next_size
hidden_init(fc.weight)
fc.bias.data.fill_(b_init_value)
self.__setattr__("fc{}".format(i), fc)
self.fcs.append(fc)
if self.layer_norm:
ln = LayerNorm(next_size)
self.__setattr__("layer_norm{}".format(i), ln)
self.layer_norms.append(ln)
self.last_fc = nn.Linear(in_size, output_size)
self.last_fc.weight.data.uniform_(-init_w, init_w)
self.last_fc.bias.data.uniform_(-init_w, init_w)
def forward(self, input, return_preactivations=False):
h = input
for i, fc in enumerate(self.fcs):
h = fc(h)
if self.layer_norm and i < len(self.fcs) - 1:
h = self.layer_norms[i](h)
h = self.hidden_activation(h)
preactivation = self.last_fc(h)
output = self.output_activation(preactivation)
if return_preactivations:
return output, preactivation
else:
return output
class FlattenMlp(Mlp):
"""
if there are multiple inputs, concatenate along dim 1
"""
def forward(self, *inputs, **kwargs):
flat_inputs = torch.cat(inputs, dim=1)
return super().forward(flat_inputs, **kwargs)
class MlpPolicy(Mlp, Policy):
"""
A simpler interface for creating policies.
"""
def __init__(
self,
*args,
obs_normalizer: TorchFixedNormalizer = None,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.obs_normalizer = obs_normalizer
def forward(self, obs, **kwargs):
if self.obs_normalizer:
obs = self.obs_normalizer.normalize(obs)
return super().forward(obs, **kwargs)
def get_action(self, obs_np):
actions = self.get_actions(obs_np[None])
return actions[0, :], {}
def get_actions(self, obs):
return self.eval_np(obs)
class TanhMlpPolicy(MlpPolicy):
"""
A helper class since most policies have a tanh output activation.
"""
def __init__(self, *args, **kwargs):
self.save_init_params(locals())
super().__init__(*args, output_activation=torch.tanh, **kwargs)
class MlpEncoder(FlattenMlp):
'''
encode context via MLP
'''
def reset(self, num_tasks=1):
pass
def forward_seq(self,context):
t,b,_ = context.size()
input = context.view(t*b,-1)
out = self.forward(input)
return out.view(t,b,-1)
class RecurrentEncoder(FlattenMlp):
'''
encode context via recurrent network
'''
def __init__(self,
*args,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.hidden_dim = self.hidden_sizes[-1]
self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))
# input should be (task, seq, feat) and hidden should be (1, task, feat)
self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)
def forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))
self.hidden = hn
# take the last hidden state to predict z
out = out[:, -1, :]
# output layer
preactivation = self.last_fc(out)
output = self.output_activation(preactivation)
if return_preactivations:
return output, preactivation
else:
return output
def reset(self, num_tasks=1):
self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)
class RNN(FlattenMlp):
'''
encode context via recurrent network
'''
def __init__(self,
*args,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.hidden_dim = self.hidden_sizes[-1]
self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))
# input should be (task, seq, feat) and hidden should be (1, task, feat)
self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)
def inner_forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))
self.hidden = hn
# take the last hidden state to predict z
out = out.contiguous()
out = out.view(task * seq, -1)
# output layer
#preactivation = self.last_fc(out)
#output = self.output_activation(preactivation)
if return_preactivations:
return out, out
else:
return out
def forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))
self.hidden = hn
# take the last hidden state to predict z
out = out.contiguous()
out = out.view(task * seq, -1)
# output layer
preactivation = self.last_fc(out)
output = self.output_activation(preactivation)
if return_preactivations:
return output, output
else:
return output
def inner_reset(self, num_tasks=1):
self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)
class SnailEncoder(FlattenMlp):
def __init__(self,
input_length,
*args,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.hidden_dim = self.hidden_sizes[-1]
self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))
self.input_length = input_length
# input should be (task, seq, feat) and hidden should be (1, task, feat)
#self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)
layer_count = math.ceil(math.log(input_length)/math.log(2))
self.TC1 = TCBlock(self.hidden_dim,input_length,16)
self.atten1 = AttentionBlock(self.hidden_dim+16*layer_count,32,32)
self.TC2 = TCBlock(self.hidden_dim+16*layer_count+32,input_length,16)
self.atten2 = AttentionBlock(self.hidden_dim+16*layer_count*2+32,32,32)
self.out_layer = nn.Linear(self.hidden_dim+16*layer_count*2+32+32,self.output_size)
self.var_start = int(self.output_size / 2)
def forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out = out.permute(0,2,1)
#print(out.shape)
out = self.TC1(out)
out = self.atten1(out)
out = self.TC2(out)
out = self.atten2(out)
out = out[:, :, -1]
#print('o',out.shape)
# output layer
preactivation = self.out_layer(out)
output = self.output_activation(preactivation)
#temp = F.softplus(output[..., self.var_start:])
#output[..., self.var_start:] = temp
if return_preactivations:
return output, preactivation
else:
return output
def forward_seq(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
in_ = in_.contiguous()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out = out.permute(0,2,1)
#print(out.shape)
out = self.TC1(out)
out = self.atten1(out)
out = self.TC2(out)
out = self.atten2(out)
out = out.permute(0,2,1)
out = out.view(task * seq,-1)
preactivation = self.out_layer(out)
output = self.output_activation(preactivation)
#temp = F.softplus(output[..., self.var_start:])
#output[..., self.var_start:] = temp
#output = output.view(task,seq,-1)
if return_preactivations:
return output, preactivation
else:
return output
def reset(self,num_tasks=1):
return
class MyMlpEncoder(FlattenMlp):
'''
encode context via MLP
'''
def reset(self, num_tasks=1):
pass
def forward_seq(self, context):
t, b, _ = context.size()
input = context.view(t * b, -1)
# delegate to FlattenMlp.forward on the flattened batch
out = super().forward(input)
return out
def forward(self, context):
t, b, _ = context.size()
input = context.view(t * b, -1)
# calling self.forward(input) here recursed infinitely; use the parent MLP forward instead
out = super().forward(input)
return out
class CausalConv1d(nn.Module):
"""A 1D causal convolution layer.
Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
dimensions per step, and T is the number of steps.
Output: (B, D_out, T), where B is the minibatch size, D_out is the number
of dimensions in the output, and T is the number of steps.
Arguments:
in_channels (int): number of input channels
out_channels (int): number of output channels
"""
def __init__(self, in_channels, out_channels, dilation=1):
super(CausalConv1d, self).__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(
in_channels,
out_channels,
2,
padding = self.padding,
dilation = dilation
)
def forward(self, minibatch):
return self.causal_conv(minibatch)[:, :, :-self.padding]
class DenseBlock(nn.Module):
"""Two parallel 1D causal convolution layers w/tanh and sigmoid activations
Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
dimensions of the input, and T is the number of steps.
Output: (B, D_in+F, T), where `B` is the minibatch size, `D_in` is the
number of dimensions of the input, `F` is the number of filters, and `T`
is the length of the input sequence.
Arguments:
in_channels (int): number of input channels
filters (int): number of filters per channel
"""
def __init__(self, in_channels, filters, dilation=1):
super(DenseBlock, self).__init__()
self.causal_conv1 = CausalConv1d(
in_channels,
filters,
dilation=dilation
)
self.causal_conv2 = CausalConv1d(
in_channels,
filters,
dilation=dilation
)
def forward(self, minibatch):
tanh = torch.tanh(self.causal_conv1(minibatch))
sig = torch.sigmoid(self.causal_conv2(minibatch))
out = torch.cat([minibatch, tanh*sig], dim=1)
return out
class TCBlock(nn.Module):
"""A stack of DenseBlocks which dilates to desired sequence length
The TCBlock adds `ceil(log_2(seq_len))*filters` channels to the output.
Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
dimensions of the input, and T is the number of steps.
Output: (B, D_in+F, T), where `B` is the minibatch size, `D_in` is the
number of dimensions of the input, `F` is the number of filters, and `T`
is the length of the input sequence.
Arguments:
in_channels (int): channels for the input
seq_len (int): length of the sequence. The number of denseblock layers
is log base 2 of `seq_len`.
filters (int): number of filters per channel
"""
def __init__(self, in_channels, seq_len, filters):
super(TCBlock, self).__init__()
layer_count = math.ceil(math.log(seq_len)/math.log(2))
blocks = []
channel_count = in_channels
for layer in range(layer_count):
block = DenseBlock(channel_count, filters, dilation=2**layer)
blocks.append(block)
channel_count += filters
self.blocks = nn.Sequential(*blocks)
def forward(self, minibatch):
return self.blocks(minibatch)
class AttentionBlock(nn.Module):
"""An attention mechanism similar to Vaswani et al (2017)
The input of the AttentionBlock is `BxDxT` where `B` is the input
minibatch size, `D` is the dimensions of each feature, `T` is the length of
the sequence.
The output of the AttentionBlock is `Bx(D+V)xT` where `V` is the size of the
attention values.
Arguments:
input_dims (int): the number of dimensions (or channels) of each element
in the input sequence
k_size (int): the size of the attention keys
v_size (int): the size of the attention values
"""
def __init__(self, input_dims, k_size, v_size):
super(AttentionBlock, self).__init__()
self.key_layer = nn.Linear(input_dims, k_size)
self.query_layer = nn.Linear(input_dims, k_size)
self.value_layer = nn.Linear(input_dims, v_size)
self.sqrt_k = math.sqrt(k_size)
def forward(self, minibatch):
minibatch = minibatch.permute(0,2,1)
keys = self.key_layer(minibatch)
queries = self.query_layer(minibatch)
values = self.value_layer(minibatch)
logits = torch.bmm(queries, keys.transpose(2,1))
mask = logits.data.new(logits.size(1), logits.size(2)).fill_(1).byte()
mask = torch.triu(mask, 1)
mask = mask.unsqueeze(0).expand_as(logits)
logits.data.masked_fill_(mask.bool(), float('-inf'))
probs = F.softmax(logits / self.sqrt_k, dim=2)
read = torch.bmm(probs, values)
return torch.cat([minibatch, read], dim=2).permute(0,2,1)
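# Minimal shape-check sketch for the SNAIL building blocks above (the dimensions are
# illustrative assumptions, not part of the rlkit API):
if __name__ == '__main__':
    _tc = TCBlock(in_channels=8, seq_len=16, filters=4)           # 4 dense layers * 4 filters -> adds 16 channels
    _att = AttentionBlock(input_dims=8 + 16, k_size=8, v_size=8)
    _x = torch.randn(2, 8, 16)                                    # (B, D_in, T)
    _y = _tc(_x)                                                  # -> (2, 24, 16)
    _z = _att(_y)                                                 # -> (2, 32, 16); v_size values appended per step
    print(_y.shape, _z.shape)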
|
'''
multi-line
comment
here
'''
a = 10
b = 2
val1 = 123456
val2 = "sopa de ....."
val3 = 123.123
print("\n")
# comparison operators
print(a == b) # you already know what this does
print(a != b) # you already know what this does
print(a < b) # you already know what this does
print(a >= b) # you already know what this does
print(a <= b) # you already know what this does
print("\n")
# maths
print(a**b) # exponentiation
print(a**(a + b)) # exponentiation with parentheses
print(a**(a + b) % 7) # exponentiation and modulo
print("\n")
# some types
print(type(val1)) # prints the variable's type
print(type(val2)) # prints the variable's type
print(type(val3)) # prints the variable's type
print(type(a)) # prints the variable's type
print(type(b)) # prints the variable's type
print("\n")
# strings
palavra = 'tecnologicamente_avancada' # strings are objects in Python
print(palavra[0]) # individual characters can be accessed like this
print(palavra[1]) # indeed
print(palavra[2]) # very
print(palavra[3]) # technologically advanced
print(palavra[4])
print(2 * palavra[5]) # 2x whatever is at position 5
print(palavra[5:10]) # the slice between positions 5 and 10
print(palavra[:10]) # everything before position 10
print(palavra[10:]) # everything from position 10 onward
print(palavra[1:10:2]) # positions 1 to 10 with a step of 2
print(palavra[15::-1]) # from position 15 back to the start, reversed
print("\n")
# lists
lista = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(type(lista))
print(lista[0] + lista[1]) # addition works on elements inside the list
lista = lista + [0, 1, 2, 3] # concatenating lists
print(lista)
print(lista[-1]) # last index
print(lista[-2]) # second-to-last index, and so on...
''' TODO: continue with lists '''
|
from setuptools import setup, find_packages
import sys
with open('requirements.txt') as f:
reqs = f.read()
reqs = reqs.strip().split('\n')
install = [req for req in reqs if not req.startswith("git+git://")]
depends = [req.replace("git+git://", "git+http://") for req in reqs if req.startswith("git+git://")]
setup(
name='fever-api',
version='0.0.0',
author='James Thorne',
author_email='james@jamesthorne.co.uk',
url='https://jamesthorne.co.uk',
description='Fact Extraction and VERification API',
long_description="readme",
license="Apache 2.0",
python_requires='>=3.5',
package_dir={'fever': 'src/fever',
'fever.api': 'src/fever/api'},
packages=['fever',
'fever.api'
],
install_requires=install,
dependency_links=depends,
)
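# Editable-install sketch (assumes this setup.py sits at the repository root):
#   pip install -e .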
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from clinicgen.text.sentsplit import LineBreakSplitter, NLTKSentenceSplitter, SpaCySentenceSplitter, StanzaSentenceSplitter
class TestLineBreakSplitter(unittest.TestCase):
def test_split(self):
splitter = LineBreakSplitter()
text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
sents = splitter.split(text)
self.assertEqual(len(sents), 2)
self.assertTrue(sents[0].startswith('Hello'))
self.assertTrue(sents[1].startswith('of'))
class TestNLTKSentenceSplitter(unittest.TestCase):
def test_split(self):
splitter = NLTKSentenceSplitter()
text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
sents = splitter.split(text)
self.assertEqual(len(sents), 4)
self.assertTrue(sents[0].startswith('Hello'))
self.assertTrue(sents[1].startswith('Running'))
self.assertTrue(sents[2].startswith('of'))
self.assertTrue(sents[3].startswith('Line'))
class TestSpaCySentenceSplitter(unittest.TestCase):
def test_split(self):
splitter = SpaCySentenceSplitter('en_core_web_sm')
text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
sents = splitter.split(text)
self.assertEqual(len(sents), 4)
self.assertTrue(sents[0].startswith('Hello'))
self.assertTrue(sents[1].startswith('Running'))
self.assertTrue(sents[2].startswith('of'))
self.assertTrue(sents[3].startswith('Line'))
class TestStanzaSentenceSplitter(unittest.TestCase):
def test_split(self):
splitter = StanzaSentenceSplitter()
text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
sents = splitter.split(text)
self.assertEqual(len(sents), 4)
self.assertTrue(sents[0].startswith('Hello'))
self.assertTrue(sents[1].startswith('Running'))
self.assertTrue(sents[2].startswith('of'))
self.assertTrue(sents[3].startswith('Line'))
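# The suites above can be run directly (assumes the clinicgen package plus the NLTK punkt
# data, the spaCy `en_core_web_sm` model, and a Stanza English model are installed):
if __name__ == '__main__':
    unittest.main()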
|
import os
import stat
from pathlib import Path
from shutil import move, rmtree
from typing import Optional, Set, Union
from warnings import warn
from cookiecutter.generate import generate_files
from git import Repo
from .cookiecutter import CookiecutterContext, generate_cookiecutter_context
from .cruft import CruftState
from .iohelper import AltTemporaryDirectory
try:
import toml
except ImportError: # pragma: no cover
toml = None # type: ignore
def cookiecutter_template(
output_dir: Path,
repo: Repo,
cruft_state: CruftState,
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
checkout: Optional[str] = None,
deleted_paths: Optional[Set[Path]] = None,
update_deleted_paths: bool = False,
) -> CookiecutterContext:
"""Generate a clean cookiecutter template in output_dir."""
if deleted_paths is None:
deleted_paths = set()
pyproject_file = project_dir / "pyproject.toml"
commit = checkout or repo.remotes.origin.refs["HEAD"]
repo.head.reset(commit=commit, working_tree=True)
context = _generate_output(cruft_state, Path(repo.working_dir), cookiecutter_input, output_dir)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
if update_deleted_paths:
deleted_paths.update(_get_deleted_files(output_dir, project_dir))
# We now remove skipped and deleted paths from the project
_remove_paths(output_dir, skip_paths | deleted_paths) # type: ignore
return context
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, project_dir: Path, cookiecutter_input: bool, output_dir: Path
) -> CookiecutterContext:
inner_dir = project_dir / (cruft_state.get("directory") or "")
new_context = generate_cookiecutter_context(
cruft_state["template"],
inner_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
# This generates the cookiecutter template.
# Unfortunately, cookiecutter doesn't let us output the template in an
# arbitrary directory. It insists on creating the initial project directory.
# Therefore we have to move the directory content to the expected output_dir.
# See https://github.com/cookiecutter/cookiecutter/pull/907
output_dir.mkdir(parents=True, exist_ok=True)
with AltTemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
# Kindly ask cookiecutter to generate the template
template_dir = generate_files(
repo_dir=inner_dir, context=new_context, overwrite_if_exists=True, output_dir=tmpdir
)
template_dir = Path(template_dir)
# Move the template content to the output directory
for name in os.listdir(template_dir):
move(str(template_dir / name), str(output_dir))
return new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
elif pyproject_file.is_file():
warn(
"pyproject.toml is present in repo, but `toml` package is not installed. "
"Cruft configuration may be ignored."
)
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_readonly(func, path, _): # pragma: no cov_4_nix
"""Clear the readonly bit and reattempt the removal."""
os.chmod(path, stat.S_IWRITE) # WINDOWS
func(path)
def _remove_single_path(path: Path):
if path.is_dir():
try:
rmtree(path, ignore_errors=False, onerror=_remove_readonly)
except Exception: # pragma: no cover
raise Exception("Failed to remove directory.")
# rmtree(path)
elif path.is_file():
# path.unlink()
try:
path.unlink()
except PermissionError: # pragma: no cov_4_nix
path.chmod(stat.S_IWRITE)
path.unlink()
except Exception as exc: # pragma: no cover
raise Exception("Failed to remove file.") from exc
def _remove_paths(root: Path, paths_to_remove: Set[Union[Path, str]]):
# There is some redundancy here in chmoding dirs and/or files differently.
abs_paths_to_remove = []
for path_to_remove in paths_to_remove:
if isinstance(path_to_remove, Path):
abs_paths_to_remove.append(root / path_to_remove)
elif isinstance(path_to_remove, str): # assumes the string is a glob-pattern
abs_paths_to_remove += list(root.glob(path_to_remove))
else:
warn(f"{path_to_remove} is not a Path object or a string glob-pattern")
for path in abs_paths_to_remove:
_remove_single_path(path)
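# Example of the configuration consumed by _get_skip_paths above (a sketch; the listed
# paths are illustrative):
#
#   # pyproject.toml
#   [tool.cruft]
#   skip = ["tests", "*.lock"]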
|
import matplotlib.pyplot as plt
import os
def plot_cost(cost):
fig, ax1 = plt.subplots()
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Cost')
plt.plot(cost)
fig.tight_layout()
plot_filename = os.path.join(os.getcwd(), 'figures', 'cost.png')
plt.savefig(plot_filename)
plt.show()
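# Usage sketch (assumes a ./figures directory already exists under the current working
# directory, since savefig() will not create it):
#   plot_cost([10.0, 5.2, 3.1, 2.4])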
|
from .settable_generator import *
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image datasets."""
from tensorflow_datasets.image.cats_vs_dogs import CatsVsDogs
from tensorflow_datasets.image.celeba import CelebA
from tensorflow_datasets.image.celebahq import CelebAHq
from tensorflow_datasets.image.chexpert import Chexpert
from tensorflow_datasets.image.cifar import Cifar10
from tensorflow_datasets.image.cifar import Cifar100
from tensorflow_datasets.image.coco import Coco2014
from tensorflow_datasets.image.colorectal_histology import ColorectalHistology
from tensorflow_datasets.image.colorectal_histology import ColorectalHistologyLarge
from tensorflow_datasets.image.diabetic_retinopathy_detection import DiabeticRetinopathyDetection
from tensorflow_datasets.image.flowers import TFFlowers
from tensorflow_datasets.image.horses_or_humans import HorsesOrHumans
from tensorflow_datasets.image.image_folder import ImageLabelFolder
from tensorflow_datasets.image.imagenet import Imagenet2012
from tensorflow_datasets.image.lsun import Lsun
from tensorflow_datasets.image.mnist import FashionMNIST
from tensorflow_datasets.image.mnist import KMNIST
from tensorflow_datasets.image.mnist import MNIST
from tensorflow_datasets.image.omniglot import Omniglot
from tensorflow_datasets.image.open_images import OpenImagesV4
from tensorflow_datasets.image.quickdraw import QuickdrawBitmap
from tensorflow_datasets.image.rock_paper_scissors import RockPaperScissors
from tensorflow_datasets.image.svhn import SvhnCropped
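# Downstream code normally goes through the public tensorflow_datasets API rather than
# these builder classes directly, e.g. (a sketch):
#
#   import tensorflow_datasets as tfds
#   ds = tfds.load("mnist", split="train")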
|
from enum import Enum
from datetime import datetime, date
from dateutil.relativedelta import relativedelta, MO
import argparse
import holidays
import pandas as pd
class BGEHolidays(holidays.HolidayBase):
def _populate(self, year):
holidays.UnitedStates._populate(self, year)
# Remove Martin Luther King Day
self.pop(date(year, 1, 1) + relativedelta(weekday=MO(+3)), None)
# Remove Columbus Day
self.pop(date(year, 10, 1) + relativedelta(weekday=MO(+2)), None)
# Remove Veterans Day
self.pop(date(year, 11, 11), None)
# Add good friday
self[holidays.easter(year) + relativedelta(days=-2)] = 'Good Friday'
class TimeOfUse(Enum):
peak = 0
shoulder = 1
offpeak = 2
class Season(Enum):
Winter = 0
Summer = 1
@classmethod
def get(cls, dt):
d = dt.date()
if date(dt.year, 6, 1) <= d and date(dt.year, 9, 30) >= d:
return cls.Summer
return cls.Winter
class Schedule(Enum):
R = 'R'
RL = 'RL'
EV = 'EV'
EVP = 'EVP'
def getTOU(self, dt):
d = dt.date()
t = dt.time()
bge_holidays = BGEHolidays(dt.year)
if self == self.R:
return TimeOfUse.offpeak
elif self == self.RL:
if Season.get(dt) == Season.Summer:
if (t.hour >=10 and t.hour < 20) and \
(dt.weekday() < 5) and \
(d not in bge_holidays):
return TimeOfUse.peak
elif ((t.hour >= 7 and t.hour < 10) or (t.hour >= 20 and t.hour < 23)) and \
(dt.weekday() < 5) and \
(d not in bge_holidays):
return TimeOfUse.shoulder
else:
return TimeOfUse.offpeak
else:
if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \
(dt.weekday() < 5) and \
(d not in bge_holidays):
return TimeOfUse.peak
elif (t.hour >= 11 and t.hour < 17) and \
(dt.weekday() < 5) and \
(d not in bge_holidays):
return TimeOfUse.shoulder
else:
return TimeOfUse.offpeak
elif self in (self.EV, self.EVP):
if Season.get(dt) == Season.Summer:
if (t.hour >= 10 and t.hour < 20) and \
(dt.weekday() < 5) and \
(d not in bge_holidays):
return TimeOfUse.peak
else:
return TimeOfUse.offpeak
else:
if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \
(dt.weekday() < 5) and \
(d not in bge_holidays):
return TimeOfUse.peak
else:
return TimeOfUse.offpeak
rates = {
(Schedule.R, Season.Summer, TimeOfUse.offpeak): .06722,
(Schedule.R, Season.Winter, TimeOfUse.offpeak): .07805,
(Schedule.RL, Season.Summer, TimeOfUse.peak): .08465,
(Schedule.RL, Season.Summer, TimeOfUse.shoulder): .06069,
(Schedule.RL, Season.Summer, TimeOfUse.offpeak): .05744,
(Schedule.RL, Season.Winter, TimeOfUse.peak): .09053,
(Schedule.RL, Season.Winter, TimeOfUse.shoulder): .07944,
(Schedule.RL, Season.Winter, TimeOfUse.offpeak): .07166,
(Schedule.EV, Season.Summer, TimeOfUse.peak): .1227,
(Schedule.EV, Season.Summer, TimeOfUse.offpeak): .03886,
(Schedule.EV, Season.Winter, TimeOfUse.peak): .18474,
(Schedule.EV, Season.Winter, TimeOfUse.offpeak): .0426,
(Schedule.EVP, Season.Summer, TimeOfUse.peak): .03886,
(Schedule.EVP, Season.Summer, TimeOfUse.offpeak): .03886,
(Schedule.EVP, Season.Winter, TimeOfUse.peak): .0426,
(Schedule.EVP, Season.Winter, TimeOfUse.offpeak): .0426
}
def get_rate(dt, schedule = Schedule.R):
bge_holidays = BGEHolidays(dt.year)
season = Season.get(dt)
tou = schedule.getTOU(dt)
return rates[(schedule, season, tou)]
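# Worked example (a sketch; 2020-07-01 is a summer weekday and not a BGE holiday):
#   get_rate(datetime(2020, 7, 1, 12, 0), Schedule.RL)  # noon on-peak -> 0.08465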
def process_row(x):
dt = x['DATE_START TIME']
val = x['USAGE']
return pd.Series([dt] + [get_rate(dt, s) * (val + .0700) for s in Schedule], index=['DATE_START TIME'] + [s.value for s in Schedule])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_file', type=argparse.FileType('r'))
args = parser.parse_args()
df = pd.read_csv(args.input_file, parse_dates=[['DATE', 'START TIME']])[['DATE_START TIME', 'USAGE']]
schedules = df.apply(process_row, axis=1)
print(schedules[['R', 'RL', 'EV', 'EVP']].sum())
if __name__ == '__main__':
main()
|
from django.contrib import admin
from oppurtunity.models import Opportunity
admin.site.register(Opportunity)
|
import tkinter as tk
from tkinter import *
import cv2
import csv
import os
import numpy as np
from PIL import Image,ImageTk
import pandas as pd
import datetime
import time
##Error screen2
def del_sc2():
sc2.destroy()
def err_screen1():
global sc2
sc2 = tk.Tk()
sc2.geometry('300x100')
sc2.iconbitmap('FRAMS.ico')
sc2.title('Warning!!')
sc2.configure(background='snow')
Label(sc2,text='Please enter your subject name!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack()
Button(sc2,text='OK',command=del_sc2,fg="black" ,bg="lawn green" ,width=9 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold ')).place(x=90,y= 50)
def Fillattendances():
sub = tx.get()
now = time.time()  # start of the 20-second capture window
future = now + 20
if time.time() < future:
if sub == '':
err_screen1()
else:
recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()
try:
recognizer.read("TrainingImageLabel\Trainner.yml")
except:
e = 'Model not found, please train the model'
Notifica.configure(text=e, bg="red", fg="black", width=33, font=('times', 15, 'bold'))
Notifica.place(x=20, y=250)
harcascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(harcascadePath)
df = pd.read_csv("StudentDetails\StudentDetails.csv")
cam = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
col_names = ['Enrollment', 'Name', 'Date', 'Time']
attendance = pd.DataFrame(columns=col_names)
while True:
ret, im = cam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.2, 5)
for (x, y, w, h) in faces:
global Id
Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
if (conf < 70):
print(conf)
global Subject
global aa
global date
global timeStamp
Subject = tx.get()
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
aa = df.loc[df['Enrollment'] == Id]['Name'].values
global tt
tt = str(Id) + "-" + aa
En = '15624031' + str(Id)
attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
                        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 7)
cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0,), 4)
else:
Id = 'Unknown'
tt = str(Id)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7)
cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4)
if time.time() > future:
break
attendance = attendance.drop_duplicates(['Enrollment'], keep='first')
                cv2.imshow('Filling attendance..', im)
key = cv2.waitKey(30) & 0xff
if key == 27:
break
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
Hour, Minute, Second = timeStamp.split(":")
fileName = "Attendance/" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv"
attendance = attendance.drop_duplicates(['Enrollment'], keep='first')
print(attendance)
attendance.to_csv(fileName, index=False)
M = 'Attendance filled Successfully'
Notifica.configure(text=M, bg="Green", fg="white", width=33, font=('times', 15, 'bold'))
Notifica.place(x=20, y=250)
cam.release()
cv2.destroyAllWindows()
import csv
import tkinter
root = tkinter.Tk()
root.title("Attendance of " + Subject)
root.configure(background='snow')
cs = './' + fileName
with open(cs, newline="") as file:
reader = csv.reader(file)
r = 0
for col in reader:
c = 0
for row in col:
# i've added some styling
label = tkinter.Label(root, width=8, height=1, fg="black", font=('times', 15, ' bold '),
bg="lawn green", text=row, relief=tkinter.RIDGE)
label.grid(row=r, column=c)
c += 1
r += 1
root.mainloop()
print(attendance)
if __name__ == '__main__':
###windo is frame for subject choosing
windo = tk.Tk()
windo.iconbitmap('FRAMS.ico')
windo.title("Enter subject name...")
windo.geometry('580x320')
windo.configure(background='snow')
Notifica = tk.Label(windo, text="Attendance filled Successfully", bg="Green", fg="white", width=33,
height=2, font=('times', 15, 'bold'))
def Attf():
import subprocess
subprocess.Popen(
r'explorer /select,".\Attendance\Manually Attendance\"') # open attendance sheet window
attf = tk.Button(windo, text="Check Sheets", command=Attf, fg="black", bg="lawn green", width=12, height=1,
activebackground="Red", font=('times', 14, ' bold '))
attf.place(x=430, y=255)
sub = tk.Label(windo, text="Enter Subject", width=15, height=2, fg="white", bg="blue2",
font=('times', 15, ' bold '))
sub.place(x=30, y=100)
tx = tk.Entry(windo, width=20, bg="yellow", fg="red", font=('times', 23, ' bold '))
tx.place(x=250, y=105)
fill_a = tk.Button(windo, text="Fill Attendance", fg="white", command=Fillattendances, bg="deep pink", width=20,
height=2,
activebackground="Red", font=('times', 15, ' bold '))
fill_a.place(x=250, y=160)
windo.mainloop()
|
# coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tokenization ops for BERT preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import string_ops
from tensorflow_text.python.ops import regex_split_ops
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
_DELIM_REGEX = [
r"\s+",
r"|".join([
r"[!-/]",
r"[:-@]",
r"[\[-`]",
r"[{-~]",
r"[\p{P}]",
]),
r"|".join([
r"[\x{4E00}-\x{9FFF}]",
r"[\x{3400}-\x{4DBF}]",
r"[\x{20000}-\x{2A6DF}]",
r"[\x{2A700}-\x{2B73F}]",
r"[\x{2B740}-\x{2B81F}]",
r"[\x{2B820}-\x{2CEAF}]",
r"[\x{F900}-\x{FAFF}]",
r"[\x{2F800}-\x{2FA1F}]",
]),
]
_DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE.remove(r"\s+")
_UNUSED_TOKEN_REGEX = "\\[unused\\d+\\]"
_KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE)
class BasicTokenizer(TokenizerWithOffsets):
r"""Basic tokenizer for for tokenizing text.
A basic tokenizer that tokenizes using some deterministic rules:
- For most languages, this tokenizer will split on whitespace.
- For Chinese, Japanese, and Korean characters, this tokenizer will split on
Unicode characters.
Attributes:
lower_case: bool - If true, a preprocessing step is added to lowercase the
text, apply NFD normalization, and strip accents characters.
keep_whitespace: bool - If true, preserves whitespace characters instead of
stripping them away.
normalization_form: If true and lower_case=False, the input text will be
normalized to `normalization_form`. See normalize_utf8() op for a list of
valid values.
preserve_unused_token: If true, text in the regex format "\\[unused\\d+\\]"
will be treated as a token and thus remain preserved as is to be looked up
in the vocabulary.
"""
def __init__(self,
lower_case=False,
keep_whitespace=False,
normalization_form=None,
preserve_unused_token=False):
self._lower_case = lower_case
if not keep_whitespace:
self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN
else:
self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN
self._normalization_form = normalization_form
if preserve_unused_token:
self._delim_regex_pattern = "|".join(
[_UNUSED_TOKEN_REGEX, _DELIM_REGEX_PATTERN])
self._keep_delim_regex_pattern = "|".join(
[_UNUSED_TOKEN_REGEX, self._keep_delim_regex_pattern])
else:
self._delim_regex_pattern = _DELIM_REGEX_PATTERN
def tokenize(self, text_input):
tokens, _, _ = self.tokenize_with_offsets(text_input)
return tokens
def tokenize_with_offsets(self, text_input):
"""Performs basic word tokenization for BERT.
Args:
text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.
    Returns:
      A tuple `(tokens, begin_offsets, end_offsets)` where `tokens` is a
      `RaggedTensor` of tokenized strings from text_input and the offsets are
      byte offsets into the original strings.
"""
# lowercase and strip accents (if option is set)
if self._lower_case:
text_input = case_fold_utf8(text_input)
text_input = normalize_utf8(text_input, "NFD")
text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "")
else:
# utf8 normalization
if self._normalization_form is not None:
text_input = normalize_utf8(text_input, self._normalization_form)
# strip out control characters
text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ")
return regex_split_ops.regex_split_with_offsets(
text_input, self._delim_regex_pattern, self._keep_delim_regex_pattern,
"BertBasicTokenizer")
class BertTokenizer(TokenizerWithOffsets):
r"""Tokenizer used for BERT.
This tokenizer applies an end-to-end, text string to wordpiece tokenization.
  It first applies basic tokenization, followed by wordpiece
tokenization.
See BasicTokenizer and WordpieceTokenizer for their respective details.
Attributes:
vocab_lookup_table: A lookup table implementing the LookupInterface
containing the vocabulary of subwords or a string which is the file path
to the vocab.txt file.
suffix_indicator: (optional) The characters prepended to a wordpiece to
indicate that it is a suffix to another subword. Default is '##'.
max_bytes_per_word: (optional) Max size of input token. Default is 100.
max_chars_per_token: (optional) Max size of subwords, excluding suffix
indicator. If known, providing this improves the efficiency of decoding
long words.
token_out_type: (optional) The type of the token to return. This can be
`tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.
unknown_token: (optional) The value to use when an unknown token is found.
Default is "[UNK]". If this is set to a string, and `token_out_type` is
`tf.int64`, the `vocab_lookup_table` is used to convert the
`unknown_token` to an integer. If this is set to `None`, out-of-vocabulary
tokens are left as is.
split_unknown_characters: (optional) Whether to split out single unknown
characters as subtokens. If False (default), words containing unknown
characters will be treated as single unknown tokens.
lower_case: bool - If true, a preprocessing step is added to lowercase the
text, apply NFD normalization, and strip accents characters.
keep_whitespace: bool - If true, preserves whitespace characters instead of
stripping them away.
normalization_form: If true and lower_case=False, the input text will be
normalized to `normalization_form`. See normalize_utf8() op for a list of
valid values.
preserve_unused_token: If true, text in the regex format `\\[unused\\d+\\]`
will be treated as a token and thus remain preserved as is to be looked up
in the vocabulary.
"""
def __init__(self,
vocab_lookup_table,
suffix_indicator="##",
max_bytes_per_word=100,
max_chars_per_token=None,
token_out_type=dtypes.int64,
unknown_token="[UNK]",
split_unknown_characters=False,
lower_case=False,
keep_whitespace=False,
normalization_form=None,
preserve_unused_token=False):
if isinstance(vocab_lookup_table, str) or isinstance(
vocab_lookup_table, ops.Tensor):
init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)
vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(
init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)
print("Before ", type(lower_case))
if isinstance(lower_case, ops.Tensor):
lower_case = tf.compat.v1.get_default_session().run(lower_case)
print("After ", type(lower_case))
self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,
normalization_form,
preserve_unused_token)
self._wordpiece_tokenizer = WordpieceTokenizer(
vocab_lookup_table, suffix_indicator, max_bytes_per_word,
max_chars_per_token, token_out_type, unknown_token,
split_unknown_characters)
def tokenize_with_offsets(self, text_input):
tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)
wordpieces, wp_begin, wp_end = (
self._wordpiece_tokenizer.tokenize_with_offsets(tokens))
begin_expanded = array_ops.expand_dims(begin, axis=2)
final_begin = begin_expanded + wp_begin
final_end = begin_expanded + wp_end
return wordpieces, final_begin, final_end
def tokenize(self, text_input):
"""Performs untokenized text to wordpiece tokenization for BERT.
Args:
      text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8
strings.
Returns:
A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string
contents (or ID in the vocab_lookup_table representing that string)
of the `jth` token in `input[i1...iN]`
"""
tokens = self._basic_tokenizer.tokenize(text_input)
return self._wordpiece_tokenizer.tokenize(tokens)
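

# Usage sketch (not part of the original module; assumes a one-token-per-line
# "vocab.txt" file and either eager execution or an initialized session for the
# vocabulary table lookups):
#
#   tokenizer = BertTokenizer("vocab.txt", lower_case=True)
#   token_ids = tokenizer.tokenize(["Taste the rainbow!"])        # RaggedTensor of int64 ids
#   subwords = BertTokenizer("vocab.txt", token_out_type=dtypes.string).tokenize(["unaffable"])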
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bithumb(Exchange):
def describe(self):
return self.deep_extend(super(bithumb, self).describe(), {
'id': 'bithumb',
'name': 'Bithumb',
'countries': ['KR'], # South Korea
'rateLimit': 500,
'has': {
'cancelOrder': True,
'CORS': True,
'createMarketOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchMarkets': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30597177-ea800172-9d5e-11e7-804c-b9d4fa9b56b0.jpg',
'api': {
'public': 'https://api.bithumb.com/public',
'private': 'https://api.bithumb.com',
},
'www': 'https://www.bithumb.com',
'doc': 'https://apidocs.bithumb.com',
'fees': 'https://en.bithumb.com/customer_support/info_fee',
},
'api': {
'public': {
'get': [
'ticker/{currency}',
'ticker/all',
'orderbook/{currency}',
'orderbook/all',
'transaction_history/{currency}',
'transaction_history/all',
],
},
'private': {
'post': [
'info/account',
'info/balance',
'info/wallet_address',
'info/ticker',
'info/orders',
'info/user_transactions',
'info/order_detail',
'trade/place',
'trade/cancel',
'trade/btc_withdrawal',
'trade/krw_deposit',
'trade/krw_withdrawal',
'trade/market_buy',
'trade/market_sell',
],
},
},
'fees': {
'trading': {
'maker': 0.25 / 100,
'taker': 0.25 / 100,
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'exceptions': {
'Bad Request(SSL)': BadRequest,
'Bad Request(Bad Method)': BadRequest,
'Bad Request.(Auth Data)': AuthenticationError, # {"status": "5100", "message": "Bad Request.(Auth Data)"}
'Not Member': AuthenticationError,
'Invalid Apikey': AuthenticationError, # {"status":"5300","message":"Invalid Apikey"}
'Method Not Allowed.(Access IP)': PermissionDenied,
'Method Not Allowed.(BTC Adress)': InvalidAddress,
'Method Not Allowed.(Access)': PermissionDenied,
'Database Fail': ExchangeNotAvailable,
'Invalid Parameter': BadRequest,
'5600': ExchangeError,
'Unknown Error': ExchangeError,
'After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions': ExchangeError, # {"status":"5100","message":"After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions"}
},
})
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def fetch_markets(self, params={}):
response = self.publicGetTickerAll(params)
data = self.safe_value(response, 'data')
currencyIds = list(data.keys())
result = []
quote = self.safe_currency_code('KRW')
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
if currencyId == 'date':
continue
market = data[currencyId]
base = self.safe_currency_code(currencyId)
symbol = currencyId + '/' + quote
active = True
if isinstance(market, list):
numElements = len(market)
if numElements == 0:
active = False
result.append({
'id': currencyId,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'active': active,
'precision': {
'amount': 4,
'price': 4,
},
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': 500,
'max': 5000000000,
},
},
'baseId': None,
'quoteId': None,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
request = {
'currency': 'ALL',
}
response = self.privatePostInfoBalance(self.extend(request, params))
result = {'info': response}
balances = self.safe_value(response, 'data')
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
account = self.account()
currency = self.currency(code)
lowerCurrencyId = self.safe_string_lower(currency, 'id')
account['total'] = self.safe_float(balances, 'total_' + lowerCurrencyId)
account['used'] = self.safe_float(balances, 'in_use_' + lowerCurrencyId)
account['free'] = self.safe_float(balances, 'available_' + lowerCurrencyId)
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'],
}
if limit is not None:
request['count'] = limit # default 30, max 30
response = self.publicGetOrderbookCurrency(self.extend(request, params))
#
# {
# "status":"0000",
# "data":{
# "timestamp":"1587621553942",
# "payment_currency":"KRW",
# "order_currency":"BTC",
# "bids":[
# {"price":"8652000","quantity":"0.0043"},
# {"price":"8651000","quantity":"0.0049"},
# {"price":"8650000","quantity":"8.4791"},
# ],
# "asks":[
# {"price":"8654000","quantity":"0.119"},
# {"price":"8655000","quantity":"0.254"},
# {"price":"8658000","quantity":"0.119"},
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(data, 'timestamp')
return self.parse_order_book(data, timestamp, 'bids', 'asks', 'price', 'quantity')
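    # Sketch of the parsed result (derived from the sample response above; ccxt
    # normalizes each book level to [price, amount] floats):
    #
    #   {'bids': [[8652000.0, 0.0043], [8651000.0, 0.0049], ...],
    #    'asks': [[8654000.0, 0.119], [8655000.0, 0.254], ...],
    #    'timestamp': 1587621553942, ...}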
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "opening_price":"227100",
# "closing_price":"228400",
# "min_price":"222300",
# "max_price":"230000",
# "units_traded":"82618.56075337",
# "acc_trade_value":"18767376138.6031",
# "prev_closing_price":"227100",
# "units_traded_24H":"151871.13484676",
# "acc_trade_value_24H":"34247610416.8974",
# "fluctate_24H":"8700",
# "fluctate_rate_24H":"3.96",
# "date":"1587710327264", # fetchTickers inject self
# }
#
timestamp = self.safe_integer(ticker, 'date')
symbol = None
if market is not None:
symbol = market['symbol']
open = self.safe_float(ticker, 'opening_price')
close = self.safe_float(ticker, 'closing_price')
change = None
percentage = None
average = None
if (close is not None) and (open is not None):
change = close - open
if open > 0:
percentage = change / open * 100
average = self.sum(open, close) / 2
baseVolume = self.safe_float(ticker, 'units_traded_24H')
quoteVolume = self.safe_float(ticker, 'acc_trade_value_24H')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max_price'),
'low': self.safe_float(ticker, 'min_price'),
'bid': self.safe_float(ticker, 'buy_price'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell_price'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickerAll(params)
#
# {
# "status":"0000",
# "data":{
# "BTC":{
# "opening_price":"9045000",
# "closing_price":"9132000",
# "min_price":"8938000",
# "max_price":"9168000",
# "units_traded":"4619.79967497",
# "acc_trade_value":"42021363832.5187",
# "prev_closing_price":"9041000",
# "units_traded_24H":"8793.5045804",
# "acc_trade_value_24H":"78933458515.4962",
# "fluctate_24H":"530000",
# "fluctate_rate_24H":"6.16"
# },
# "date":"1587710878669"
# }
# }
#
result = {}
data = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(data, 'date')
tickers = self.omit(data, 'date')
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
isArray = isinstance(ticker, list)
if not isArray:
ticker['date'] = timestamp
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'],
}
response = self.publicGetTickerCurrency(self.extend(request, params))
#
# {
# "status":"0000",
# "data":{
# "opening_price":"227100",
# "closing_price":"228400",
# "min_price":"222300",
# "max_price":"230000",
# "units_traded":"82618.56075337",
# "acc_trade_value":"18767376138.6031",
# "prev_closing_price":"227100",
# "units_traded_24H":"151871.13484676",
# "acc_trade_value_24H":"34247610416.8974",
# "fluctate_24H":"8700",
# "fluctate_rate_24H":"3.96",
# "date":"1587710327264"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "transaction_date":"2020-04-23 22:21:46",
# "type":"ask",
# "units_traded":"0.0125",
# "price":"8667000",
# "total":"108337"
# }
#
# fetchOrder(private)
#
# {
# "transaction_date": "1572497603902030",
# "price": "8601000",
# "units": "0.005",
# "fee_currency": "KRW",
# "fee": "107.51",
# "total": "43005"
# }
#
# a workaround for their bug in date format, hours are not 0-padded
timestamp = None
transactionDatetime = self.safe_string(trade, 'transaction_date')
if transactionDatetime is not None:
parts = transactionDatetime.split(' ')
numParts = len(parts)
if numParts > 1:
transactionDate = parts[0]
transactionTime = parts[1]
if len(transactionTime) < 8:
transactionTime = '0' + transactionTime
timestamp = self.parse8601(transactionDate + ' ' + transactionTime)
else:
timestamp = self.safe_integer_product(trade, 'transaction_date', 0.001)
if timestamp is not None:
timestamp -= 9 * 3600000 # they report UTC + 9 hours, server in Korean timezone
type = None
side = self.safe_string(trade, 'type')
side = 'sell' if (side == 'ask') else 'buy'
id = self.safe_string(trade, 'cont_no')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'units_traded')
cost = self.safe_float(trade, 'total')
if cost is None:
if amount is not None:
if price is not None:
cost = price * amount
fee = None
feeCost = self.safe_float(trade, 'fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'fee_currency')
feeCurrencyCode = self.common_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['base'],
}
        if limit is not None:
            request['count'] = limit  # default 20, max 100
response = self.publicGetTransactionHistoryCurrency(self.extend(request, params))
#
# {
# "status":"0000",
# "data":[
# {
# "transaction_date":"2020-04-23 22:21:46",
# "type":"ask",
# "units_traded":"0.0125",
# "price":"8667000",
# "total":"108337"
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'order_currency': market['id'],
'Payment_currency': market['quote'],
'units': amount,
}
method = 'privatePostTradePlace'
if type == 'limit':
request['price'] = price
request['type'] = 'bid' if (side == 'buy') else 'ask'
else:
method = 'privatePostTradeMarket' + self.capitalize(side)
response = getattr(self, method)(self.extend(request, params))
id = self.safe_string(response, 'order_id')
if id is None:
raise InvalidOrder(self.id + ' createOrder did not return an order id')
return {
'info': response,
'symbol': symbol,
'type': type,
'side': side,
'id': id,
}
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'count': 1,
'order_currency': market['base'],
'payment_currency': market['quote'],
}
response = self.privatePostInfoOrderDetail(self.extend(request, params))
#
# {
# "status": "0000",
# "data": {
# "transaction_date": "1572497603668315",
# "type": "bid",
# "order_status": "Completed",
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_price": "8601000",
# "order_qty": "0.007",
# "cancel_date": "",
# "cancel_type": "",
# "contract": [
# {
# "transaction_date": "1572497603902030",
# "price": "8601000",
# "units": "0.005",
# "fee_currency": "KRW",
# "fee": "107.51",
# "total": "43005"
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_order(self.extend(data, {'order_id': id}), market)
def parse_order_status(self, status):
statuses = {
'Pending': 'open',
'Completed': 'closed',
'Cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# fetchOrder
#
# {
# "transaction_date": "1572497603668315",
# "type": "bid",
# "order_status": "Completed",
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_price": "8601000",
# "order_qty": "0.007",
# "cancel_date": "",
# "cancel_type": "",
# "contract": [
# {
# "transaction_date": "1572497603902030",
# "price": "8601000",
# "units": "0.005",
# "fee_currency": "KRW",
# "fee": "107.51",
# "total": "43005"
# },
# ]
# }
#
# fetchOpenOrders
#
# {
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_id": "C0101000007408440032",
# "order_date": "1571728739360570",
# "type": "bid",
# "units": "5.0",
# "units_remaining": "5.0",
# "price": "501000",
# }
#
timestamp = self.safe_integer_product(order, 'order_date', 0.001)
sideProperty = self.safe_value_2(order, 'type', 'side')
side = 'buy' if (sideProperty == 'bid') else 'sell'
status = self.parse_order_status(self.safe_string(order, 'order_status'))
price = self.safe_float_2(order, 'order_price', 'price')
type = 'limit'
if price == 0:
price = None
type = 'market'
amount = self.safe_float_2(order, 'order_qty', 'units')
remaining = self.safe_float(order, 'units_remaining')
if remaining is None:
if status == 'closed':
remaining = 0
else:
remaining = amount
filled = None
if (amount is not None) and (remaining is not None):
filled = amount - remaining
symbol = None
baseId = self.safe_string(order, 'order_currency')
quoteId = self.safe_string(order, 'payment_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
if (base is not None) and (quote is not None):
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
rawTrades = self.safe_value(order, 'contract')
trades = None
id = self.safe_string(order, 'order_id')
if rawTrades is not None:
trades = self.parse_trades(rawTrades, market, None, None, {
'side': side,
'symbol': symbol,
'order': id,
})
return {
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': trades,
}
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100
request = {
'count': limit,
'order_currency': market['base'],
'payment_currency': market['quote'],
}
if since is not None:
request['after'] = since
response = self.privatePostInfoOrders(self.extend(request, params))
#
# {
# "status": "0000",
# "data": [
# {
# "order_currency": "BTC",
# "payment_currency": "KRW",
# "order_id": "C0101000007408440032",
# "order_date": "1571728739360570",
# "type": "bid",
# "units": "5.0",
# "units_remaining": "5.0",
# "price": "501000",
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def cancel_order(self, id, symbol=None, params={}):
side_in_params = ('side' in params)
if not side_in_params:
raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument and a `side` parameter(sell or buy)')
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument and a `side` parameter(sell or buy)')
market = self.market(symbol)
side = 'bid' if (params['side'] == 'buy') else 'ask'
params = self.omit(params, ['side', 'currency'])
# https://github.com/ccxt/ccxt/issues/6771
request = {
'order_id': id,
'type': side,
'order_currency': market['base'],
'payment_currency': market['quote'],
}
return self.privatePostTradeCancel(self.extend(request, params))
def cancel_unified_order(self, order, params={}):
request = {
'side': order['side'],
}
return self.cancel_order(order['id'], order['symbol'], self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'units': amount,
'address': address,
'currency': currency['id'],
}
        if code == 'XRP' or code == 'XMR':
destination = self.safe_string(params, 'destination')
if (tag is None) and (destination is None):
raise ArgumentsRequired(self.id + ' ' + code + ' withdraw() requires a tag argument or an extra destination param')
elif tag is not None:
request['destination'] = tag
response = self.privatePostTradeBtcWithdrawal(self.extend(request, params))
return {
'info': response,
'id': None,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
endpoint = '/' + self.implode_params(path, params)
url = self.urls['api'][api] + endpoint
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'endpoint': endpoint,
}, query))
nonce = str(self.nonce())
auth = endpoint + "\0" + body + "\0" + nonce # eslint-disable-line quotes
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512)
signature64 = self.decode(base64.b64encode(self.encode(signature)))
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'Api-Key': self.apiKey,
'Api-Sign': str(signature64),
'Api-Nonce': nonce,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"5100","message":"After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions"}
#
status = self.safe_string(response, 'status')
message = self.safe_string(response, 'message')
if status is not None:
if status == '0000':
return # no error
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, status, feedback)
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
raise ExchangeError(feedback)
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'status' in response:
if response['status'] == '0000':
return response
raise ExchangeError(self.id + ' ' + self.json(response))
return response
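

# Usage sketch (illustrative only; the public endpoints below require no API keys):
#
#   exchange = bithumb()
#   exchange.load_markets()
#   ticker = exchange.fetch_ticker('BTC/KRW')               # parsed by parse_ticker()
#   book = exchange.fetch_order_book('BTC/KRW', limit=30)   # parsed by fetch_order_book()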
|
from btc.utils import mod_inverse, int2hex
# Parameters for SECP256k1 elliptic curve (used by Bitcoin)
SECP256K1_A = 0
SECP256K1_B = 7
SECP256K1_GX = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
SECP256K1_GY = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
SECP256K1_P = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1
SECP256K1_ORDER = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
SECP256K1_ORDER_LEN = SECP256K1_ORDER.bit_length()
SECP256K1_H = 1
class ECPoint:
"""Represents a point on an elliptic curve"""
def __init__(self, x, y, a=SECP256K1_A, b=SECP256K1_B, mod=SECP256K1_P):
"""Construct an ECPoint on the elliptic curve:
y^2 = x^3 + a*x + b (mod p)
"""
# Check if the point(x,y) is the infinity
if x or y:
# Check if the point(x,y) is on the elliptic curve
assert self.is_contained(x, y, a, b, mod), \
"The point {:x}, {:x} is not on " \
"the elliptic curve".format(x, y)
self.x, self.y, self.a, self.b, self.mod = x, y, a, b, mod
def __add__(self, other):
if self.x == other.x and self.y == other.y:
return self.double(self)
else:
return self.add(self, other)
def __mul__(self, other):
return self.multiply(self, other)
def __repr__(self):
return "({:s}, {:s})".format(int2hex(self.x), int2hex(self.y))
def __eq__(self, other):
return (self.x == other.x) & (self.y == other.y)
def add(self, p1, p2):
"""Return the sum of two ECPoint"""
# The sum of infinity + p2 = p2
if p1 == ECPoint.infinity():
return p2
# The sum of p1 + infinity = p1
if p2 == ECPoint.infinity():
return p1
# Check if the points are on a vertical line
if p1.x == p2.x:
# If p1 and p2 is the same then double(point)
# else the result is infinity.
if p1.y == p2.y:
return self.double(p1)
else:
return ECPoint.infinity()
        # Sum point:
        #   x3 = s^2 - x1 - x2
        #   y3 = s*(x1 - x3) - y1
        # where s = (y2 - y1) / (x2 - x1)
p3 = ECPoint(0, 0, p1.a, p1.b, p1.mod)
dy = (p2.y - p1.y) % p1.mod
dx = (p2.x - p1.x) % p1.mod
s = (dy * mod_inverse(dx, p1.mod)) % p1.mod
p3.x = (s * s - p1.x - p2.x) % p1.mod
p3.y = (s * (p1.x - p3.x) - p1.y) % p1.mod
return p3
def double(self, p):
"""Return point * 2"""
if p == ECPoint.infinity():
return ECPoint.infinity()
        # Doubled point:
        #   x3 = s^2 - 2*x1
        #   y3 = s*(x1 - x3) - y1
        # where s = (3*x1^2 + a) / (2*y1)
p2 = ECPoint(0, 0, p.a, p.b, p.mod)
dy = (3 * p.x * p.x + p.a) % p.mod
dx = (2 * p.y) % p.mod
s = (dy * mod_inverse(dx, p.mod)) % p.mod
p2.x = (s * s - p.x - p.x) % p.mod
p2.y = (s * (p.x - p2.x) - p.y) % p.mod
return p2
def multiply(self, p, x):
"""Return p * x = p + p + ... + p"""
temp = ECPoint(p.x, p.y, p.a, p.b, p.mod)
x = x - 1
while x > 0:
if x % 2 != 0:
temp = self.double(temp) if temp == p else self.add(temp, p)
x = x - 1
x = x // 2
p = self.double(p)
return temp
@staticmethod
def infinity():
"""Return the infinity point on the elliptic curve point"""
return ECPoint(0, 0)
@staticmethod
def is_contained(x, y, a, b, mod):
"""Check if a point is on the elliptic curve"""
# The elliptic curve -- y^2 = x^3 + a*x + b (mod p)
return (y ** 2 - (x ** 3 + a * x + b)) % mod == 0
@classmethod
def get_secp256k1_y(cls, x, a=SECP256K1_A, b=SECP256K1_B, p=SECP256K1_P):
"""Calculate y of a point with x"""
# The elliptic curve -- y^2 = x^3 + a*x + b (mod p)
# To solve y^2 = z mod p:
# if p mod 4 = 3 => y = z^((p+1)/4)
# So for y^2 = x^3 + ax + b (mod p):
# y = (x^3 + ax + b)^((p+1)/4) (mod p)
y = pow(x ** 3 + x * a + b, (p + 1) // 4, p)
# Check if the point(x,y) is on the elliptic curve
assert cls.is_contained(x, y, a, b, p), \
"The point {:x}, {:x} is not on the elliptic curve".format(x, y)
return y
@staticmethod
def get_secp256k1_a():
return SECP256K1_A
@staticmethod
def get_secp256k1_b():
return SECP256K1_B
@staticmethod
def get_secp256k1_gx():
return SECP256K1_GX
@staticmethod
def get_secp256k1_gy():
return SECP256K1_GY
@staticmethod
def get_secp256k1_p():
return SECP256K1_P
@staticmethod
def get_secp256k1_order():
return SECP256K1_ORDER
@staticmethod
def get_secp256k1_order_len():
return SECP256K1_ORDER_LEN
@staticmethod
def get_secp256k1_h():
return SECP256K1_H
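

if __name__ == "__main__":
    # Minimal self-check (an illustrative sketch, not part of the original module):
    # the secp256k1 generator G lies on the curve, and 2*G computed through the
    # doubling branch of __add__ matches scalar multiplication by 2.
    G = ECPoint(SECP256K1_GX, SECP256K1_GY)
    assert ECPoint.is_contained(G.x, G.y, SECP256K1_A, SECP256K1_B, SECP256K1_P)
    assert G * 2 == G + G
    print("secp256k1 sanity checks passed for G =", G)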
|
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Proxy of a bpy.types.Struct, excluding bpy.types.ID that is implemented in datablock_proxy.py
See synchronization.md
"""
from __future__ import annotations
from functools import lru_cache
import logging
from typing import Optional, Tuple, TYPE_CHECKING, Union
import bpy.types as T # noqa
from mixer.blender_data import specifics
from mixer.blender_data.attributes import apply_attribute, diff_attribute, read_attribute, write_attribute
from mixer.blender_data.json_codec import serialize
from mixer.blender_data.misc_proxies import NonePtrProxy
from mixer.blender_data.proxy import Delta, DeltaReplace, DeltaUpdate, Proxy
if TYPE_CHECKING:
from mixer.blender_data.proxy import Context
logger = logging.getLogger(__name__)
def _create_clear_animation_data(incoming_proxy: StructProxy, existing_struct: T.bpy_struct) -> Optional[T.AnimData]:
if existing_struct.animation_data is None:
if not isinstance(incoming_proxy, NonePtrProxy):
# None (current blender value) -> not None (incoming proxy)
existing_struct.animation_data_create()
else:
if isinstance(incoming_proxy, NonePtrProxy):
# not None (current blender value) -> None (incoming proxy)
existing_struct.animation_data_clear()
return existing_struct.animation_data
@lru_cache()
def _proxy_types():
from mixer.blender_data.modifier_proxies import NodesModifierProxy
proxy_types = {}
try:
proxy_types[T.NodesModifier] = NodesModifierProxy
except AttributeError:
pass
return proxy_types
@serialize
class StructProxy(Proxy):
"""
Holds a copy of a Blender bpy_struct
"""
_serialize: Tuple[str, ...] = ("_data",)
def __init__(self):
self._data = {}
pass
def copy_data(self, other: StructProxy):
self._data = other._data
def clear_data(self):
self._data.clear()
@classmethod
def make(cls, bpy_struct: T.bpy_struct) -> StructProxy:
proxy_class = _proxy_types().get(type(bpy_struct), StructProxy)
return proxy_class()
def load(self, attribute: T.bpy_struct, context: Context) -> StructProxy:
"""
Load the attribute Blender struct into this proxy
Args:
attribute: the Blender struct to load into this proxy, (e.g an ObjectDisplay instance)
key: the identifier of attribute in its parent (e.g. "display")
context: the proxy and visit state
"""
self.clear_data()
properties = context.synchronized_properties.properties(attribute)
# includes properties from the bl_rna only, not the "view like" properties like MeshPolygon.edge_keys
# that we do not want to load anyway
properties = specifics.conditional_properties(attribute, properties)
for name, bl_rna_property in properties:
attr = getattr(attribute, name)
attr_value = read_attribute(attr, name, bl_rna_property, attribute, context)
self._data[name] = attr_value
return self
def save(
self,
attribute: T.bpy_struct,
parent: Union[T.bpy_struct, T.bpy_prop_collection],
key: Union[int, str],
context: Context,
):
"""
Save this proxy into attribute
Args:
attribute: the bpy_struct to store this proxy into
parent: (e.g an Object instance)
            key: (e.g. "display")
context: the proxy and visit state
"""
if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)):
attribute = _create_clear_animation_data(self, parent)
if attribute is None:
logger.info(f"save: attribute is None for {context.visit_state.display_path()}.{key}")
return
for k, v in self._data.items():
write_attribute(attribute, k, v, context)
def apply(
self,
attribute: T.bpy_struct,
parent: Union[T.bpy_struct, T.bpy_prop_collection],
key: Union[int, str],
delta: Delta,
context: Context,
to_blender: bool = True,
) -> Union[StructProxy, NonePtrProxy]:
"""
        Apply delta to this proxy and optionally to the Blender attribute it manages.
Args:
attribute: the struct to update (e.g. a Material instance)
parent: the attribute that contains attribute (e.g. bpy.data.materials)
key: the key that identifies attribute in parent (e.g "Material")
delta: the delta to apply
context: proxy and visit state
to_blender: update the managed Blender attribute in addition to this Proxy
"""
# WARNING parent must not be searched for key as it will fail in case of duplicate keys, with libraries
update = delta.value
if isinstance(delta, DeltaReplace):
# The structure is replaced as a whole.
# TODO explain when this occurs
self.copy_data(update)
if to_blender:
self.save(attribute, parent, key, context)
else:
# the structure is updated
if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)):
# if animation_data is updated to None (cleared), the parent structure is updated to store
# a NonePtrProxy
if to_blender:
attribute = _create_clear_animation_data(update, parent)
if attribute is None:
return NonePtrProxy()
else:
if isinstance(update, NonePtrProxy):
return NonePtrProxy()
if attribute:
for k, member_delta in update._data.items():
current_value = self._data.get(k)
try:
self._data[k] = apply_attribute(attribute, k, current_value, member_delta, context, to_blender)
except Exception as e:
logger.warning(f"Struct.apply(). Processing {member_delta}")
logger.warning(f"... for {attribute}.{k}")
logger.warning(f"... Exception: {e!r}")
logger.warning("... Update ignored")
continue
return self
def diff(
self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context
) -> Optional[Delta]:
"""
Computes the difference between the state of an item tracked by this proxy and its Blender state.
As this proxy tracks a Struct or ID, the result will be a DeltaUpdate that contains a StructProxy
        or a DatablockProxy with a Delta item per added, deleted or updated property. One expects only DeltaUpdate,
        although DeltaAddition or DeltaDeletion may be produced when an addon is loaded or unloaded while
a room is joined. This situation is not really supported as there is no handler to track
addon changes.
Args:
attribute: the struct to update (e.g. a Material instance)
key: the key that identifies attribute in parent (e.g "Material")
prop: the Property of struct as found in its enclosing object
context: proxy and visit state
"""
# Create a proxy that will be populated with attributes differences.
diff = self.__class__()
diff.init(attribute)
delta = self._diff(attribute, key, prop, context, diff)
return delta
def _diff(
self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context, diff: StructProxy
) -> Optional[Delta]:
"""
Computes the difference between the state of an item tracked by this proxy and its Blender state
and attached the difference to diff.
See diff()
Args:
attribute: the struct to update (e.g. a Material instance)
key: the key that identifies attribute in parent (e.g "Material")
prop: the Property of struct as found in its enclosing object
context: proxy and visit state
diff: the proxy that holds the difference and will be transmitted in a Delta
Returns:
a delta if any difference is found, None otherwise
"""
if attribute is None:
from mixer.blender_data.misc_proxies import NonePtrProxy
return DeltaUpdate(NonePtrProxy())
        # PERF accessing the properties from the synchronized_properties is **far** cheaper than iterating over
        # _data and then getting the properties with
        #   member_property = struct.bl_rna.properties[k]
        # a line to which py-spy attributes 20% of the total diff time!
properties = context.synchronized_properties.properties(attribute)
properties = specifics.conditional_properties(attribute, properties)
for k, member_property in properties:
try:
member = getattr(attribute, k)
except AttributeError:
logger.info(f"diff: unknown attribute {k} in {attribute}")
continue
proxy_data = self._data.get(k)
delta = diff_attribute(member, k, member_property, proxy_data, context)
if delta is not None:
diff._data[k] = delta
# TODO detect media updates (reload(), and attach a media descriptor to diff)
# difficult ?
# if anything has changed, wrap the hollow proxy in a DeltaUpdate. This may be superfluous but
# it is homogenous with additions and deletions
if len(diff._data):
return DeltaUpdate(diff)
return None
|
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import pycuda.autoinit
def allocate_buffers(engine, batch_size, data_type):
"""
This is the function to allocate buffers for input and output in the device
Args:
    engine : The TensorRT engine (a deserialized engine object, not a file path).
batch_size : The batch size for execution time.
data_type: The type of the data for input and output, for example trt.float32.
Output:
h_input_1: Input in the host.
d_input_1: Input in the device.
h_output_1: Output in the host.
d_output_1: Output in the device.
stream: CUDA stream.
"""
# Determine dimensions and create page-locked memory buffers (which won't be swapped to disk) to hold host inputs/outputs.
h_input_1 = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(data_type))
h_output = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(data_type))
# Allocate device memory for inputs and outputs.
d_input_1 = cuda.mem_alloc(h_input_1.nbytes)
d_output = cuda.mem_alloc(h_output.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
return h_input_1, d_input_1, h_output, d_output, stream
def load_images_to_buffer(pics, pagelocked_buffer):
preprocessed = np.asarray(pics).ravel()
np.copyto(pagelocked_buffer, preprocessed)
def do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):
"""
This is the function to run the inference
Args:
    engine : The TensorRT engine used to create the execution context
pics_1 : Input images to the model.
h_input_1: Input in the host
d_input_1: Input in the device
h_output_1: Output in the host
d_output_1: Output in the device
stream: CUDA stream
batch_size : Batch size for execution time
height: Height of the output image
width: Width of the output image
Output:
The list of output images
"""
print('load images to buffer')
load_images_to_buffer(pics_1, h_input_1)
with engine.create_execution_context() as context:
context.debug_sync = False
# Transfer input data to the GPU.
cuda.memcpy_htod_async(d_input_1, h_input_1, stream)
# Run inference.
print('load profiler')
context.profiler = trt.Profiler()
print('execute')
        context.execute(batch_size=batch_size, bindings=[int(d_input_1), int(d_output)])
print('Transfer predictions back from the GPU.')
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
# Return the host output.
print(h_output.shape)
out = h_output.reshape((1,-1))
return out
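

# Usage sketch (illustrative; assumes `engine` has already been deserialized from
# a serialized plan and `image` is a preprocessed numpy array matching binding 0):
#
#   h_in, d_in, h_out, d_out, stream = allocate_buffers(engine, 1, trt.float32)
#   result = do_inference(engine, image, h_in, d_in, h_out, d_out, stream, 1, height, width)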
|
"""parquet - tool for inspecting parquet files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import sys
def setup_logging(options=None):
"""Configure logging based on options."""
level = logging.DEBUG if options is not None and options.debug \
else logging.WARNING
console = logging.StreamHandler()
console.setLevel(level)
formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('parquet').setLevel(level)
logging.getLogger('parquet').addHandler(console)
def main(argv=None):
"""Run parquet utility application."""
argv = argv or sys.argv[1:]
parser = argparse.ArgumentParser('parquet',
description='Read parquet files')
parser.add_argument('--metadata', action='store_true',
help='show metadata on file')
parser.add_argument('--row-group-metadata', action='store_true',
help="show per row group metadata")
parser.add_argument('--no-data', action='store_true',
help="don't dump any data from the file")
parser.add_argument('--limit', action='store', type=int, default=-1,
help='max records to output')
parser.add_argument('--col', action='append', type=str,
help='only include this column (can be '
'specified multiple times)')
parser.add_argument('--no-headers', action='store_true',
help='skip headers in output (only applies if '
'format=csv)')
parser.add_argument('--format', action='store', type=str, default='csv',
help='format for the output data. can be csv or json.')
parser.add_argument('--debug', action='store_true',
help='log debug info to stderr')
parser.add_argument('file',
help='path to the file to parse')
args = parser.parse_args(argv)
setup_logging(args)
import parquet
if args.metadata:
parquet.dump_metadata(args.file, args.row_group_metadata)
if not args.no_data:
parquet.dump(args.file, args)
if __name__ == '__main__':
main()
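

# Example invocation (hypothetical file path; assumes this script is available
# on PATH as the `parquet` command or is run directly with Python):
#
#   parquet --metadata --limit 10 --format json path/to/data.parquet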
|
"""
Copyright Government of Canada 2021
Written by: Eric Marinier, National Microbiology Laboratory,
Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
from pathlib import Path
from proksee.assembler import Assembler
from proksee.reads import Reads
from proksee.skesa_assembler import SkesaAssembler
from proksee.resource_specification import ResourceSpecification
INPUT_DIR = os.path.join(Path(__file__).parent.absolute(), "data")
OUTPUT_DIR = os.path.join(Path(__file__).parent.absolute(), "output")
RESOURCE_SPECIFICATION = ResourceSpecification(4, 4) # 4 threads, 4 gigabytes
class TestAssembler:
def test_abstract_methods(self):
"""
Testing for crashes by simply running the abstract methods.
The methods contain only "pass" otherwise.
"""
forward_filename = os.path.join(INPUT_DIR, "NA12878_fwd.fastq")
reverse_filename = None
reads = Reads(forward_filename, reverse_filename)
# Can't instantiate abstract class, need to instantiate subclass:
assembler = SkesaAssembler(reads, OUTPUT_DIR, RESOURCE_SPECIFICATION)
Assembler.assemble(assembler)
Assembler.get_contigs_filename(assembler)
|
import heapq
import sys
from itertools import product
def load_data(path):
with open(path) as f:
return {
(i, j): int(value)
for i, line in enumerate(f.readlines())
for j, value in enumerate(line.strip())
}
def get_neighbours(loc, n_rows, n_cols):
neighbours = []
x, y = loc
for i in [-1, 1]:
if 0 <= x + i < n_rows:
neighbours.append((x + i, y))
if 0 <= y + i < n_cols:
neighbours.append((x, y + i))
return neighbours
def answer(cost):
n_rows = max(i for i, _ in cost) + 1
n_cols = max(j for _, j in cost) + 1
target = (n_rows - 1, n_cols - 1)
finalised = set()
queue = []
heapq.heappush(queue, (0, (0, 0)))
dist = {loc: float("inf") for loc in product(range(n_rows), range(n_cols))}
dist[(0, 0)] = 0
while len(finalised) < len(cost):
if target in finalised:
return dist[target]
min_cost_to_loc, loc = heapq.heappop(queue)
finalised.add(loc)
for nbr in get_neighbours(loc, n_rows, n_cols):
if nbr not in finalised:
old_dist = dist[nbr]
new_dist = dist[loc] + cost[nbr]
if new_dist < old_dist:
                    dist[nbr] = new_dist
heapq.heappush(queue, (dist[nbr], nbr))
return dist[target]
def extend(cost):
n_rows = max(i for i, _ in cost) + 1
n_cols = max(j for _, j in cost) + 1
new_cost = {}
for i in range(5):
for j in range(5):
for loc in cost:
new_cost[(n_rows * i + loc[0], n_cols * j + loc[1])] = (
cost[loc] + i + j - 1
) % 9 + 1
return new_cost
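# Worked example for extend(): a cell of risk 8 in tile (i=1, j=2) becomes
# (8 + 1 + 2 - 1) % 9 + 1 = 2, i.e. risk values above 9 wrap back around to 1.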
def part_1(cost):
return answer(cost)
def part_2(cost):
cost = extend(cost)
return answer(cost)
if __name__ == "__main__":
data = load_data(sys.argv[1])
print(f"Part 1: {part_1(data)}")
print(f"Part 2: {part_2(data)}")
|
import re
from vsmetaEncoder import vsmetaInfo
from datetime import datetime, date
class VsMetaInfoGenerator(vsmetaInfo.VsMetaInfo):
def __init__(self, feedItem):
super(VsMetaInfoGenerator, self).__init__()
self.feedItem = feedItem
self.download_url = ''
# parse feedItem
if hasattr(feedItem, 'title'): self.episodeTitle = feedItem.title
if hasattr(feedItem, 'category'): self.showTitle = feedItem.category
if hasattr(feedItem, 'summary'): self.chapterSummary = feedItem.summary
if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description
if hasattr(feedItem, 'link'): self.download_url = feedItem.link
#if hasattr(feedItem, 'published'): self.episodeReleaseDate = datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT" )
if hasattr(feedItem, 'published'): self.setEpisodeDate(datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT").date())
if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description
#cleaning some parts
self.chapterSummary = self.chapterSummary.replace('![CDATA[', '')
self.chapterSummary = self.chapterSummary.replace(']]', '')
self.tvshowLocked = True
self.episodeLocked = True
        episodeFound = re.search(r'[(](\d*)\/\d[)]', self.episodeTitle)
        if episodeFound is not None:
            self.episode = int(episodeFound.group(1))
        seasonFound = re.search(r' Staffel (\d*) ', self.episodeTitle)
        if seasonFound is not None:
            self.season = int(seasonFound.group(1))
# set other defaults
self.episodeLocked = False
self.tvshowLocked = False
self.identifyingTerm = '%s - %s -s%se%s' % (self.showTitle, self.episodeTitle, self.season, self.episode)
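        # Example (illustration only): a feed title such as "Folge 3 (2/8) Staffel 1 "
        # yields episode 2 from the "(2/8)" group and season 1 from " Staffel 1 "
        # (the season regex only matches when the number is surrounded by spaces).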
def isUsable(self) ->bool:
if (len(self.episodeTitle) > 0 or len(self.showTitle) > 0 or len(self.showTitle2) > 0) and len(self.download_url) > 0:
return True
else:
return False
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pointnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import pointnet
class PointNetTest(test_utils.TestCase, parameterized.TestCase):
def _testOutShape(self, p, input_shape, expected_shape):
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random_uniform((batch_size, num_points, 3)),
features=tf.random_uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random_uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
np_result = sess.run(result)
self.assertEqual(np_result.shape, expected_shape)
@parameterized.parameters((128, 3), (128, 9), (256, 3))
def testPointNetClassifier(self, feature_dims, input_dims):
p = pointnet.PointNet().Classifier(
input_dims=input_dims, feature_dims=feature_dims)
# Network should produce a global feature of feature_dims.
self.assertEqual(p.output_dim, feature_dims)
self._testOutShape(p, (8, 128, input_dims), (8, feature_dims))
def testPointNetSegmentation(self):
p = pointnet.PointNet().Segmentation()
    # Network takes a batch_size=8 input and produces a 128-dim pointwise feature.
self.assertEqual(p.output_dim, 128)
self._testOutShape(p, (8, 100, 3), (8, 100, 128))
def testPointNetSegmentationShapeNet(self):
p = pointnet.PointNet().SegmentationShapeNet()
self.assertEqual(p.output_dim, 128)
self._testOutShape(p, (8, 2000, 3), (8, 2000, 128))
@parameterized.parameters((128, 3), (128, 9), (256, 3))
def testPointNetPPClassifier(self, feature_dims, input_dims):
p = pointnet.PointNetPP().Classifier(
input_dims=input_dims, feature_dims=feature_dims)
# Network should produce a global feature of feature_dims.
self.assertEqual(p.output_dim, feature_dims)
self._testOutShape(p, (8, 1024, input_dims), (8, feature_dims))
if __name__ == '__main__':
tf.test.main()
|
"""empty message
Revision ID: f025f89b250b
Revises: 37eabcbbb8fb
Create Date: 2019-10-19 18:12:48.976655
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f025f89b250b'
down_revision = '37eabcbbb8fb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('df_goods_image',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('sku', sa.Integer(), nullable=True),
sa.Column('image', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['sku'], ['df_goods_sku.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('df_goods_image')
# ### end Alembic commands ###
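# Standard Alembic CLI usage for this revision (added note, not part of the generated file):
#   alembic upgrade f025f89b250b     # apply this migration
#   alembic downgrade 37eabcbbb8fb   # roll back to the previous revision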
|
import logging
from pyhocon import ConfigTree # noqa: F401
from typing import Any # noqa: F401
from databuilder.extractor.base_extractor import Extractor
from databuilder.extractor.dashboard.mode_analytics.mode_dashboard_utils import ModeDashboardUtils
from databuilder.rest_api.mode_analytics.mode_paginated_rest_api_query import ModePaginatedRestApiQuery
from databuilder.rest_api.rest_api_query import RestApiQuery # noqa: F401
LOGGER = logging.getLogger(__name__)
class ModeDashboardUsageExtractor(Extractor):
"""
An Extractor that extracts a Mode dashboard's accumulated view count
"""
def init(self, conf):
# type: (ConfigTree) -> None
self._conf = conf
restapi_query = self._build_restapi_query()
self._extractor = ModeDashboardUtils.create_mode_rest_api_extractor(restapi_query=restapi_query,
conf=self._conf)
def extract(self):
# type: () -> Any
return self._extractor.extract()
def get_scope(self):
# type: () -> str
return 'extractor.mode_dashboard_usage'
def _build_restapi_query(self):
# type: () -> RestApiQuery
"""
Build the REST API query. To get Mode dashboard usage, two APIs (the spaces API and the
reports API) need to be called and their results joined together.
:return: A RestApiQuery that provides Mode dashboard usage metadata
"""
# https://mode.com/developer/api-reference/analytics/reports/#listReportsInSpace
reports_url_template = 'https://app.mode.com/api/{organization}/spaces/{dashboard_group_id}/reports'
spaces_query = ModeDashboardUtils.get_spaces_query_api(conf=self._conf)
params = ModeDashboardUtils.get_auth_params(conf=self._conf)
# Reports
# JSONPATH expression. it goes into array which is located in _embedded.reports and then extracts token,
# and view_count
json_path = '_embedded.reports[*].[token,view_count]'
field_names = ['dashboard_id', 'accumulated_view_count']
reports_query = ModePaginatedRestApiQuery(query_to_join=spaces_query, url=reports_url_template, params=params,
json_path=json_path, field_names=field_names, skip_no_result=True)
return reports_query
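# --- Hedged usage sketch (not part of the original module) ---
# Assuming a pyhocon ConfigTree that carries the organization and auth settings
# ModeDashboardUtils expects (exact key names omitted here), the extractor is
# typically driven by calling extract() repeatedly until it returns None:
#
#   extractor = ModeDashboardUsageExtractor()
#   extractor.init(conf)                  # conf: ConfigTree (assumed prepared elsewhere)
#   record = extractor.extract()
#   while record is not None:
#       handle(record)                    # hypothetical consumer
#       record = extractor.extract()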
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Vehicle, ImageSpace
from darknet_dmg import detect
@receiver(post_save, sender=Vehicle)
def damage_detection(sender, instance, **kwargs):
image = ImageSpace.objects.filter(vehicle=instance.id).last()
path = image.image.path
output = detect.detect(path)
print(output)
#./darknet detector test data/obj.data cfg/yolov4-obj.cfg /mydrive/yolov4/backup/yolov4-obj_3000.weights /mydrive/images/car2.jpg -thresh 0.3
|
import dash
import dash_core_components as dcc
import dash_html_components as html
about_layout = [html.Div(children=[
html.Img(src='/assets/logo.png', className='logo-big', style={'marginTop': 'auto', 'marginBottom': 'auto'}),
html.Div(children='Network and Sentiment Analysis on Amazon Dataset', className="text-center title", style={'marginTop': '24px'}),
html.Div(children='Data Analytics Project', className="text-center subtitle", style={'marginTop': '24px', 'marginBottom': 'auto'}),
html.Div(children=[
dcc.Link(children=[
html.Div(children=[
html.Div(className='img-dataset'),
html.Div(children='Dataset Exploration'),
], className="zan-box-shadow card-small")],
href='/exploration', className="text-decor-none", style={'marginLeft': 'auto'}),
dcc.Link(children=[
html.Div(children=[
html.Div(className='img-network'),
html.Div(children='Network Analysis'),
], className="zan-box-shadow card-small")],
href='/network', className="text-decor-none"),
dcc.Link(children=[
html.Div(children=[
html.Div(className='img-sentiment'),
html.Div(children='Sentiment Analysis'),
], className="zan-box-shadow card-small")],
href='/sentiment', className="text-decor-none", style={'marginRight': 'auto'}),
], className="flex-row", id='about', style={'marginTop': '24px', 'marginBottom': '24px'}),
html.Div(children='Authors: Christian Bernasconi - Gabriele Ferrario - Riccardo Pozzi - Marco Ripamonti', className="text-center caption", style={'marginTop': 'auto', 'marginBottom': '10px'}),
html.Div(children='Date: 06/07/2020', className="text-center caption", style={'marginBottom': '10px'}),
], className='flex-column-center p-20')]
|
"""
WSGI config for athena_dream_stodio_33748 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'athena_dream_stodio_33748.settings')
application = get_wsgi_application()
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import ssl
import sys
import traceback
import asyncio
import socket
from typing import Tuple, Union, List, TYPE_CHECKING, Optional, Set
from collections import defaultdict
from ipaddress import IPv4Network, IPv6Network, ip_address, IPv6Address
import itertools
import logging
import aiorpcx
from aiorpcx import RPCSession, Notification, NetAddress, NewlineFramer
from aiorpcx.curio import timeout_after, TaskTimeout
from aiorpcx.jsonrpc import JSONRPC, CodeMessageError
from aiorpcx.rawsocket import RSClient
import certifi
from .util import ignore_exceptions, log_exceptions, bfh, SilentTaskGroup
from . import util
from . import x509
from . import pem
from . import version
from . import blockchain
from .blockchain import Blockchain
from . import constants
from .i18n import _
from .logging import Logger
if TYPE_CHECKING:
from .network import Network
from .simple_config import SimpleConfig
ca_path = certifi.where()
BUCKET_NAME_OF_ONION_SERVERS = 'onion'
MAX_INCOMING_MSG_SIZE = 1_000_000 # in bytes
class NetworkTimeout:
# seconds
class Generic:
NORMAL = 30
RELAXED = 45
MOST_RELAXED = 180
class Urgent(Generic):
NORMAL = 10
RELAXED = 20
MOST_RELAXED = 60
class NotificationSession(RPCSession):
def __init__(self, *args, **kwargs):
super(NotificationSession, self).__init__(*args, **kwargs)
self.subscriptions = defaultdict(list)
self.cache = {}
self.default_timeout = NetworkTimeout.Generic.NORMAL
self._msg_counter = itertools.count(start=1)
self.interface = None # type: Optional[Interface]
self.cost_hard_limit = 0 # disable aiorpcx resource limits
async def handle_request(self, request):
self.maybe_log(f"--> {request}")
try:
if isinstance(request, Notification):
params, result = request.args[:-1], request.args[-1]
key = self.get_hashable_key_for_rpc_call(request.method, params)
if key in self.subscriptions:
self.cache[key] = result
for queue in self.subscriptions[key]:
await queue.put(request.args)
else:
raise Exception(f'unexpected notification')
else:
raise Exception(f'unexpected request. not a notification')
except Exception as e:
self.interface.logger.info(f"error handling request {request}. exc: {repr(e)}")
await self.close()
async def send_request(self, *args, timeout=None, **kwargs):
# note: semaphores/timeouts/backpressure etc are handled by
# aiorpcx. the timeout arg here in most cases should not be set
msg_id = next(self._msg_counter)
self.maybe_log(f"<-- {args} {kwargs} (id: {msg_id})")
try:
# note: RPCSession.send_request raises TaskTimeout in case of a timeout.
# TaskTimeout is a subclass of CancelledError, which is *suppressed* in TaskGroups
response = await asyncio.wait_for(
super().send_request(*args, **kwargs),
timeout)
except (TaskTimeout, asyncio.TimeoutError) as e:
raise RequestTimedOut(f'request timed out: {args} (id: {msg_id})') from e
except CodeMessageError as e:
self.maybe_log(f"--> {repr(e)} (id: {msg_id})")
raise
else:
self.maybe_log(f"--> {response} (id: {msg_id})")
return response
def set_default_timeout(self, timeout):
self.sent_request_timeout = timeout
self.max_send_delay = timeout
async def subscribe(self, method: str, params: List, queue: asyncio.Queue):
# note: until the cache is written for the first time,
# each 'subscribe' call might make a request on the network.
key = self.get_hashable_key_for_rpc_call(method, params)
self.subscriptions[key].append(queue)
if key in self.cache:
result = self.cache[key]
else:
result = await self.send_request(method, params)
self.cache[key] = result
await queue.put(params + [result])
def unsubscribe(self, queue):
"""Unsubscribe a callback to free object references to enable GC."""
# note: we can't unsubscribe from the server, so we keep receiving
# subsequent notifications
for v in self.subscriptions.values():
if queue in v:
v.remove(queue)
@classmethod
def get_hashable_key_for_rpc_call(cls, method, params):
"""Hashable index for subscriptions and cache"""
return str(method) + repr(params)
def maybe_log(self, msg: str) -> None:
if not self.interface: return
if self.interface.debug or self.interface.network.debug:
self.interface.logger.debug(msg)
def default_framer(self):
# overridden so that max_size can be customized
return NewlineFramer(max_size=MAX_INCOMING_MSG_SIZE)
class NetworkException(Exception): pass
class GracefulDisconnect(NetworkException):
log_level = logging.INFO
def __init__(self, *args, log_level=None, **kwargs):
Exception.__init__(self, *args, **kwargs)
if log_level is not None:
self.log_level = log_level
class RequestTimedOut(GracefulDisconnect):
def __str__(self):
return _("Network request timed out.")
class RequestCorrupted(GracefulDisconnect): pass
class ErrorParsingSSLCert(Exception): pass
class ErrorGettingSSLCertFromServer(Exception): pass
class ConnectError(NetworkException): pass
class _RSClient(RSClient):
async def create_connection(self):
try:
return await super().create_connection()
except OSError as e:
# note: using "from e" here will set __cause__ of ConnectError
raise ConnectError(e) from e
def deserialize_server(server_str: str) -> Tuple[str, str, str]:
# host might be IPv6 address, hence do rsplit:
host, port, protocol = str(server_str).rsplit(':', 2)
if not host:
raise ValueError('host must not be empty')
if host[0] == '[' and host[-1] == ']': # IPv6
host = host[1:-1]
if protocol not in ('s', 't'):
raise ValueError('invalid network protocol: {}'.format(protocol))
net_addr = NetAddress(host, port) # this validates host and port
host = str(net_addr.host) # canonical form (if e.g. IPv6 address)
return host, port, protocol
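# Illustrative examples (added for clarity; not from the original source):
#   deserialize_server('electrum.example.org:50002:s')  ->  ('electrum.example.org', '50002', 's')
#   deserialize_server('[::1]:50001:t')                  ->  ('::1', '50001', 't')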
def serialize_server(host: str, port: Union[str, int], protocol: str) -> str:
return str(':'.join([host, str(port), protocol]))
def _get_cert_path_for_host(*, config: 'SimpleConfig', host: str) -> str:
filename = host
try:
ip = ip_address(host)
except ValueError:
pass
else:
if isinstance(ip, IPv6Address):
filename = f"ipv6_{ip.packed.hex()}"
return os.path.join(config.path, 'certs', filename)
class Interface(Logger):
LOGGING_SHORTCUT = 'i'
def __init__(self, network: 'Network', server: str, proxy: Optional[dict]):
self.ready = asyncio.Future()
self.got_disconnected = asyncio.Future()
self.server = server
self.host, self.port, self.protocol = deserialize_server(self.server)
self.port = int(self.port)
Logger.__init__(self)
assert network.config.path
self.cert_path = _get_cert_path_for_host(config=network.config, host=self.host)
self.blockchain = None # type: Optional[Blockchain]
self._requested_chunks = set() # type: Set[int]
self.network = network
self._set_proxy(proxy)
self.session = None # type: Optional[NotificationSession]
self._ipaddr_bucket = None
self.tip_header = None
self.tip = 0
# Dump network messages (only for this interface). Set at runtime from the console.
self.debug = False
asyncio.run_coroutine_threadsafe(
self.network.main_taskgroup.spawn(self.run()), self.network.asyncio_loop)
self.group = SilentTaskGroup()
def diagnostic_name(self):
return str(NetAddress(self.host, self.port))
def __str__(self):
return f"<Interface {self.diagnostic_name()}>"
def _set_proxy(self, proxy: dict):
if proxy:
username, pw = proxy.get('user'), proxy.get('password')
if not username or not pw:
auth = None
else:
auth = aiorpcx.socks.SOCKSUserAuth(username, pw)
addr = NetAddress(proxy['host'], proxy['port'])
if proxy['mode'] == "socks4":
self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS4a, auth)
elif proxy['mode'] == "socks5":
self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS5, auth)
else:
raise NotImplementedError # http proxy not available with aiorpcx
else:
self.proxy = None
async def is_server_ca_signed(self, ca_ssl_context):
"""Given a CA enforcing SSL context, returns True if the connection
can be established. Returns False if the server has a self-signed
certificate but otherwise is okay. Any other failures raise.
"""
try:
await self.open_session(ca_ssl_context, exit_early=True)
except ConnectError as e:
cause = e.__cause__
if isinstance(cause, ssl.SSLError) and cause.reason == 'CERTIFICATE_VERIFY_FAILED':
# failures due to self-signed certs are normal
return False
raise
return True
async def _try_saving_ssl_cert_for_first_time(self, ca_ssl_context):
ca_signed = await self.is_server_ca_signed(ca_ssl_context)
if ca_signed:
with open(self.cert_path, 'w') as f:
# empty file means this is CA signed, not self-signed
f.write('')
else:
await self.save_certificate()
def _is_saved_ssl_cert_available(self):
if not os.path.exists(self.cert_path):
return False
with open(self.cert_path, 'r') as f:
contents = f.read()
if contents == '': # CA signed
return True
# pinned self-signed cert
try:
b = pem.dePem(contents, 'CERTIFICATE')
except SyntaxError as e:
self.logger.info(f"error parsing already saved cert: {e}")
raise ErrorParsingSSLCert(e) from e
try:
x = x509.X509(b)
except Exception as e:
self.logger.info(f"error parsing already saved cert: {e}")
raise ErrorParsingSSLCert(e) from e
try:
x.check_date()
return True
except x509.CertificateError as e:
self.logger.info(f"certificate has expired: {e}")
os.unlink(self.cert_path) # delete pinned cert only in this case
return False
async def _get_ssl_context(self):
if self.protocol != 's':
# using plaintext TCP
return None
# see if we already have cert for this server; or get it for the first time
ca_sslc = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path)
if not self._is_saved_ssl_cert_available():
try:
await self._try_saving_ssl_cert_for_first_time(ca_sslc)
except (OSError, ConnectError, aiorpcx.socks.SOCKSError) as e:
raise ErrorGettingSSLCertFromServer(e) from e
# now we have a file saved in our certificate store
siz = os.stat(self.cert_path).st_size
if siz == 0:
# CA signed cert
sslc = ca_sslc
else:
# pinned self-signed cert
sslc = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.cert_path)
sslc.check_hostname = False
return sslc
def handle_disconnect(func):
async def wrapper_func(self: 'Interface', *args, **kwargs):
try:
return await func(self, *args, **kwargs)
except GracefulDisconnect as e:
self.logger.log(e.log_level, f"disconnecting due to {repr(e)}")
except aiorpcx.jsonrpc.RPCError as e:
self.logger.warning(f"disconnecting due to {repr(e)}")
self.logger.debug(f"(disconnect) trace for {repr(e)}", exc_info=True)
finally:
await self.network.connection_down(self)
if not self.got_disconnected.done():
self.got_disconnected.set_result(1)
# if was not 'ready' yet, schedule waiting coroutines:
self.ready.cancel()
return wrapper_func
@ignore_exceptions # do not kill main_taskgroup
@log_exceptions
@handle_disconnect
async def run(self):
try:
ssl_context = await self._get_ssl_context()
except (ErrorParsingSSLCert, ErrorGettingSSLCertFromServer) as e:
self.logger.info(f'disconnecting due to: {repr(e)}')
return
try:
await self.open_session(ssl_context)
except (asyncio.CancelledError, ConnectError, aiorpcx.socks.SOCKSError) as e:
# make SSL errors for main interface more visible (to help servers ops debug cert pinning issues)
if (isinstance(e, ConnectError) and isinstance(e.__cause__, ssl.SSLError)
and self.is_main_server() and not self.network.auto_connect):
self.logger.warning(f'Cannot connect to main server due to SSL error '
f'(maybe cert changed compared to "{self.cert_path}"). Exc: {repr(e)}')
else:
self.logger.info(f'disconnecting due to: {repr(e)}')
return
def _mark_ready(self) -> None:
if self.ready.cancelled():
raise GracefulDisconnect('conn establishment was too slow; *ready* future was cancelled')
if self.ready.done():
return
assert self.tip_header
chain = blockchain.check_header(self.tip_header)
if not chain:
self.blockchain = blockchain.get_best_chain()
else:
self.blockchain = chain
assert self.blockchain is not None
self.logger.info(f"set blockchain with height {self.blockchain.height()}")
self.ready.set_result(1)
async def save_certificate(self):
if not os.path.exists(self.cert_path):
# we may need to retry this a few times, in case the handshake hasn't completed
for _ in range(10):
dercert = await self.get_certificate()
if dercert:
self.logger.info("succeeded in getting cert")
with open(self.cert_path, 'w') as f:
cert = ssl.DER_cert_to_PEM_cert(dercert)
# workaround android bug
cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
f.write(cert)
# even though close flushes we can't fsync when closed.
# and we must flush before fsyncing, cause flush flushes to OS buffer
# fsync writes to OS buffer to disk
f.flush()
os.fsync(f.fileno())
break
await asyncio.sleep(1)
else:
raise GracefulDisconnect("could not get certificate after 10 tries")
async def get_certificate(self):
sslc = ssl.SSLContext()
try:
async with _RSClient(session_factory=RPCSession,
host=self.host, port=self.port,
ssl=sslc, proxy=self.proxy) as session:
return session.transport._asyncio_transport._ssl_protocol._sslpipe._sslobj.getpeercert(True)
except ValueError:
return None
async def get_block_header(self, height, assert_mode):
self.logger.info(f'requesting block header {height} in mode {assert_mode}')
# use lower timeout as we usually have network.bhi_lock here
timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Urgent)
res = await self.session.send_request('blockchain.block.headers', [height,1], timeout=timeout)
return blockchain.deserialize_header(bytes.fromhex(res['hex']), height)
async def request_chunk(self, height: int, tip=None, *, can_return_early=False):
index = height // 2016
if can_return_early and index in self._requested_chunks:
return
self.logger.info(f"requesting chunk from height {height}")
size = 2016
if tip is not None:
size = min(size, tip - index * 2016 + 1)
size = max(size, 0)
try:
self._requested_chunks.add(index)
res = await self.session.send_request('blockchain.block.headers', [index * 2016, size])
finally:
self._requested_chunks.discard(index)
conn = self.blockchain.connect_chunk(index, res['hex'])
if not conn:
return conn, 0
return conn, res['count']
def is_main_server(self) -> bool:
return self.network.default_server == self.server
async def open_session(self, sslc, exit_early=False):
async with _RSClient(session_factory=NotificationSession,
host=self.host, port=self.port,
ssl=sslc, proxy=self.proxy) as session:
self.session = session # type: NotificationSession
self.session.interface = self
self.session.set_default_timeout(self.network.get_network_timeout_seconds(NetworkTimeout.Generic))
try:
ver = await session.send_request('server.version', [self.client_name(), version.PROTOCOL_VERSION])
except aiorpcx.jsonrpc.RPCError as e:
raise GracefulDisconnect(e) # probably 'unsupported protocol version'
if exit_early:
return
if not self.network.check_interface_against_healthy_spread_of_connected_servers(self):
raise GracefulDisconnect(f'too many connected servers already '
f'in bucket {self.bucket_based_on_ipaddress()}')
self.logger.info(f"connection established. version: {ver}")
try:
async with self.group as group:
await group.spawn(self.ping)
await group.spawn(self.run_fetch_blocks)
await group.spawn(self.monitor_connection)
except aiorpcx.jsonrpc.RPCError as e:
if e.code in (JSONRPC.EXCESSIVE_RESOURCE_USAGE,
JSONRPC.SERVER_BUSY,
JSONRPC.METHOD_NOT_FOUND):
raise GracefulDisconnect(e, log_level=logging.WARNING) from e
raise
async def monitor_connection(self):
while True:
await asyncio.sleep(1)
if not self.session or self.session.is_closing():
raise GracefulDisconnect('session was closed')
async def ping(self):
while True:
await asyncio.sleep(300)
await self.session.send_request('server.ping')
async def close(self):
if self.session:
await self.session.close()
# monitor_connection will cancel tasks
async def run_fetch_blocks(self):
header_queue = asyncio.Queue()
await self.session.subscribe('blockchain.headers.subscribe', [True], header_queue)
while True:
item = await header_queue.get()
print(item)
raw_header = item[1]
print(raw_header)
height = raw_header['height']
header = blockchain.deserialize_header(bfh(raw_header['hex']), height)
self.tip_header = header
self.tip = height
if self.tip < constants.net.max_checkpoint():
raise GracefulDisconnect('server tip below max checkpoint')
self._mark_ready()
await self._process_header_at_tip()
self.network.trigger_callback('network_updated')
await self.network.switch_unwanted_fork_interface()
await self.network.switch_lagging_interface()
async def _process_header_at_tip(self):
height, header = self.tip, self.tip_header
async with self.network.bhi_lock:
if self.blockchain.height() >= height and self.blockchain.check_header(header):
# another interface amended the blockchain
self.logger.info(f"skipping header {height}")
return
_, height = await self.step(height, header)
# in the simple case, height == self.tip+1
if height <= self.tip:
await self.sync_until(height)
self.network.trigger_callback('blockchain_updated')
async def sync_until(self, height, next_height=None):
if next_height is None:
next_height = self.tip
last = None
while last is None or height <= next_height:
prev_last, prev_height = last, height
if next_height > height + 10:
could_connect, num_headers = await self.request_chunk(height, next_height)
if not could_connect:
if height <= constants.net.max_checkpoint():
raise GracefulDisconnect('server chain conflicts with checkpoints or genesis')
last, height = await self.step(height)
continue
self.network.trigger_callback('network_updated')
height = (height // 2016 * 2016) + num_headers
assert height <= next_height+1, (height, self.tip)
last = 'catchup'
else:
last, height = await self.step(height)
assert (prev_last, prev_height) != (last, height), 'had to prevent infinite loop in interface.sync_until'
return last, height
async def step(self, height, header=None):
assert 0 <= height <= self.tip, (height, self.tip)
if header is None:
header = await self.get_block_header(height, 'catchup')
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain:
self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
# note: there is an edge case here that is not handled.
# we might know the blockhash (enough for check_header) but
# not have the header itself. e.g. regtest chain with only genesis.
# this situation resolves itself on the next block
return 'catchup', height+1
can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
if not can_connect:
self.logger.info(f"can't connect {height}")
height, header, bad, bad_header = await self._search_headers_backwards(height, header)
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
assert chain or can_connect
if can_connect:
self.logger.info(f"could connect {height}")
height += 1
if isinstance(can_connect, Blockchain): # not when mocking
self.blockchain = can_connect
self.blockchain.save_header(header)
return 'catchup', height
good, bad, bad_header = await self._search_headers_binary(height, bad, bad_header, chain)
return await self._resolve_potential_chain_fork_given_forkpoint(good, bad, bad_header)
async def _search_headers_binary(self, height, bad, bad_header, chain):
assert bad == bad_header['block_height']
_assert_header_does_not_check_against_any_chain(bad_header)
self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
good = height
while True:
assert good < bad, (good, bad)
height = (good + bad) // 2
self.logger.info(f"binary step. good {good}, bad {bad}, height {height}")
header = await self.get_block_header(height, 'binary')
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain:
self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
good = height
else:
bad = height
bad_header = header
if good + 1 == bad:
break
mock = 'mock' in bad_header and bad_header['mock']['connect'](height)
real = not mock and self.blockchain.can_connect(bad_header, check_height=False)
if not real and not mock:
raise Exception('unexpected bad header during binary: {}'.format(bad_header))
_assert_header_does_not_check_against_any_chain(bad_header)
self.logger.info(f"binary search exited. good {good}, bad {bad}")
return good, bad, bad_header
async def _resolve_potential_chain_fork_given_forkpoint(self, good, bad, bad_header):
assert good + 1 == bad
assert bad == bad_header['block_height']
_assert_header_does_not_check_against_any_chain(bad_header)
# 'good' is the height of a block 'good_header', somewhere in self.blockchain.
# bad_header connects to good_header; bad_header itself is NOT in self.blockchain.
bh = self.blockchain.height()
assert bh >= good, (bh, good)
if bh == good:
height = good + 1
self.logger.info(f"catching up from {height}")
return 'no_fork', height
# this is a new fork we don't yet have
height = bad + 1
self.logger.info(f"new fork at bad height {bad}")
forkfun = self.blockchain.fork if 'mock' not in bad_header else bad_header['mock']['fork']
b = forkfun(bad_header) # type: Blockchain
self.blockchain = b
assert b.forkpoint == bad
return 'fork', height
async def _search_headers_backwards(self, height, header):
async def iterate():
nonlocal height, header
checkp = False
if height <= constants.net.max_checkpoint():
height = constants.net.max_checkpoint()
checkp = True
header = await self.get_block_header(height, 'backward')
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
if chain or can_connect:
return False
if checkp:
raise GracefulDisconnect("server chain conflicts with checkpoints")
return True
bad, bad_header = height, header
_assert_header_does_not_check_against_any_chain(bad_header)
with blockchain.blockchains_lock: chains = list(blockchain.blockchains.values())
local_max = max([0] + [x.height() for x in chains]) if 'mock' not in header else float('inf')
height = min(local_max + 1, height - 1)
while await iterate():
bad, bad_header = height, header
delta = self.tip - height
height = self.tip - 2 * delta
_assert_header_does_not_check_against_any_chain(bad_header)
self.logger.info(f"exiting backward mode at {height}")
return height, header, bad, bad_header
@classmethod
def client_name(cls) -> str:
return f'electrum/{version.ELECTRUM_VERSION}'
def is_tor(self):
return self.host.endswith('.onion')
def ip_addr(self) -> Optional[str]:
session = self.session
if not session: return None
peer_addr = session.remote_address()
if not peer_addr: return None
return str(peer_addr.host)
def bucket_based_on_ipaddress(self) -> str:
def do_bucket():
if self.is_tor():
return BUCKET_NAME_OF_ONION_SERVERS
try:
ip_addr = ip_address(self.ip_addr())
except ValueError:
return ''
if not ip_addr:
return ''
if ip_addr.version == 4:
slash16 = IPv4Network(ip_addr).supernet(prefixlen_diff=32-16)
return str(slash16)
elif ip_addr.version == 6:
slash48 = IPv6Network(ip_addr).supernet(prefixlen_diff=128-48)
return str(slash48)
return ''
if not self._ipaddr_bucket:
self._ipaddr_bucket = do_bucket()
return self._ipaddr_bucket
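# Illustrative bucket values (added for clarity; not from the original source):
#   a server at 93.184.216.34    -> '93.184.0.0/16'      (IPv4 grouped by /16)
#   a server at 2606:2800:220::1 -> '2606:2800:220::/48' (IPv6 grouped by /48)
#   a *.onion server             -> 'onion'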
def _assert_header_does_not_check_against_any_chain(header: dict) -> None:
chain_bad = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain_bad:
raise Exception('bad_header must not check!')
def check_cert(host, cert):
try:
b = pem.dePem(cert, 'CERTIFICATE')
x = x509.X509(b)
except:
traceback.print_exc(file=sys.stdout)
return
try:
x.check_date()
expired = False
except:
expired = True
m = "host: %s\n"%host
m += "has_expired: %s\n"% expired
util.print_msg(m)
# Used by tests
def _match_hostname(name, val):
if val == name:
return True
return val.startswith('*.') and name.endswith(val[1:])
def test_certificates():
from .simple_config import SimpleConfig
config = SimpleConfig()
mydir = os.path.join(config.path, "certs")
certs = os.listdir(mydir)
for c in certs:
p = os.path.join(mydir,c)
with open(p, encoding='utf-8') as f:
cert = f.read()
check_cert(c, cert)
if __name__ == "__main__":
test_certificates()
|
from __future__ import division, print_function
# LIBTBX_SET_DISPATCHER_NAME mmtbx.ss_idealization
from mmtbx.secondary_structure import build as ssb
import mmtbx.model
import iotbx.pdb
import os, sys
def run(args):
pdb_inp = iotbx.pdb.input(source_info=None,
file_name=args[0])
model = mmtbx.model.manager(
model_input=pdb_inp)
params = ssb.master_phil.extract()
params.ss_idealization.file_name_before_regularization="before_reg.pdb"
params.ss_idealization.enabled=True
rm = ssb.substitute_ss(
model = model,
params = params,
log=sys.stdout)
out_fname = "%s_ss_ideal.pdb" % os.path.basename(args[0])
txt = model.model_as_pdb()
with open(out_fname, 'w') as f:
f.write(txt)
print "File saved: %s" % out_fname
print "All done."
if __name__ == "__main__" :
run(sys.argv[1:])
|
from collections import deque
from typing import Dict, Set, Deque, List, Optional
from sc2.data import Race
from sc2.position import Point2
from sharpy.events import UnitDestroyedEvent
from sharpy.interfaces import IMemoryManager
from sharpy.managers.core import ManagerBase
from sc2.ids.unit_typeid import UnitTypeId
from sc2.unit import Unit
from sc2.units import Units
MAX_SNAPSHOTS_PER_UNIT = 10
BURROWED_ALIAS: Set[UnitTypeId] = {
UnitTypeId.BANELINGBURROWED,
UnitTypeId.CREEPTUMORBURROWED,
UnitTypeId.DRONEBURROWED,
UnitTypeId.HYDRALISKBURROWED,
UnitTypeId.INFESTORBURROWED,
UnitTypeId.INFESTORTERRANBURROWED,
UnitTypeId.LURKERMPBURROWED,
UnitTypeId.QUEENBURROWED,
UnitTypeId.RAVAGERBURROWED,
UnitTypeId.ROACHBURROWED,
UnitTypeId.SWARMHOSTBURROWEDMP,
UnitTypeId.ULTRALISKBURROWED,
UnitTypeId.WIDOWMINEBURROWED,
UnitTypeId.ZERGLINGBURROWED,
}
class MemoryManager(ManagerBase, IMemoryManager):
"""Manages memories of where enemy units have last been seen.
Structures are ignored because they have two tags. One for the real building and another
for the building's snapshot when under fog of war.
"""
detectors: Set[UnitTypeId]
def __init__(self):
super().__init__()
# Dictionary of units that we remember the position of. Keyed by unit tag.
# Deque is used so that new snapshots are added to the left, and old ones are removed from the right.
self._memory_units_by_tag: Dict[int, Deque[Unit]] = dict()
# Dictionary of units that we know of, but which are no longer present at the location where they were last seen. Keyed by unit tag.
self._archive_units_by_tag: Dict[int, Deque[Unit]] = dict()
self._tags_destroyed: Set[int] = set()
self.unit_dict: Dict[int, Deque[Unit]] = dict()
self.expire_air = 60 # Time in seconds when snapshot expires
self.expire_ground = 360 # Time in seconds when snapshot expires
async def start(self, knowledge: "Knowledge"):
await super().start(knowledge)
if knowledge.my_race == Race.Protoss:
self.detectors = {UnitTypeId.PHOTONCANNON, UnitTypeId.OBSERVER, UnitTypeId.OBSERVERSIEGEMODE}
elif knowledge.my_race == Race.Terran:
self.detectors = {UnitTypeId.MISSILETURRET, UnitTypeId.RAVEN}
else:
self.detectors = {UnitTypeId.OVERSEERSIEGEMODE, UnitTypeId.OVERSEER, UnitTypeId.SPORECRAWLER}
knowledge.register_on_unit_destroyed_listener(self.on_unit_destroyed)
async def update(self):
detectors = None
self.unit_dict.clear()
# Iterate all currently visible enemy units.
# self.ai.enemy_units is used here because it does not include memory lane units
for unit in self.ai.enemy_units:
# Make sure that we have not added the same unit tag to both dictionaries, as that could
# create very confusing bugs.
assert not (unit.tag in self._memory_units_by_tag and unit.tag in self._archive_units_by_tag)
# Ignore certain types
if unit.type_id in ignored_unit_types:
continue
if unit.tag in self._archive_units_by_tag:
snaps = self._archive_units_by_tag.pop(unit.tag)
else:
snaps = self._memory_units_by_tag.get(unit.tag, deque(maxlen=MAX_SNAPSHOTS_PER_UNIT))
snaps.appendleft(unit)
if unit.tag not in self._memory_units_by_tag:
self._memory_units_by_tag[unit.tag] = snaps
self.unit_dict[unit.tag] = unit
memory_tags_to_remove = list()
for unit_tag in self._memory_units_by_tag:
if self.is_unit_visible(unit_tag):
continue
snap = self.get_latest_snapshot(unit_tag)
points: List[Point2] = []
points.append(Point2((int(snap.position.x), int(snap.position.y))))
points.append(Point2((int(snap.position.x + 1), int(snap.position.y))))
points.append(Point2((int(snap.position.x), int(snap.position.y + 1))))
points.append(Point2((int(snap.position.x + 1), int(snap.position.y + 1))))
visible = True
for point in points:
if not self.ai.is_visible(point):
visible = False
expired = self.check_expiration(snap)
if expired:
self.clear_unit_cache(memory_tags_to_remove, unit_tag)
elif visible:
# We see that the unit is no longer there.
if (snap.type_id in BURROWED_ALIAS or snap.is_burrowed) and unit_tag not in self._tags_destroyed:
if detectors is None:
detectors = self.cache.own(self.detectors)
if detectors.closer_than(11, snap.position):
self.clear_unit_cache(memory_tags_to_remove, unit_tag)
else:
# For burrowed units, let's change the snapshot
snap._proto.is_burrowed = True
# snap._proto.unit_type = BURROWED_ALIAS.get(snap.type_id, snap.type_id).value # int value
snap.cache.clear()
else:
self.clear_unit_cache(memory_tags_to_remove, unit_tag)
for tag in memory_tags_to_remove:
self._memory_units_by_tag.pop(tag)
memory_units = self.ghost_units
# Merge enemy data with memories
self.ai.enemy_units = self.ai.enemy_units + memory_units
self.ai.all_enemy_units = self.ai.all_enemy_units + memory_units
def clear_unit_cache(self, memory_tags_to_remove, unit_tag):
memory_tags_to_remove.append(unit_tag)
snaps = self._memory_units_by_tag.get(unit_tag)
self._archive_units_by_tag[unit_tag] = snaps
async def post_update(self):
if not self.debug:
return
for unit in self.ghost_units: # type: Unit
self.ai._client.debug_text_world(f"{unit.type_id.name}", unit.position3d, size=10)
@property
def ghost_units(self) -> Units:
"""Returns latest snapshot for all units that we know of but which are currently not visible."""
memory_units = Units([], self.ai)
for tag in self._memory_units_by_tag:
if self.is_unit_visible(tag):
continue
snap = self.get_latest_snapshot(tag)
memory_units.append(snap)
return memory_units
# return memory_units.visible
def get_latest_snapshot(self, unit_tag: int) -> Unit:
"""Returns the latest snapshot of a unit. Throws KeyError if unit_tag is not found."""
unit_deque = self._memory_units_by_tag[unit_tag]
return unit_deque[0]
def is_unit_visible(self, unit_tag: int) -> bool:
"""Returns true if the unit is visible on this frame."""
unit: Optional[Unit] = self.unit_dict.get(unit_tag, None)
return unit is not None and not unit.is_memory
def on_unit_destroyed(self, event: UnitDestroyedEvent):
"""Call this when a unit is destroyed, to make sure that the unit is erased from memory."""
# Remove the unit from frozen dictionaries.
self._memory_units_by_tag.pop(event.unit_tag, None)
self._archive_units_by_tag.pop(event.unit_tag, None)
self._tags_destroyed.add(event.unit_tag)
def check_expiration(self, snap: Unit) -> bool:
if snap.is_flying:
return snap.age > self.expire_air
return snap.age > self.expire_ground
# Will this end up being the same set as in enemy_units_manager.py ?
ignored_unit_types = {
# Protoss
UnitTypeId.INTERCEPTOR,
# Terran
UnitTypeId.MULE,
UnitTypeId.AUTOTURRET,
# Zerg
# Cocoons?
UnitTypeId.LARVA,
UnitTypeId.LOCUSTMP,
UnitTypeId.LOCUSTMPFLYING,
UnitTypeId.INFESTEDTERRAN,
UnitTypeId.BROODLING,
}
|
"""
Tests for salt.states.zpool
:codeauthor: Jorge Schrauwen <sjorge@blackdot.be>
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: salt.utils.zfs, salt.modules.zpool
:platform: illumos,freebsd,linux
"""
import salt.config
import salt.loader
import salt.states.zpool as zpool
import salt.utils.zfs
from salt.utils.odict import OrderedDict
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
from tests.support.zfs import ZFSMockData
class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.states.zpool
"""
@classmethod
def setUpClass(cls):
cls.utils_patch = ZFSMockData().get_patched_utils()
@classmethod
def tearDownClass(cls):
cls.utils_patch = None
def setup_loader_modules(self):
self.opts = opts = salt.config.DEFAULT_MINION_OPTS.copy()
utils = salt.loader.utils(opts, whitelist=["zfs"])
zpool_obj = {
zpool: {
"__opts__": opts,
"__grains__": {"kernel": "SunOS"},
"__utils__": utils,
}
}
return zpool_obj
def test_absent_without_pool(self):
"""
Test zpool absent without a pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool is absent",
"changes": {},
}
mock_exists = MagicMock(return_value=False)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertEqual(zpool.absent("myzpool"), ret)
def test_absent_destroy_pool(self):
"""
Test zpool absent destroying pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was destroyed",
"changes": {"myzpool": "destroyed"},
}
mock_exists = MagicMock(return_value=True)
mock_destroy = MagicMock(return_value=OrderedDict([("destroyed", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.destroy": mock_destroy}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.absent("myzpool"), ret)
def test_absent_exporty_pool(self):
"""
Test zpool absent exporting pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was exported",
"changes": {"myzpool": "exported"},
}
mock_exists = MagicMock(return_value=True)
mock_destroy = MagicMock(return_value=OrderedDict([("exported", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.export": mock_destroy}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.absent("myzpool", export=True), ret)
def test_absent_busy(self):
"""
Test zpool absent on a busy pool
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "\n".join(
[
"cannot unmount '/myzpool': Device busy",
"cannot export 'myzpool': pool is busy",
]
),
"changes": {},
}
mock_exists = MagicMock(return_value=True)
mock_destroy = MagicMock(
return_value=OrderedDict(
[
("exported", False),
(
"error",
"\n".join(
[
"cannot unmount '/myzpool': Device busy",
"cannot export 'myzpool': pool is busy",
]
),
),
]
)
)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.export": mock_destroy}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.absent("myzpool", export=True), ret)
def test_present_import_success(self):
"""
Test zpool present with import allowed and unimported pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was imported",
"changes": {"myzpool": "imported"},
}
config = {
"import": True,
}
mock_exists = MagicMock(return_value=False)
mock_import = MagicMock(return_value=OrderedDict([("imported", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.import": mock_import}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.present("myzpool", config=config), ret)
def test_present_import_fail(self):
"""
Test zpool present with import allowed and no unimported pool or layout
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "storage pool myzpool was not imported, no (valid) layout specified for creation",
"changes": {},
}
config = {
"import": True,
}
mock_exists = MagicMock(return_value=False)
mock_import = MagicMock(return_value=OrderedDict([("imported", False)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.import": mock_import}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.present("myzpool", config=config), ret)
def test_present_create_success(self):
"""
Test zpool present with non existing pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was created",
"changes": {"myzpool": "created"},
}
config = {
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": True,
}
filesystem_properties = {
"quota": "5G",
}
mock_exists = MagicMock(return_value=False)
mock_create = MagicMock(
return_value=OrderedDict(
[
("created", True),
(
"vdevs",
OrderedDict(
[
("mirror-0", ["/dev/dsk/disk0", "/dev/dsk/disk1"]),
("mirror-1", ["/dev/dsk/disk2", "/dev/dsk/disk3"]),
]
),
),
]
)
)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.create": mock_create}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
filesystem_properties=filesystem_properties,
),
ret,
)
def test_present_create_fail(self):
"""
Test zpool present with non existing pool (without a layout)
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "storage pool myzpool was not imported, no (valid) layout specified for creation",
"changes": {},
}
config = {
"import": False,
}
mock_exists = MagicMock(return_value=False)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertEqual(zpool.present("myzpool", config=config), ret)
def test_present_create_passthrough_fail(self):
"""
Test zpool present with non existing pool (without a layout)
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "\n".join(
[
"invalid vdev specification",
"use 'force=True' to override the following errors:",
"/data/salt/vdisk0 is part of exported pool 'zsalt'",
"/data/salt/vdisk1 is part of exported pool 'zsalt'",
]
),
"changes": {},
}
config = {
"force": False,
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": True,
}
filesystem_properties = {
"quota": "5G",
}
mock_exists = MagicMock(return_value=False)
mock_create = MagicMock(
return_value=OrderedDict(
[
("created", False),
(
"error",
"\n".join(
[
"invalid vdev specification",
"use 'force=True' to override the following errors:",
"/data/salt/vdisk0 is part of exported pool 'zsalt'",
"/data/salt/vdisk1 is part of exported pool 'zsalt'",
]
),
),
]
)
)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.create": mock_create}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
filesystem_properties=filesystem_properties,
),
ret,
)
def test_present_update_success(self):
"""
Test zpool present with an existing pool that needs an update
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "properties updated",
"changes": {"myzpool": {"autoexpand": False}},
}
config = {
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": False,
}
mock_exists = MagicMock(return_value=True)
mock_get = MagicMock(
return_value=OrderedDict(
[
("comment", "salt managed pool"),
("freeing", 0),
("listsnapshots", False),
("leaked", 0),
("feature@obsolete_counts", "enabled"),
("feature@sha512", "enabled"),
("delegation", True),
("dedupditto", "0"),
("dedupratio", "1.00x"),
("autoexpand", True),
("feature@bookmarks", "enabled"),
("allocated", 115712),
("guid", 1591906802560842214),
("feature@large_blocks", "enabled"),
("size", 2113929216),
("feature@enabled_txg", "active"),
("feature@hole_birth", "active"),
("capacity", 0),
("feature@multi_vdev_crash_dump", "enabled"),
("feature@extensible_dataset", "enabled"),
("cachefile", "-"),
("bootfs", "-"),
("autoreplace", True),
("readonly", False),
("version", "-"),
("health", "ONLINE"),
("expandsize", "-"),
("feature@embedded_data", "active"),
("feature@lz4_compress", "active"),
("feature@async_destroy", "enabled"),
("feature@skein", "enabled"),
("feature@empty_bpobj", "enabled"),
("feature@spacemap_histogram", "active"),
("bootsize", "-"),
("free", 2113813504),
("feature@device_removal", "enabled"),
("failmode", "wait"),
("feature@filesystem_limits", "enabled"),
("feature@edonr", "enabled"),
("altroot", "-"),
("fragmentation", "0%"),
]
)
)
mock_set = MagicMock(return_value=OrderedDict([("set", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.get": mock_get}
), patch.dict(zpool.__salt__, {"zpool.set": mock_set}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertEqual(
zpool.present(
"myzpool", config=config, layout=layout, properties=properties,
),
ret,
)
def test_present_update_nochange_success(self):
"""
Test zpool present with non existing pool
"""
config = {
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": True,
}
mock_exists = MagicMock(return_value=True)
mock_get = MagicMock(
return_value=OrderedDict(
[
("comment", "salt managed pool"),
("freeing", 0),
("listsnapshots", False),
("leaked", 0),
("feature@obsolete_counts", "enabled"),
("feature@sha512", "enabled"),
("delegation", True),
("dedupditto", "0"),
("dedupratio", "1.00x"),
("autoexpand", True),
("feature@bookmarks", "enabled"),
("allocated", 115712),
("guid", 1591906802560842214),
("feature@large_blocks", "enabled"),
("size", 2113929216),
("feature@enabled_txg", "active"),
("feature@hole_birth", "active"),
("capacity", 0),
("feature@multi_vdev_crash_dump", "enabled"),
("feature@extensible_dataset", "enabled"),
("cachefile", "-"),
("bootfs", "-"),
("autoreplace", True),
("readonly", False),
("version", "-"),
("health", "ONLINE"),
("expandsize", "-"),
("feature@embedded_data", "active"),
("feature@lz4_compress", "active"),
("feature@async_destroy", "enabled"),
("feature@skein", "enabled"),
("feature@empty_bpobj", "enabled"),
("feature@spacemap_histogram", "active"),
("bootsize", "-"),
("free", 2113813504),
("feature@device_removal", "enabled"),
("failmode", "wait"),
("feature@filesystem_limits", "enabled"),
("feature@edonr", "enabled"),
("altroot", "-"),
("fragmentation", "0%"),
]
)
)
ret = {
"name": "myzpool",
"result": True,
"comment": "no update needed",
"changes": {},
}
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}):
with patch.dict(zpool.__salt__, {"zpool.get": mock_get}):
with patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
),
ret,
)
# Run state with test=true
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool is uptodate",
"changes": {},
}
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}):
with patch.dict(zpool.__salt__, {"zpool.get": mock_get}):
with patch.dict(zpool.__utils__, self.utils_patch):
with patch.dict(zpool.__opts__, {"test": True}):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
),
ret,
)
|
from django.conf.urls import patterns, include, url
from django.views.generic import RedirectView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', RedirectView.as_view(url='/xAPI/')),
url(r'^XAPI/', include('lrs.urls')),
url(r'^xapi/', include('lrs.urls')),
url(r'^xAPI/', include('lrs.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('',
url(r'^accounts/login/$', 'django.contrib.auth.views.login', name="login"),
url(r'^accounts/logout/$', 'lrs.views.logout_view', name="logout"),
)
|
from flask_wtf import FlaskForm
from wtforms import BooleanField, HiddenField, StringField, SubmitField, ValidationError
from wtforms.validators import Length, Required
from .. models import EventFrameTemplateView
class CopyEventFrameTemplateViewForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
default = BooleanField("Default")
eventFrameTemplateId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first()
if eventFrameTemplateView is not None:
# Trying to copy an eventFrameTemplateView using a name that already exists.
validationError = True
if validationError:
raise ValidationError(f'The name "{field.data}" already exists.')
class EventFrameTemplateViewForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
default = BooleanField("Default")
selectable = BooleanField("Selectable", default = "checked")
eventFrameTemplateId = HiddenField()
eventFrameTemplateViewId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first()
if eventFrameTemplateView is not None:
if self.eventFrameTemplateViewId.data == "":
# Trying to add a new event frame template view using a name that already exists.
validationError = True
else:
if int(self.eventFrameTemplateViewId.data) != eventFrameTemplateView.EventFrameTemplateViewId:
# Trying to change the name of a event frame template view to a name that already exists.
validationError = True
if validationError is True:
raise ValidationError('The name "{}" already exists.'.format(field.data))
|
import math
import os
import random
import re
import sys
n = int(input())
if n % 2 == 1:
print("Weird")
elif 2 <= n <= 5:
print("Not Weird")
elif 6 <= n <= 20:
print("Weird")
else:
print("Not Weird")
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvBERT checkpoint."""
import argparse
from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
conf = ConvBertConfig.from_json_file(convbert_config_file)
model = ConvBertModel(conf)
model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
model.save_pretrained(pytorch_dump_path)
tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
tf_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--convbert_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained ConvBERT model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
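# Hedged CLI example (script name and paths are placeholders, not from the original file):
#   python convert_convbert_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf1/model.ckpt \
#       --convbert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir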
|
from gpiozero import LED
from time import sleep
import socket
import serial
#low level IC control pins
led1 = LED(13) #A0 pin
led2 = LED(6) #A1 pin
led3 = LED(5) #A2 pin
led4 = LED(27) #led4+5 = device rest plate
led5 = LED(22)
###setup communication with C# host software
##TCP_IP = '169.254.130.182'
##TCP_PORT = 5005
##BUFFER_SIZE = 1024
##MESSAGE = "Hello, windummy"
##
##s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
##s.connect((TCP_IP, TCP_PORT))
###s.send(str.encode(MESSAGE))
#setup communication with Arduino Mega CNC controller
ser = serial.Serial(
port="/dev/ttyACM1",
baudrate=250000,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
dsrdtr=True,
rtscts=True,
timeout=1
)
ser.get_settings()
ser.readlines()
SerialBufferIsClear = True
def SwitchMUXtoA():
led1.off()
led2.off()
led3.off()
def SwitchMUXtoB():
led1.on()
led2.off()
led3.off()
def SwitchMUXtoC():
led1.off()
led2.on()
led3.off()
def SwitchMUXtoD():
led1.on()
led2.on()
led3.off()
def RestPlateON():
led4.on()
led5.on()
def RestPlateOFF():
led4.off()
led5.off()
#gcode-pixel positions 05/21/18
#pixel A: X5.3 Z0.9
#pixel B: X0 Z2.9
#pixel C: X2.1 Z8.3
#pixel D: X7.4 Z6.3
def SwitchToPixelA():
SwitchMUXtoA()
ser.write('G1 X5.3 Z0.9\n'.encode())
def SwitchToPixelB():
SwitchMUXtoB()
ser.write('G1 X0 Z2.9\n'.encode())
def SwitchToPixelC():
SwitchMUXtoC()
ser.write('G1 X2.1 Z8.3\n'.encode())
def SwitchToPixelD():
SwitchMUXtoD()
ser.write('G1 X7.4 Z6.3\n'.encode())
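# --- Hedged refactoring sketch (not part of the original script) ---
# SwapDevice() and SystemInitialize() below repeat the same pattern: write one
# G-code command, then poll ser.readline() until Marlin replies with "ok". A
# small helper along these lines could replace each repetition; the name and
# exact behaviour are assumptions, not the original author's API:
#
#   def send_and_wait_for_ok(command):
#       """Write one G-code line and block until Marlin acknowledges it."""
#       ser.write((command + '\n').encode())
#       while True:
#           reply = ser.readline().decode()
#           print(reply)
#           if "ok" in reply:
#               print("got the ok")
#               return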
def SwapDevice():
ser.write('G1 E0\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('M84\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
sleep(15)
RestPlateOFF()
sleep(3)
RestPlateON()
ser.write('G1 E0\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('G1 Y0\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('G1 Y32 F777\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('G1 E175\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('M84\n'.encode())
sleep(20)
RestPlateOFF()
print("Finished swapping devices")
def SystemInitialize():
RestPlateON()
ser.write('M302 P1\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('G28 X0 Z0\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
#ser.write('G28 Z0\n'.encode())
ser.write('G1 Y33 F777\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
#ser.write('G1 Y0\n'.encode())
ser.write('G1 E175\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
ser.write('M84\n'.encode())
SerialBufferIsClear = False
while(SerialBufferIsClear != True):
MarlinMessage = ser.readline().decode()
print(MarlinMessage)
if("ok" in MarlinMessage):
SerialBufferIsClear = True
print("got the ok")
sleep(10) #buffer to prevent mistiming
#RestPlateOFF()
print("Finished Initialization")
def GetRawInput():
var = input("Please enter a command:")
print("entered: "+str(var))
if(var=="A"):
SwitchMUXtoA()
return True
if(var=="B"):
SwitchMUXtoB()
return True
if(var=="C"):
SwitchMUXtoC()
return True
if(var=="D"):
SwitchMUXtoD()
return True
if(var=="ActON"):
RestPlateON()
return True
if(var=="ActOFF"):
print("thing worked")
RestPlateOFF()
return True
else:
command = str(var)+"\n"
ser.write(command.encode())
return False
#RestPlateON()
SystemInitialize()
while True:
## data = s.recv(BUFFER_SIZE)
## print(data.decode())
## s.send(str.encode(MESSAGE))
## if data.decode() == "SwitchMUXtoA":
## SwitchMUXtoA()
## MarlinMessage = ser.readline().decode()
## #print(SerialBufferIsClear)
## #print(MarlinMessage)
## if("ok" in MarlinMessage):
## SerialBufferIsClear = True
## print("got the ok")
print("pixel A")
SwitchToPixelA()
sleep(10)
print("pixel B")
SwitchToPixelB()
sleep(10)
print("pixel C")
SwitchToPixelC()
sleep(10)
print("pixel D")
SwitchToPixelD()
sleep(10)
SwapDevice()
## if(SerialBufferIsClear):
## SerialBufferIsClear = GetRawInput()
|
# -*- coding: utf-8 -*-
"""
Created on 12/21/2018
@author: BioinfoTongLI
"""
import numpy as np
import read_roi
from imagepy.core.engine import Free
from imagepy import IPy
from skimage.draw import polygon
class Plugin(Free):
"""load_ij_roi: use read_roi and th pass to shapely objects"""
title = 'Import Rois from IJ'
para = {'path': '', 'name': 'Undefined', 'width': 512, 'height': 512}
view = [(str, 'name', 'name', ''),
(int, 'width', (1, 3000), 0, 'width', 'pix'),
(int, 'height', (1, 3000), 0, 'height', 'pix')]
def load(self):
filt = '|'.join(['%s files (*.%s)|*.%s' % (i.upper(), i, i) for i in ["zip"]])
return IPy.getpath(self.title, filt, 'open', self.para)
def run(self, para=None):
ls = read_roi.read_roi_zip(para['path'])
img = np.zeros((para['height'], para['width']), dtype=np.int32)
for i in ls:
img[polygon(ls[i]['y'], ls[i]['x'], img.shape)] = int(i)
IPy.show_img([img], para['name'])
|
from functools import partial
from typing import Callable, List, Union
import numpy as np
import pandas as pd
from scipy.stats import friedmanchisquare, rankdata
from sklearn.metrics.pairwise import pairwise_distances
from statsmodels.stats.libqsturng import qsturng
Matrix = List[List[float]]
def friedman_nemenyi(table: pd.DataFrame, alpha: float = 0.05):
"""Runs Friedman test on given table and optionally graphs a
critical-difference diagram.
Args:
-----
table: DataFrame
The data table, with subjects as rows and independent variable
(condition) as columns.
alpha: float
Significance level, must be in the range (0, 1), default is
0.05.
Returns:
--------
pval: float
The p-value for the Friedman test.
cd: float
The critical difference from the Nemenyi post-hoc test.
df: pd.DataFrame
A table containing statistics relating to ranking and average
values of the conditions. The dataframe has these columns:
"mean_rank", "mean", "std", "median", "mad", "effect_size".
"""
_, pval = friedmanchisquare(*table.transpose().to_numpy())
names = list(table.columns)
avgrank = rankdata(-table.to_numpy(), axis=1).mean(0)
df = pd.DataFrame(
{
"mean_rank": avgrank,
"mean": table.mean(),
"std": table.std(),
"median": table.median(),
"mad": table.mad(),
},
index=names,
).sort_values("mean_rank")
topclf = df.index[0]
n, k = table.shape
# Effect size is calculated in terms of differences in MAD
df["effect_size"] = (df.loc[topclf, "median"] - df["median"]) / np.sqrt(
((n - 1) * df.loc[topclf, "mad"] ** 2 + (n - 1) * df["mad"] ** 2) / (2 * n - 2)
)
cd = qsturng(1 - alpha, k, np.inf) * np.sqrt((k * (k + 1)) / (12 * n))
return pval, cd, df
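# Illustrative usage sketch (added; the data and column names below are made up):
# rows are subjects (e.g. datasets), columns are the compared conditions.
def _example_friedman_nemenyi():  # pragma: no cover
    rng = np.random.default_rng(0)
    table = pd.DataFrame(
        rng.random((10, 3)) + [0.0, 0.1, 0.2],
        columns=["cond_a", "cond_b", "cond_c"],
    )
    pval, cd, stats = friedman_nemenyi(table)
    # Conditions whose mean ranks differ by more than `cd` are significantly
    # different at the chosen alpha level.
    return pval, cd, stats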
def _get_dist_func(metric: Union[Callable, str], **kwargs):
if callable(metric):
return partial(metric, **kwargs)
else:
if metric != "minkowski" and "p" in kwargs:
del kwargs["p"]
if metric != "mahalanobis" and "VI" in kwargs:
del kwargs["VI"]
return partial(pairwise_distances, metric=metric, **kwargs)
def bhattacharyya_dist(x: np.ndarray, y: np.ndarray, pinv: bool = False):
"""Calculate Bhattacharyya distance between multivariate Gaussian
distributions.
Args:
-----
x: array-like
Data matrix of shape (n1_samples, n_features) corresponding to
the first group.
y: array-like
Data matrix of shape (n2_samples, n_features) corresponding to
the second group.
pinv: bool
Use pseudoinverse instead of inverse. This is useful if the
covariance matrices don't have full rank or otherwise aren't
invertible.
"""
mu1 = np.expand_dims(np.mean(x, axis=0), 1)
mu2 = np.expand_dims(np.mean(y, axis=0), 1)
cov1 = np.cov(x, rowvar=False)
cov2 = np.cov(y, rowvar=False)
cov = (cov1 + cov2) / 2
_, ldet1 = np.linalg.slogdet(cov1)
_, ldet2 = np.linalg.slogdet(cov2)
_, ldet = np.linalg.slogdet(cov)
if pinv:
covinv = np.linalg.pinv(cov, hermitian=True, rcond=1e-8)
else:
covinv = np.linalg.inv(cov)
db = (mu1 - mu2).T.dot(covinv).dot(mu1 - mu2) / 8 + ldet / 2 - ldet1 / 4 - ldet2 / 4
return db.item()
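# Illustrative sketch (added; the data is synthetic): the distance is ~0 when both
# groups come from the same sample and grows as the group means separate.
def _example_bhattacharyya_dist():  # pragma: no cover
    rng = np.random.default_rng(0)
    x = rng.normal(0.0, 1.0, size=(200, 4))
    y = rng.normal(1.0, 1.0, size=(200, 4))
    return bhattacharyya_dist(x, x), bhattacharyya_dist(x, y)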
def corr_ratio(x: np.ndarray, groups: Union[List[int], np.ndarray]):
"""Calculates correlation ratio for each feature using the given
groups.
Args:
-----
x: numpy.ndarray
Data matrix, with shape (n_instances, n_features).
groups: list or numpy.ndarray
1D array of groups assignments of length n_instances. Groups
should be labelled from 0 to G - 1 inclusive, where G is the
number of groups.
Returns:
--------
eta: numpy.ndarray
1D array of correlation coefficients of length n_features. Each
value is in [0, 1] except if a feature takes only one value, in
which case eta will be nan.
"""
groups = np.array(groups)
n_groups = groups.max() + 1
counts = np.bincount(groups)
mean = x.mean(0)
g_means = np.empty((n_groups, x.shape[1]))
for g in range(n_groups):
g_means[g, :] = x[groups == g].mean(0)
num = np.sum(counts[:, None] * (g_means - mean) ** 2, axis=0)
den = np.sum((x - mean) ** 2, axis=0)
old_err = np.seterr(divide="ignore", invalid="ignore")
eta2 = num / den
np.seterr(**old_err)
return np.sqrt(eta2)
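# Illustrative sketch (added; synthetic data): feature 0 is driven by the group
# label while feature 1 is pure noise, so eta should be close to 1 for the first
# feature and close to 0 for the second.
def _example_corr_ratio():  # pragma: no cover
    rng = np.random.default_rng(0)
    groups = np.repeat([0, 1, 2], 50)
    x = np.column_stack(
        [groups * 2.0 + rng.normal(0.0, 0.1, 150), rng.normal(0.0, 1.0, 150)]
    )
    return corr_ratio(x, groups)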
def dunn(
x: np.ndarray,
clusters: Union[List[int], np.ndarray],
intra_method: str = "mean",
inter_method: str = "cent",
metric: Union[Callable, str] = "l2",
p: int = 2,
):
"""Calculates the Dunn index for cluster "goodness".
Args:
-----
x: numpy.ndarray
Data matrix, with shape (n_instances, n_features).
clusters: list or numpy.ndarray
1D array of cluster assignments of length n_instances. Clusters
should be labelled from 0 to C - 1 inclusive, where C is the
number of clusters.
intra_method: str
Method for calculating intra-cluster distance. One of "max",
"mean", "cent".
inter_method: str
Method for calculating inter-cluster distance. One of "cent".
metric: str or callable
Distance metric. If str, must be one of the sklearn or scipy
distance methods. If callable, must take one positional argument
and return a pairwise distance matrix.
p: int
Value of p for p-norm when using "lp" distance metric.
Returns:
--------
dunn: float
The Dunn index for this data and cluster assignment.
"""
clusters = np.array(clusters, dtype=int)
n_clusters = clusters.max() + 1
d = _get_dist_func(metric, p=p)
intra = np.zeros(n_clusters)
for c in range(n_clusters):
clust_data = x[clusters == c]
if intra_method == "max":
idx = np.triu_indices(len(clust_data))
intra[c] = d(clust_data)[idx].max()
elif intra_method == "mean":
idx = np.triu_indices(len(clust_data))
intra[c] = d(clust_data)[idx].mean()
elif intra_method == "cent":
mean = clust_data.mean(0)
intra[c] = d(clust_data, mean[None, :]).mean()
inter = np.zeros((n_clusters, n_clusters))
for i in range(n_clusters):
inter[i, i] = np.inf # To avoid min = 0
for j in range(i + 1, n_clusters):
if inter_method == "cent":
mean_i = x[clusters == i].mean(0)
mean_j = x[clusters == j].mean(0)
inter[i, j] = inter[j, i] = d(mean_i[None, :], mean_j[None, :])
return inter.min() / intra.max()
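# Illustrative sketch (added; synthetic data): two well-separated blobs should
# yield a noticeably larger Dunn index than a random assignment of the same points.
def _example_dunn():  # pragma: no cover
    rng = np.random.default_rng(0)
    x = np.vstack([rng.normal(0.0, 0.5, (50, 2)), rng.normal(5.0, 0.5, (50, 2))])
    good = np.repeat([0, 1], 50)
    bad = rng.integers(0, 2, 100)
    return dunn(x, good), dunn(x, bad)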
def kappa(data: np.ndarray):
"""Calculates Fleiss' kappa for inter-rater agreement.
Args:
-----
data: numpy.ndarray
The data matrix, in the form (raters x units).
"""
cats = np.unique(data)
n, N = data.shape
counts = np.stack([np.sum(data == c, 0) for c in cats], 1)
p_j = np.sum(counts, axis=0) / (N * n)
assert np.isclose(np.sum(p_j), 1)
Pe = np.sum(p_j ** 2)
P = (np.sum(counts ** 2, 1) - n) / (n * (n - 1))
Pbar = np.mean(P)
return (Pbar - Pe) / (1 - Pe)
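# Illustrative sketch (added; the ratings are made up): three raters (rows) assign
# binary labels to six units (columns).
def _example_kappa():  # pragma: no cover
    ratings = np.array(
        [[0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0]]
    )
    return kappa(ratings)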
class Deltas:
@staticmethod
def nominal(c: int, k: int):
return float(c != k)
@staticmethod
def interval(c: float, k: float):
return (c - k) ** 2
def alpha(
data: np.ndarray,
delta: Union[Callable[[int, int], float], List[List[float]], str] = "nominal",
):
"""Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for
inter-rater agreement.
[1] K. Krippendorff, Content analysis: An introduction to its
methodology. Sage publications, 2004.
Args:
-----
data: numpy.ndarray
The data matrix, shape (n_raters, n_units). Each cell (i, j)
represents the value assigned to unit j by rater i, or 0
representing no response.
delta: callable, 2-D array-like or str
The delta metric. Default is the nominal metric, which takes the
value 1 in case c != k and 0 otherwise.
"""
# The following implementation was based off the Wikipedia article:
# https://en.wikipedia.org/wiki/Krippendorff%27s_alpha
# Response categories go from 1 to R, 0 represents no response
R = np.max(data)
counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T
count_sum = np.sum(counts, 0)
assert len(count_sum) == R + 1
def ordinal(c: int, k: int):
if k < c:
c, k = k, c
s = (
sum(count_sum[g] for g in range(c, k + 1))
- (count_sum[c] + count_sum[k]) / 2
)
return s ** 2
if isinstance(delta, str):
delta = {
"nominal": Deltas.nominal,
"ordinal": ordinal,
"interval": Deltas.interval,
}[delta]
if not callable(delta):
try:
delta[0][0]
except (TypeError, IndexError):
raise TypeError("delta must be either str, callable or 2D array.")
# capture the 2D array before rebinding `delta`, otherwise _delta would refer to itself
delta_matrix = delta
def _delta(c, k):
return delta_matrix[c][k]
delta = _delta
m_u = np.sum(counts[:, 1:], 1)
valid = m_u >= 2
counts = counts[valid]
m_u = m_u[valid]
data = data[:, valid]
n = np.sum(m_u)
n_cku = np.matmul(counts[:, :, None], counts[:, None, :])
for i in range(R + 1):
n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1)
D_o = 0
for c in range(1, R + 1):
for k in range(1, R + 1):
D_o += delta(c, k) * n_cku[:, c, k]
D_o = np.sum(D_o / (n * (m_u - 1)))
D_e = 0
P_ck = np.bincount(data.flat, minlength=R + 1)
for c in range(1, R + 1):
for k in range(1, R + 1):
D_e += delta(c, k) * P_ck[c] * P_ck[k]
D_e /= n * (n - 1)
return 1 - D_o / D_e
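# Illustrative sketch (added; the ratings are made up): two raters (rows) label five
# units (columns) with categories 1 and 2; 0 marks a missing response, and units
# with fewer than two responses are ignored.
def _example_alpha():  # pragma: no cover
    ratings = np.array([[1, 2, 2, 0, 1], [1, 2, 1, 2, 1]])
    return alpha(ratings, delta="nominal")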
|
#
# Copyright (C) 2002-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" utility functionality for the 2D pharmacophores code
See Docs/Chem/Pharm2D.triangles.jpg for an illustration of the way
pharmacophores are broken into triangles and labelled.
See Docs/Chem/Pharm2D.signatures.jpg for an illustration of bit
numbering
"""
from __future__ import print_function, division
import itertools
#
# number of points in a scaffold -> sequence of distances (p1,p2) in
# the scaffold
#
nPointDistDict = {
2: ((0, 1), ),
3: ((0, 1), (0, 2), (1, 2)),
4: ((0, 1), (0, 2), (0, 3), (1, 2), (2, 3)),
5: ((0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (2, 3), (3, 4)),
6: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (1, 2), (2, 3), (3, 4), (4, 5)),
7: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)),
8: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (1, 2), (2, 3), (3, 4), (4, 5),
(5, 6), (6, 7)),
9: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 2), (2, 3), (3, 4),
(4, 5), (5, 6), (6, 7), (7, 8)),
10: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 2), (2, 3),
(3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)),
}
#
# number of distances in a scaffold -> number of points in the scaffold
#
nDistPointDict = {
1: 2,
3: 3,
5: 4,
7: 5,
9: 6,
11: 7,
13: 8,
15: 9,
17: 10,
}
_trianglesInPharmacophore = {}
def GetTriangles(nPts):
""" returns a tuple with the distance indices for
triangles composing an nPts-pharmacophore
"""
global _trianglesInPharmacophore
if nPts < 3:
return []
res = _trianglesInPharmacophore.get(nPts, [])
if not res:
idx1, idx2, idx3 = (0, 1, nPts - 1)
while idx1 < nPts - 2:
res.append((idx1, idx2, idx3))
idx1 += 1
idx2 += 1
idx3 += 1
res = tuple(res)
_trianglesInPharmacophore[nPts] = res
return res
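# Illustrative check (added, not part of the original module): for a 4-point
# pharmacophore the distance indices 0..4 (see nPointDistDict[4]) are grouped
# into two triangles.
def _exampleGetTriangles():  # pragma: nocover
    assert GetTriangles(4) == ((0, 1, 3), (1, 2, 4))
    return GetTriangles(4)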
def _fact(x):
if x <= 1:
return 1
accum = 1
for i in range(x):
accum *= i + 1
return accum
def BinsTriangleInequality(d1, d2, d3):
""" checks the triangle inequality for combinations
of distance bins.
the general triangle inequality is:
d1 + d2 >= d3
the conservative binned form of this is:
d1(upper) + d2(upper) >= d3(lower)
"""
if d1[1] + d2[1] < d3[0]:
return False
if d2[1] + d3[1] < d1[0]:
return False
if d3[1] + d1[1] < d2[0]:
return False
return True
def ScaffoldPasses(combo, bins=None):
""" checks the scaffold passed in to see if all
contributing triangles can satisfy the triangle inequality
the scaffold itself (encoded in combo) is a list of binned distances
"""
# this is the number of points in the pharmacophore
nPts = nDistPointDict[len(combo)]
tris = GetTriangles(nPts)
for tri in tris:
ds = [bins[combo[x]] for x in tri]
if not BinsTriangleInequality(ds[0], ds[1], ds[2]):
return False
return True
_numCombDict = {}
def NumCombinations(nItems, nSlots):
""" returns the number of ways to fit nItems into nSlots
We assume that (x,y) and (y,x) are equivalent, and
(x,x) is allowed.
General formula is, for N items and S slots:
res = (N+S-1)! / ( (N-1)! * S! )
"""
global _numCombDict
res = _numCombDict.get((nItems, nSlots), -1)
if res == -1:
res = _fact(nItems + nSlots - 1) // (_fact(nItems - 1) * _fact(nSlots))
_numCombDict[(nItems, nSlots)] = res
return res
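# Illustrative check (added, not part of the original module): 3 items in 2
# unordered slots with repetition gives (3+2-1)!/((3-1)!*2!) = 6 combinations,
# i.e. (0,0),(0,1),(0,2),(1,1),(1,2),(2,2).
def _exampleNumCombinations():  # pragma: nocover
    assert NumCombinations(3, 2) == 6
    return NumCombinations(3, 2)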
_verbose = 0
_countCache = {}
def CountUpTo(nItems, nSlots, vs, idx=0, startAt=0):
""" Figures out where a given combination of indices would
occur in the combinatorial explosion generated by _GetIndexCombinations_
**Arguments**
- nItems: the number of items to distribute
- nSlots: the number of slots in which to distribute them
- vs: a sequence containing the values to find
- idx: used in the recursion
- startAt: used in the recursion
**Returns**
an integer
"""
global _countCache
if _verbose:
print(' ' * idx, 'CountUpTo(%d)' % idx, vs[idx], startAt)
if idx == 0 and (nItems, nSlots, tuple(vs)) in _countCache:
return _countCache[(nItems, nSlots, tuple(vs))]
elif idx >= nSlots:
accum = 0
elif idx == nSlots - 1:
accum = vs[idx] - startAt
else:
accum = 0
# get the digit at idx correct
for i in range(startAt, vs[idx]):
nLevsUnder = nSlots - idx - 1
nValsOver = nItems - i
if _verbose:
print(' ' * idx, ' ', i, nValsOver, nLevsUnder, NumCombinations(nValsOver, nLevsUnder))
accum += NumCombinations(nValsOver, nLevsUnder)
accum += CountUpTo(nItems, nSlots, vs, idx + 1, vs[idx])
if _verbose:
print(' ' * idx, '>', accum)
if idx == 0:
_countCache[(nItems, nSlots, tuple(vs))] = accum
return accum
_indexCombinations = {}
def GetIndexCombinations(nItems, nSlots, slot=0, lastItemVal=0):
""" Generates all combinations of nItems in nSlots without including
duplicates
**Arguments**
- nItems: the number of items to distribute
- nSlots: the number of slots in which to distribute them
- slot: used in recursion
- lastItemVal: used in recursion
**Returns**
a list of lists
"""
global _indexCombinations
if not slot and (nItems, nSlots) in _indexCombinations:
res = _indexCombinations[(nItems, nSlots)]
elif slot >= nSlots:
res = []
elif slot == nSlots - 1:
res = [[x] for x in range(lastItemVal, nItems)]
else:
res = []
for x in range(lastItemVal, nItems):
tmp = GetIndexCombinations(nItems, nSlots, slot + 1, x)
for entry in tmp:
res.append([x] + entry)
if not slot:
_indexCombinations[(nItems, nSlots)] = res
return res
def GetAllCombinations(choices, noDups=1, which=0):
""" Does the combinatorial explosion of the possible combinations
of the elements of _choices_.
**Arguments**
- choices: sequence of sequences with the elements to be enumerated
- noDups: (optional) if this is nonzero, results with duplicates,
e.g. (1,1,0), will not be generated
- which: used in recursion
**Returns**
a list of lists
>>> GetAllCombinations([(0,),(1,),(2,)])
[[0, 1, 2]]
>>> GetAllCombinations([(0,),(1,3),(2,)])
[[0, 1, 2], [0, 3, 2]]
>>> GetAllCombinations([(0,1),(1,3),(2,)])
[[0, 1, 2], [0, 3, 2], [1, 3, 2]]
"""
if which >= len(choices):
res = []
elif which == len(choices) - 1:
res = [[x] for x in choices[which]]
else:
res = []
tmp = GetAllCombinations(choices, noDups=noDups, which=which + 1)
for thing in choices[which]:
for other in tmp:
if not noDups or thing not in other:
res.append([thing] + other)
return res
def GetUniqueCombinations(choices, classes, which=0):
""" Does the combinatorial explosion of the possible combinations
of the elements of _choices_.
"""
# print(choices, classes)
assert len(choices) == len(classes)
if which >= len(choices):
res = []
elif which == len(choices) - 1:
res = [[(classes[which], x)] for x in choices[which]]
else:
res = []
tmp = GetUniqueCombinations(choices, classes, which=which + 1)
for thing in choices[which]:
for other in tmp:
idxThere = 0
for x in other:
if x[1] == thing:
idxThere += 1
if not idxThere:
newL = [(classes[which], thing)] + other
newL.sort()
if newL not in res:
res.append(newL)
return res
def GetUniqueCombinations_new(choices, classes, which=0):
""" Does the combinatorial explosion of the possible combinations
of the elements of _choices_.
"""
# print(choices, classes)
assert len(choices) == len(classes)
combos = set()
for choice in itertools.product(*choices):
# If a choice occurs in more than one of the fields, we ignore this case
if len(set(choice)) != len(choice):
continue
combos.add(tuple(sorted((cls, ch) for cls, ch in zip(classes, choice))))
return [list(combo) for combo in sorted(combos)]
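# Illustrative check (added, not part of the original module): both implementations
# should agree; a choice that would occupy more than one slot (here 2) is skipped.
def _exampleGetUniqueCombinations():  # pragma: nocover
    choices = [(1, 2), (2, 3)]
    classes = [0, 1]
    assert GetUniqueCombinations(choices, classes) == GetUniqueCombinations_new(choices, classes)
    return GetUniqueCombinations(choices, classes)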
def UniquifyCombinations(combos):
""" uniquifies the combinations in the argument
**Arguments**:
- combos: a sequence of sequences
**Returns**
- a list of tuples containing the unique combos
"""
resD = {}
for combo in combos:
k = combo[:]
k.sort()
resD[tuple(k)] = tuple(combo)
return list(resD.values())
def GetPossibleScaffolds(nPts, bins, useTriangleInequality=True):
""" gets all realizable scaffolds (passing the triangle inequality) with the
given number of points and returns them as a list of tuples
"""
if nPts < 2:
res = 0
elif nPts == 2:
res = [(x, ) for x in range(len(bins))]
else:
nDists = len(nPointDistDict[nPts])
combos = GetAllCombinations([range(len(bins))] * nDists, noDups=0)
res = []
for combo in combos:
if not useTriangleInequality or ScaffoldPasses(combo, bins):
res.append(tuple(combo))
return res
def OrderTriangle(featIndices, dists):
"""
put the distances for a triangle into canonical order
It's easy if the features are all different:
>>> OrderTriangle([0,2,4],[1,2,3])
([0, 2, 4], [1, 2, 3])
It's trickiest if they are all the same:
>>> OrderTriangle([0,0,0],[1,2,3])
([0, 0, 0], [3, 2, 1])
>>> OrderTriangle([0,0,0],[2,1,3])
([0, 0, 0], [3, 2, 1])
>>> OrderTriangle([0,0,0],[1,3,2])
([0, 0, 0], [3, 2, 1])
>>> OrderTriangle([0,0,0],[3,1,2])
([0, 0, 0], [3, 2, 1])
>>> OrderTriangle([0,0,0],[3,2,1])
([0, 0, 0], [3, 2, 1])
>>> OrderTriangle([0,0,1],[3,2,1])
([0, 0, 1], [3, 2, 1])
>>> OrderTriangle([0,0,1],[1,3,2])
([0, 0, 1], [1, 3, 2])
>>> OrderTriangle([0,0,1],[1,2,3])
([0, 0, 1], [1, 3, 2])
>>> OrderTriangle([0,0,1],[1,3,2])
([0, 0, 1], [1, 3, 2])
"""
if len(featIndices) != 3:
raise ValueError('bad indices')
if len(dists) != 3:
raise ValueError('bad dists')
fs = set(featIndices)
if len(fs) == 3:
return featIndices, dists
dSums = [0] * 3
dSums[0] = dists[0] + dists[1]
dSums[1] = dists[0] + dists[2]
dSums[2] = dists[1] + dists[2]
mD = max(dSums)
if len(fs) == 1:
if dSums[0] == mD:
if dists[0] > dists[1]:
ireorder = (0, 1, 2)
dreorder = (0, 1, 2)
else:
ireorder = (0, 2, 1)
dreorder = (1, 0, 2)
elif dSums[1] == mD:
if dists[0] > dists[2]:
ireorder = (1, 0, 2)
dreorder = (0, 2, 1)
else:
ireorder = (1, 2, 0)
dreorder = (2, 0, 1)
else:
if dists[1] > dists[2]:
ireorder = (2, 0, 1)
dreorder = (1, 2, 0)
else:
ireorder = (2, 1, 0)
dreorder = (2, 1, 0)
else:
# two classes
if featIndices[0] == featIndices[1]:
if dists[1] > dists[2]:
ireorder = (0, 1, 2)
dreorder = (0, 1, 2)
else:
ireorder = (1, 0, 2)
dreorder = (0, 2, 1)
elif featIndices[0] == featIndices[2]:
if dists[0] > dists[2]:
ireorder = (0, 1, 2)
dreorder = (0, 1, 2)
else:
ireorder = (2, 1, 0)
dreorder = (2, 1, 0)
else: # featIndices[1]==featIndices[2]:
if dists[0] > dists[1]:
ireorder = (0, 1, 2)
dreorder = (0, 1, 2)
else:
ireorder = (0, 2, 1)
dreorder = (1, 0, 2)
dists = [dists[x] for x in dreorder]
featIndices = [featIndices[x] for x in ireorder]
return featIndices, dists
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 TickSmith Corp.
#
# Licensed under the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Provides reusable query structure
'''
import sys
from tickvaultpythonapi.parsing.operation import Operation, BaseOperation
class Predicate(object):
key = ""
operation = ""
value = ""
opClass = Operation() # Defaults to Operation, which allows no operations
def __init__(self, key, op, val):
"""
Assign key, operation and value
"""
self.key = key
self.operation = self.get_valid_op(op)
self.value = val
def get_valid_op(self, op):
"""
Uses opClass (subtypes of Operation) to determine whether the
given operation is allowed. If it is, it returns the string that
will be appended to the key name (ex. '>' results in 'Gte', so that the
query will be 'keyGte')
"""
try:
return self.opClass.get_str(op)
except Exception as e:
sys.exit(e)
def get_as_kv_pair(self):
"""
Get as key-value pair
(ex. key = 'price', operation = '!=', value = '50',
result= {"priceNeq" : "50"})
"""
return {self.key + self.operation : str(self.value)}
def get_as_tuple(self):
"""
Get as tuple
(ex. key = 'price', operation = '!=', value = '50',
result= ("priceNeq","50")
"""
return (self.key + self.operation, str(self.value))
def __str__(self):
"""
@Override of __str__()
"""
return self.key + self.operation + "=" + str(self.value)
class BasePredicate(Predicate):
# Replace opClass with BaseOperation
opClass = BaseOperation()
# Getter for opClass
@classmethod
def get_op_class(cls):
return cls.opClass
if __name__ == '__main__':
params = {"param1":"value1"}
bp = BasePredicate("line_type", "=", "T,E")
print(bp.opClass.op_to_str)
p = bp.get_as_kv_pair()
params = {**params, **p}
print(params)
print(BasePredicate("price", ">", 7).get_as_kv_pair())
print(BasePredicate("price", ">=", "a"))
print(BasePredicate("price", "<=", "7").get_as_kv_pair())
print(BasePredicate("price", "!=", "7"))
|
"""
Last edited: January 20 2020
|br| @author: FINE Developer Team (FZJ IEK-3) \n\n
The approaches used are described in
Robinius et al. (2019) "Robust Optimal Discrete Arc Sizing for Tree-Shaped Potential Networks"
and they are further developed with the help of
Theorem 10 of Labbé et al. (2019) "Bookings in the European gas market: characterisation of feasibility and
computational complexity results"
and Lemma 3.4 and 3.5 of Schewe et al. (preprint 2020) "Computing Technical Capacities in the European Entry-Exit
Gas Market is NP-Hard"
"""
import pandas as pd
from FINE import utils
import networkx as nx
import math
import pyomo.environ as py
import warnings
from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition
import numpy as np
import copy
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
import matplotlib as mpl
import shapely as shp
import time
from multiprocessing import Pool
import sys
from functools import partial
try:
import geopandas as gpd
except ImportError:
warnings.warn('The GeoPandas python package could not be imported.')
# local type and value checkers
def isPandasDataFrameNumber(dataframe):
# check if dataframe is a pandas dataframe and if each value is float or int
if not isinstance(dataframe, pd.DataFrame):
raise TypeError("The input argument has to be a pandas DataFrame")
else:
if not dataframe.select_dtypes(exclude=["float", "int"]).empty:
raise ValueError("The input pandas DataFrame has to contain only floats or ints")
def isPandasSeriesPositiveNumber(pandasSeries):
# Check if the input argument is a pandas series and it contains only positive numbers
if not isinstance(pandasSeries, pd.Series):
raise TypeError("The input argument has to be a pandas series")
else:
for index in pandasSeries.index:
utils.isPositiveNumber(pandasSeries[index])
def isNetworkxGraph(graph):
# Check if the input argument is a networkx graph
if not isinstance(graph, nx.Graph):
raise TypeError("The input argument has to be a networkx graph")
def isDictionaryPositiveNumber(dictionary):
# Check if the input argument is a dictionary with positive numbers as values
if not isinstance(dictionary, dict):
raise TypeError("The input argument has to be a dictionary")
else:
for key in dictionary.keys():
utils.isPositiveNumber(dictionary[key])
def checkLowerUpperBoundsOfDicts(lowerDict, upperDict):
# check if lowerDict and upperDict have the same keys and if lowerDict[key] <= upperDict[key] holds
if not (lowerDict.keys() == upperDict.keys()):
raise ValueError("The input arguments have to have the same keys")
else:
for key in lowerDict.keys():
if lowerDict[key] > upperDict[key]:
raise ValueError("The lower bound has to be the smaller than the upper bound")
def isListOfStrings(strings):
# check if strings is list of strings
if not isinstance(strings, list):
raise TypeError("The input argument has to be a list")
else:
for string in strings:
utils.isString(string)
def isBool(boolean):
# check if boolean is a bool
if not isinstance(boolean, bool):
raise TypeError("The input argument has to be a bool")
# End utils checks
def getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None):
"""
Determines the injection and withdrawal rates into a network from a component in an
EnergySystemModel object or based on the fluid flow data.
:param componentName: name of the network component in the EnergySystemModel class
(only required if the fluid flows are to be obtained from the EnergySystemModel class)
|br| * the default value is ''
:type componentName: string
:param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
specified if the operationVariablesOptimumData are to be obtained from the
EnergySystemModel object)
|br| * the default value is None
:type esM: FINE EnergySystemModel
:param operationVariablesOptimumData: the injection and withdrawal rates into and out of the
network can either be obtained from a DataFrame with the original fluid flows or an
EnergySystemModel with an optimized Pyomo instance.
In the former case, the argument is a pandas DataFrame with two index columns (specifying
the names of the start and end node of a pipeline) and one index row (for the time steps).
The data in the DataFrame denotes the flow coming from the start node and going to the end
node [e.g. in kWh or Nm^3]. Example:
0 1 ... 8759
node1 node2 0.1 0.0 ... 0.9
node2 node3 0.0 0.3 ... 0.4
node2 node1 0.9 0.9 ... 0.2
node3 node2 1.1 0.2 ... 0.9
|br| * the default value is None
:type operationVariablesOptimumData: pandas DataFrame with non-negative floats
:return: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative)
:rtype: pandas DataFrame
"""
#TODO check type and value correctness
# Get the original optimal operation variables
if operationVariablesOptimumData is not None:
op = operationVariablesOptimumData
else:
op = esM.componentModelingDict[esM.componentNames[componentName]]. \
getOptimalValues('operationVariablesOptimum')['values'].loc[componentName]
# Get a map of the component's network
if esM is None:
mapN = {}
for conn in operationVariablesOptimumData.index:
loc, loc_ = conn
mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_})
mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc})
else:
mapN = esM.getComponent(componentName)._mapL
# Initialize list for nodal injection and withdrawal time series data
injectionWithdrawalRates, nodeIx = [], []
# Reset connections set (not all indices might be in the operationVariablesOptimumData data)
connections = set()
# For each node loc, compute the injection and withdrawal rates
for loc, locConn in mapN.items():
# As in a few cases zero columns/ rows are dropped from data frames, two lists
# of eligible connection indices are created.
ixIn, ixOut = [], []
for loc_, conn in locConn.items():
if (loc, loc_) in op.index:
ixOut.append((loc, loc_)), connections.add((loc, loc_))
if (loc_, loc) in op.index:
ixIn.append((loc_, loc)), connections.add((loc_, loc))
# If either list has at least one entry, the incoming and outgoing flows are selected
# from the original optimal flow variables and aggregated. The resulting commodity
# withdrawals from the network are positive while injections are negative.
if (len(ixIn) != 0) | (len(ixOut) != 0):
injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum())
nodeIx.append(loc)
# Concat data to a pandas dataframe
injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1)
return injectionWithdrawalRates
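# Minimal usage sketch (added for illustration; node names and flow values are made
# up): the fluid-flow DataFrame has a two-level index (start node, end node) and one
# column per time step, as sketched in the docstring above.
def _exampleGetInjectionWithdrawalRates():  # pragma: no cover
    flows = pd.DataFrame(
        [[0.1, 0.0], [0.0, 0.3], [0.9, 0.9], [1.1, 0.2]],
        index=pd.MultiIndex.from_tuples(
            [('node1', 'node2'), ('node2', 'node3'), ('node2', 'node1'), ('node3', 'node2')]),
        columns=[0, 1])
    # withdrawals from the network come out positive, injections negative
    return getInjectionWithdrawalRates(operationVariablesOptimumData=flows)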
def getNetworkLengthsFromESM(componentName, esM):
"""
Obtains the pipeline lengths of a transmission component in an EnergySystemModel class.
:param componentName: name of the network component in the EnergySystemModel class
(only required if the fluid flows are to be obtained from the EnergySystemModel class)
|br| * the default value is ''
:type componentName: string
:param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
specified if the operationVariablesOptimumData are to be obtained from the
EnergySystemModel object)
|br| * the default value is None
:type esM: FINE EnergySystemModel
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
"""
utils.isString(componentName)
utils.isEnergySystemModelInstance(esM)
distances = esM.getComponent(componentName).distances.copy()
indexMap = esM.getComponent(componentName)._mapC
distances.index = [indexMap[ix] for ix in distances.index]
return distances
def getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength):
"""
If a pipe is longer than maxPipeLength than it will be split into several pipes with equidistant length,
i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1
:param shapeFilePath: path to a shape file which connects the gas injection/ withdrawal nodes with each other. The rows of the
file describe connections between the injection/ withdrawal nodes. The required geometry of these connections is a shapely
LineString. Additionally, the file has two columns holding the names of the two injection/ withdrawal nodes (start and end
point of the LineString).
:type shapeFilePath: string
:param regColumn1: name of the column which holds the name of the injection/ withdrawal node at the beginning of the line
:type regColumn1: string
:param regColumn2: name of the column which holds the name of the injection/ withdrawal node at the end of the line
:type regColumn2: string
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar].
It holds: dic_node_minPress[index] <= dic_node_maxPress[index].
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
:param minPipeLength: desired minimum length of a pipe in [m], note: not always possible to achieve.
:type minPipeLength: positive number
:param maxPipeLength: determines the maximal length of a pipe in [m].
:type maxPipeLength: positive number
:return: distances_new - pipeline distances in m
:rtype: pandas series
:return: dic_node_minPress_new - dictionary that contains for every node of the network its lower pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return: dic_node_maxPress_new - dictionary that contains for every node of the network its upper pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return: gdfNodes - GeoDataFrame with the nodes of the network and their names
:rtype: geopandas GeoDataFrame
:return: gdfEdges - GeoDataFrame with the edges of the network and the names of their start and end nodes
:rtype: geopandas GeoDataFrame
"""
# type and value check
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
utils.isString(regColumn1), utils.isString(regColumn2)
utils.isStrictlyPositiveNumber(maxPipeLength)
utils.isStrictlyPositiveNumber(minPipeLength)
# Read shape file with linestrings connecting the entry/ exit nodes of the gas
gdf=gpd.read_file(shapeFilePath)
if not (gdf.geometry.type == 'LineString').all():
raise ValueError("Geometries of the shape file have to be LineStrings")
print('Number of edges before segmentation:', len(gdf))
originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2])
print('Number of nodes before segmentation:', len(originalNodesSet))
# Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates
coordNames, coords = [], []
pMin, pMax = [], []
lines = []
# Break linestrings into linear pieces
for i, row in gdf.iterrows():
# Simplify linestring (to increase the minimum length of pipeline connections wherever possible)
line = row.geometry.simplify(minPipeLength)
lines.append(line)
row.geometry = line
# Get new nodes
coords_ = [i for i in line.coords]
coords.extend(coords_)
coordNames_ = [row[regColumn1]]
coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j)
for j in range(len(coords_)-2)])
coordNames_.append(row[regColumn2])
coordNames.extend(coordNames_)
# Get averaged lower and upper pressure levels
pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) +
dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])
pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) +
dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])
gdf['geometry'] = lines
# Create DataFrame of old and new nodes and drop duplicates
dfNodes = pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T
dfNodes = dfNodes.drop_duplicates(subset='lon_lat')
dfNodes = dfNodes.drop_duplicates(subset='nodeName')
# Obtain edges from shape file, assign names to them, delete duplicates
nodesIn_nodesOut = []
nodesIn = []
nodesOut = []
lineStrings = []
for i, row in gdf.iterrows():
coords_ = [i for i in row.geometry.coords]
for j in range(len(coords_)-1):
nodeIn = dfNodes.loc[dfNodes['lon_lat'] == coords_[j],'nodeName'].iloc[0]
nodeOut = dfNodes.loc[dfNodes['lon_lat'] == coords_[j+1],'nodeName'].iloc[0]
nodesIn.append(nodeIn), nodesOut.append(nodeOut)
nodes = [nodeIn,nodeOut]
nodes.sort()
nodesIn_nodesOut.append('edge_' + nodes[0] + '_' + nodes[1])
lineStrings.append(shp.geometry.LineString([coords_[j],coords_[j+1]]))
dfEdges = pd.DataFrame([nodesIn, nodesOut, nodesIn_nodesOut, lineStrings],
index=['nodeIn', 'nodeOut','edgeName','geometry']).T
dfEdges = dfEdges.drop_duplicates(subset='edgeName')
gdfEdges = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})
print('Number of edges after 1. segmentation:', len(gdfEdges))
print('Number of nodes after 1. segmentation:', len(dfNodes))
# Add nodes when line distances are too long
newNodes, newLines, newNodesName, newLinesName = [], [], [], []
nodesIn, nodesOut, coords = [], [], []
pMin, pMax = [], []
for i, row in gdfEdges.iterrows():
# If lines are too long, segment them
if np.round(row['geometry'].length,2) > maxPipeLength:
nbNewNodes = int(np.floor(row['geometry'].length/maxPipeLength))
line = row.geometry
newNodes_, newLines_, newNodesName_, newLinesName_ = [], [], [], []
nodesIn_, nodesOut_, coords_ = [], [], []
pMin_, pMax_ = [], []
nodeStart, nodeEnd = line.interpolate(0), line.interpolate(line.length)
nodeStartName = row['nodeIn']
pMinIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMin'].iloc[0]
pMinOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMin'].iloc[0]
pMaxIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMax'].iloc[0]
pMaxOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMax'].iloc[0]
spacing = row['geometry'].length/(nbNewNodes+1)
for j in range(1,nbNewNodes+1):
newNode = line.interpolate(j*spacing)
newNodes_.append(newNode)
coords_.append((newNode.x, newNode.y))
newNodeName = row['nodeIn'] + '_' + row['nodeOut'] + '_a_' + str(j)
newNodesName_.append(newNodeName)
newLine = shp.geometry.LineString([nodeStart,newNode])
newLines_.append(newLine)
newLinesName_.append('temp'), nodesIn_.append(nodeStartName), nodesOut_.append(newNodeName)
pMin_.append((pMinIn*(nbNewNodes-j+1) + pMinOut*j)/(nbNewNodes+1))
pMax_.append((pMaxIn*(nbNewNodes-j+1) + pMaxOut*j)/(nbNewNodes+1))
nodeStart, nodeStartName = newNode, newNodeName
newLines_.append(shp.geometry.LineString([newNode,nodeEnd]))
newLinesName_.append('temp')
nodesIn_.append(newNodeName), nodesOut_.append(row['nodeOut'])
newNodes.extend(newNodes_), newLines.extend(newLines_), newNodesName.extend(newNodesName_)
newLinesName.extend(newLinesName_), pMin.extend(pMin_), pMax.extend(pMax_)
nodesIn.extend(nodesIn_), nodesOut.extend(nodesOut_), coords.extend(coords_)
if len(newNodes) > 0:
dfNodes = dfNodes.append(pd.DataFrame([newNodesName, pMin, pMax, coords],
index=['nodeName','pMin','pMax','lon_lat']).T)
dfEdges = pd.DataFrame([nodesIn, nodesOut, newLinesName, newLines],
index=['nodeIn', 'nodeOut','edgeName','geometry']).T
gdfEdgesNew = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})
gdfEdges = gdfEdges.append(gdfEdgesNew)
gdfEdges = gdfEdges[gdfEdges.geometry.length.round(2) <= maxPipeLength]
del gdfEdges['edgeName']
renameDict = {name: 'auxNode' + str(i) for i, name in enumerate(dfNodes.nodeName.values)
if name not in originalNodesSet}
for node in originalNodesSet:
renameDict.update({node:node})
gdfEdges['nodeIn'] = gdfEdges.apply(lambda x: renameDict[x['nodeIn']], axis=1)
gdfEdges['nodeOut'] = gdfEdges.apply(lambda x: renameDict[x['nodeOut']], axis=1)
gdfEdges['distances'] = gdfEdges['geometry'].length
print('Number of edges after 2. segmentation:', len(gdfEdges))
dfNodes['nodeName'] = dfNodes.apply(lambda x: renameDict[x['nodeName']], axis=1)
dfNodes['geometry'] = dfNodes.apply(lambda x: shp.geometry.Point(x['lon_lat']), axis=1)
del dfNodes['lon_lat']
gdfNodes = gpd.GeoDataFrame(dfNodes,crs=gdf.crs).to_crs({'init': 'epsg:3035'})
print('Number of nodes after 2. segmentation:', len(gdfNodes))
print('Minimum length [m]:', gdfEdges.distances.min(), 'Maximum length [m]:', gdfEdges.distances.max())
distances_new = pd.Series(gdfEdges['distances'].values,
index = [(n1, n2) for n1, n2 in zip(gdfEdges['nodeIn'],gdfEdges['nodeOut'])])
dic_node_minPress_new = {n:pMin for n, pMin in zip(gdfNodes['nodeName'], gdfNodes['pMin'])}
dic_node_maxPress_new = {n:pMax for n, pMax in zip(gdfNodes['nodeName'], gdfNodes['pMax'])}
return distances_new, dic_node_minPress_new, dic_node_maxPress_new, gdfNodes, gdfEdges
def createNetwork(distances):
"""
Creates undirected network/graph from given distances; updates distances such that
either (u,v) or (v,u) are contained
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:return: graph of the network corresponding to the distances
:rtype: graph object of networkx
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
"""
# type and value check
isPandasSeriesPositiveNumber(distances)
for index in distances.index:
if not isinstance(index, tuple):
raise TypeError("Index of pandas series has to be a tuple")
# first check if distances are consistent, i.e. if (u,v) and (v,u) are in distances they have to have the same
# length and we will delete one of them
# tmp list for reversed edges that we will delete
tmp_edges = []
for edge in distances.index:
if (edge[1], edge[0]) in distances.index and (edge[1], edge[0]) not in tmp_edges:
assert (distances[edge] == distances[(edge[1], edge[0])])
tmp_edges.append(edge)
# delete tmp_edges because reversed edges are already contained and we consider an undirected graph
distances = distances.drop(tmp_edges)
# get edges for graph
edges = distances.index
# create empty graph
G = nx.Graph()
# create graph from given edges and add length as edge attribute
for edge in edges:
G.add_edge(edge[0], edge[1], length=distances[edge])
return G, distances
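# Minimal usage sketch (added; node names and lengths are made up): distances are a
# pandas Series indexed by (startNode, endNode) tuples; if both directions of an
# edge are given with equal length, one of them is dropped since the graph is undirected.
def _exampleCreateNetwork():  # pragma: no cover
    distances = pd.Series([1000.0, 2000.0, 1000.0],
                          index=[('n1', 'n2'), ('n2', 'n3'), ('n2', 'n1')])
    G, distances = createNetwork(distances)
    return G, distances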
def createSteinerTree(graph, distances, inner_nodes):
"""
Computes a steiner tree with minimal sum of pipeline lengths;
updates distances such that only arcs of the spanning tree are contained with corresponding length
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param inner_nodes: list of terminal nodes that have to be connected by the Steiner tree
:type inner_nodes: list
:return: Steiner tree with minimal sum of pipeline lengths
:rtype: graph object of networkx
"""
from networkx.algorithms import approximation
# type and value check
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
# compute spanning tree with minimal sum of pipeline lengths
S = approximation.steiner_tree(graph, terminal_nodes=inner_nodes, weight='length')
# TODO check why function fails when MST function is not called here
S = nx.minimum_spanning_tree(S, weight='length')
# delete edges that are in graph but not in the tree from the distance matrix
edgesToDelete = []
for edge in distances.index:
# check if edge or its reversed edge are contained in the tree
# you have to check both directions because we have an undirected graph
if edge not in S.edges and (edge[1], edge[0]) not in S.edges:
edgesToDelete.append(edge)
distances = distances.drop(edgesToDelete)
return S, distances
def _generateRobustScenarios(startNode_endNode, **kwargs):
startNode = startNode_endNode[0]
endNode = startNode_endNode[1]
return startNode_endNode, computeSingleSpecialScenario(startNode=startNode, endNode=endNode, **kwargs)
def generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress,
solver='glpk', threads=1, verbose=0):
"""
Compute for every node combination a special robust scenario according to Robinius et al. (2019)
and Labbé et al. (2019)
:param injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative) for every time step and node; unit [kg/s]
:type: pandas dataframe
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param threads: number of threads used for parallelization
:type threads: positive integer
:param verbose: if > 0, parallelization progress is displayed
:type verbose: int
:return dictionary that contains for every node pair a dictionary containing all arc flows of the corresponding
special scenario
:rtype: dictionary key: (node1,node2), value: dictionary: key: arc, value: arc flow in [kg/s]
:return list of entry node
:rtype: list of strings
:return list of exit node
:rtype: list of strings
"""
# Type and value checks
isPandasDataFrameNumber(injectionWithdrawalRates)
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
# get for every entry/exit node the minimal and maximal injection rate and save it in a
# dictionary: key: node, value: min Rate; respectively max Rate in [kg/s]
# we note that inner nodes are handled separately in the computation of the special scenario
dic_nodes_MinCapacity = {}
dic_nodes_MaxCapacity = {}
# list of entry nodes and exit nodes; note node can be in both for example storages
entries = []
exits = []
inners = []
for node in list(injectionWithdrawalRates.columns.values):
minRate = injectionWithdrawalRates[node].min()
maxRate = injectionWithdrawalRates[node].max()
assert (minRate <= maxRate)
dic_nodes_MinCapacity[node] = minRate
dic_nodes_MaxCapacity[node] = maxRate
# if minRate is negative, then node is an entry; if maxRate is positive, then node is an exit
if minRate < 0.0:
entries.append(node)
if maxRate > 0.0:
exits.append(node)
elif maxRate > 0:
exits.append(node)
else:
inners.append(node)
maxPressuresAreEqual = True if len(set(dic_node_maxPress.values())) == 1 else False
p_exits = [dic_node_minPress[exit] for exit in exits]
p_entries_inners = [dic_node_minPress[node] for node in entries]
p_inners = [dic_node_minPress[node] for node in inners]
p_entries_inners.extend(p_inners)
minPressureExitsIsLarger = True if min(p_exits) >= max(p_entries_inners) else False
# compute special scenario for each node combination; see the papers Robinius et al. (2019) and Labbé et al. (2019)
# save arc flows of special scenarios for each node combination;
# dictionary: key: node pair, value: dictionary: key: arc, value: arc flow
dic_nodePair_flows = {}
if maxPressuresAreEqual and minPressureExitsIsLarger:
if verbose == 0:
print('Reduced robust scenario set can be generated' +
' (pMax is equal at all nodes & pMin at exits is >= at inner and entry nodes).')
nodes = [(startNode, endNode) for startNode in entries for endNode in exits if startNode != endNode]
else:
nodes = [(startNode, endNode) for startNode in graph.nodes for endNode in graph.nodes if startNode != endNode]
pool = Pool(threads)
for i, values in enumerate(pool.imap(partial(_generateRobustScenarios, graph=graph, distances=distances,
entries=entries, exits=exits, dic_nodes_MinCapacity=dic_nodes_MinCapacity,
dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, solver=solver),
nodes), 1):
if verbose == 0:
sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(nodes) * 100)))
dic_nodePair_flows[values[0]] = values[1]
pool.close()
pool.join()
return dic_nodePair_flows, entries, exits
def computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity,
dic_nodes_MaxCapacity, specialScenario=True, solver='glpk'):
"""
Compute special robust scenario for given node combination according to Robinius et al. (2019)
and Labbé et al. (2019)
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param entries: list of entry nodes of the network
:type entries: list of strings
:param exits: list of exit nodes of the network
:type exits: list of strings
:param startNode: node of the network (starting node of the special scenario)
:type startNode: string
:param endNode: node of the network (end node of special scenario)
:type endNode: string
:param dic_nodes_MinCapacity: dictionary containing minimal capacity for each node
:type dic_nodes_MinCapacity: dictionary: key: node of the network, value: float
:param dic_nodes_MaxCapacity: dictionary containing maximal capacity for each node
:type dic_nodes_MaxCapacity: dictionary: key: node of the network, value: float
:param specialScenario: bool: True if we compute special robust scenario; False if we compute scenario for fixed
demand vector, e.g., for scenario of a time step
:type specialScenario: bool
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:return dictionary that contains for every arc the corresponding arc flows of the (special) scenario
:rtype: dictionary key: arc, value: arc flow
"""
# Type and value check
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
isListOfStrings(entries)
isListOfStrings(exits)
utils.isString(startNode)
utils.isString(endNode)
if isinstance(dic_nodes_MinCapacity, dict) and isinstance(dic_nodes_MaxCapacity, dict):
if not (dic_nodes_MinCapacity.keys() == dic_nodes_MaxCapacity.keys()):
raise TypeError("Dictionaries for min and max capacity need same keys")
for node in dic_nodes_MinCapacity.keys():
if not (isinstance(dic_nodes_MinCapacity[node], float) or isinstance(dic_nodes_MinCapacity[node], int)):
raise TypeError("The input argument has to be an number")
if not (isinstance(dic_nodes_MaxCapacity[node], float) or isinstance(dic_nodes_MaxCapacity[node], int)):
raise TypeError("The input argument has to be an number")
if dic_nodes_MaxCapacity[node] < dic_nodes_MinCapacity[node]:
raise ValueError("minimal node capacity has to be equal or smaller than maximal node capacity")
else:
raise TypeError("dic_nodes_MinCapacity and dic_nodes_MinCapacity have to be dictionaries")
isBool(specialScenario)
# we build concrete Pyomo Model
model = py.ConcreteModel()
# Description model: we have a simple directed graph. We allow negative flows because a pipe can be used in both
# directions by the flows
model.Nodes = py.Set(initialize=graph.nodes)
# important to use distances.keys() instead of graph.edges such that we do not have key errors later on because
# the edges in graph are undirected and in distances.keys() directed
model.Arcs = py.Set(initialize=distances.keys(), dimen=2)
# create demand variables for every node;
# if specialScenario is true, then we compute special scenario, i.e. entry/exit demand variables are bounded by
# min(0,minimal_capacity) <= demandVariable <= max(0, maximal_capacity)
# demand variables for inner nodes are set to zero
# if specialScenario is false, the demand variable is just bounded by the minimal and maximal capacity
if specialScenario:
def demandCapacities(model, node):
if node in entries or node in exits:
return min(0, dic_nodes_MinCapacity[node]), max(0, dic_nodes_MaxCapacity[node])
else:
return 0, 0
model.Demand = py.Var(model.Nodes, bounds=demandCapacities)
else:
# we do not compute special scenarios; we just compute flows for given, possibly fixed, demands
def demandCapacities(model, node):
return dic_nodes_MinCapacity[node], dic_nodes_MaxCapacity[node]
model.Demand = py.Var(model.Nodes, bounds=demandCapacities)
# create arc flow variables for every arc of the network
model.Flow = py.Var(model.Arcs)
# compute NodesOut, i.e., set of nodes that are connected to considered node by outgoing arc
def nodes_out_init(model, node):
retval = []
for (i, j) in model.Arcs:
if i == node:
retval.append(j)
return retval
model.NodesOut = py.Set(model.Nodes, initialize=nodes_out_init)
# compute NodesIn, i.e., set of nodes connected to considered node by ingoing arc
def nodes_in_init(model, node):
retval = []
for (i, j) in model.Arcs:
if j == node:
retval.append(i)
return retval
model.NodesIn = py.Set(model.Nodes, initialize=nodes_in_init)
# add flow balance constraints corresponding to the node demands
def flow_balance_rule(model, node):
return sum(model.Flow[i, node] for i in model.NodesIn[node]) \
- sum(model.Flow[node, j] for j in model.NodesOut[node]) \
== model.Demand[node]
model.FlowBalance_cons = py.Constraint(model.Nodes, rule=flow_balance_rule)
# compute unique flow-path P(startNode,endNode) from entry to exit; given by list of nodes of the path
pathNodes = nx.shortest_path(graph, source=startNode, target=endNode)
# non zero coefficients of objective function
dic_arc_coef = {}
# determine coefficients for objective function
# if for an arc (u,v), u, respectively v, are not in pathNodes, then the coefficient is 0
# if arc (u,v) of pathNodes satisfies P(startNode, u) subset P(startNode,v), then coefficient is 1, otherwise -1
for index in range(0, len(pathNodes) - 1):
# check which direction of the arc is contained in the graph
if (pathNodes[index], pathNodes[index + 1]) in model.Arcs:
dic_arc_coef[(pathNodes[index], pathNodes[index + 1])] = 1
else:
dic_arc_coef[(pathNodes[index + 1], pathNodes[index])] = -1
# we set objective
def obj_rule(model):
return sum(dic_arc_coef[arc] * model.Flow[arc] for arc in dic_arc_coef.keys())
model.Obj = py.Objective(rule=obj_rule, sense=py.maximize)
# Create a solver
opt = SolverFactory(solver)
# Solve optimization model
results = opt.solve(model)
# status of solver
status = results.solver.status
# termination condition
termCondition = results.solver.termination_condition
# save the solution of the flows in a dictionary key: arcs, values: flow
dic_scenario_flow = {}
if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:
utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +
'. No output is generated.', 0, 0)
elif termCondition == TerminationCondition.infeasibleOrUnbounded or \
termCondition == TerminationCondition.infeasible or \
termCondition == TerminationCondition.unbounded:
utils.output('Optimization problem is ' + str(termCondition) +
'. No output is generated.', 0, 0)
else:
# If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown
# status), show a warning message.
if not termCondition == TerminationCondition.optimal:
warnings.warn('Output is generated for a non-optimal solution.')
# dic_arcScenario has key (v,w,scenario) and value flow will be needed for MIP
for arc in model.Arcs:
dic_scenario_flow[arc] = model.Flow[arc].value
return dic_scenario_flow
def computeLargeMergedDiameters(dic_subSetDiam_costs, nDigits=6):
"""
Compute merged diameters, i.e. compute equivalent single diameter for two looped pipes.
:param dic_subSetDiam_costs: dictionary containing diameters in [m] and costs in [Euro/m]
:type: dictionary: key: diameter, value: costs
:param nDigits: number of digits used in the round function
|br| * the default value is 6
:type nDigits: positive int
:return dic_newDiam_costs: dictionary containing merged diameters in [m] and costs in [Euro/m]
:rtype: dictionary: key: diameter, value: costs
:return dic_newDiam_oldDiam: dictionary matching new diameters to old diameters
:rtype: dictionary: key: new diameter, value: corresponding old diameter, which will be used in the looped pipe
"""
# Type and value check
if isinstance(dic_subSetDiam_costs, dict):
for diam in dic_subSetDiam_costs.keys():
utils.isStrictlyPositiveNumber(diam)
utils.isStrictlyPositiveNumber(dic_subSetDiam_costs[diam])
else:
raise TypeError("The input has to be a dictionary")
utils.isStrictlyPositiveInt(nDigits)
dic_newDiam_costs = {}
dic_newDiam_oldDiam = {}
for diam in dic_subSetDiam_costs.keys():
# compute new diameter in [m] and its costs in [Euro/m]
# for the formula see (1) in the paper by Reuß et al.
# since both pipes of the loop currently have the same diameter, the formula below is
# equivalent to 2^(2/5) * diam and thus, we do not have to transform diam from [m] to [mm]
newDiam = ((diam ** (5 / 2) + diam ** (5 / 2)) ** (2 / 5)).__round__(nDigits)
# costs are two times the costs of diam because newDiam represents two looped pipes with diameter diam
newCosts = 2 * dic_subSetDiam_costs[diam]
dic_newDiam_costs[newDiam] = newCosts
dic_newDiam_oldDiam[newDiam] = diam
return dic_newDiam_costs, dic_newDiam_oldDiam
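# Minimal usage sketch for computeLargeMergedDiameters; the diameters and costs below are made-up illustration
# values, not the FINE default set. Merging a looped pipe of diameter d yields (d**2.5 + d**2.5)**0.4 = 2**0.4 * d
# at twice the cost per metre.
def _example_merged_diameter_sketch():
    dic_example = {0.5: 70.0, 0.6: 80.0}              # diameter [m] -> cost [Euro/m]; illustrative
    merged, matching = computeLargeMergedDiameters(dic_example)
    expected_diam = round(2 ** 0.4 * 0.5, 6)          # merged diameter of the 0.5 m pipe
    assert expected_diam in merged and merged[expected_diam] == 2 * 70.0
    return merged, matching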
def determinePressureDropCoef(dic_scenario_flows, distances, dic_node_minPress, dic_node_maxPress,
diameters, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965, nDigits=6):
"""
Compute for each scenario, diameter, and each arc the corresponding pressure drop
:param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all
arc flows in [kg/s] of the corresponding (special) scenario
:type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param diameters: list of diameters in [m]
:type: list of strictly positive numbers
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float; optional
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float; optional
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float; optional
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float; optional
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float; optional
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float; optional
:param nDigits: number of digits used in the round function
|br| * the default value is 6
:type nDigits: positive int; optional
:return dictionary that contains for every scenario and diameter the corresponding pressure drops
:rtype: dictionary key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop
"""
# check type and value
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
isPandasSeriesPositiveNumber(distances)
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
if isinstance(diameters, list):
for diam in diameters:
utils.isPositiveNumber(diam)
else:
raise TypeError("Diameters has to be a list")
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
if not isinstance(T_m, float):
raise TypeError("The input argument has to be a number")
if not isinstance(T_n, float):
raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
utils.isStrictlyPositiveInt(nDigits)
# compute for each diameter, scenario, and arc its pressure drop
# save results in dic: key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop
dic_pressureDropCoef = {}
for diameter in diameters:
for nodePair in dic_scenario_flows.keys():
# initialize dictionary
dic_pressureDropCoef[(diameter, nodePair)] = {}
# compute cross section of considered pipe and diameter
tmpvalue_A = 0.25 * np.pi * diameter ** 2
for arc in dic_scenario_flows[nodePair].keys():
# check if flow is unequal to zero
if dic_scenario_flows[nodePair][arc] != 0.0:
# Compute approximation of average pressure flow in pipe (u,v) by
# if flow((u,v)) is positive then set p_min to lower pressure bound of v and p_max to
# upper pressure bound u
# if flow((u,v)) is negative then set p_min to lower pressure bound of u and p_max to
# upper pressure bound v
if dic_scenario_flows[nodePair][arc] > 0:
p_min = dic_node_minPress[arc[1]]
p_max = dic_node_maxPress[arc[0]]
else:
p_min = dic_node_minPress[arc[0]]
p_max = dic_node_maxPress[arc[1]]
# compute approximation of average pressure
p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min))
# approximation for density
rho = 0.11922 * p_m ** 0.91192 - 0.17264
# approximation of the realgasfactor
Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050
K_m = Z_m / Z_n
# approximation of the dynamic viscosity
eta = 1.04298 * 10 ** (-10) * p_m ** 1.53560 + 8.79987 * 10 ** (-6)
nue = eta / rho
# compute velocity
tmpvalue_w = (abs(dic_scenario_flows[nodePair][arc]) / rho) / tmpvalue_A
# compute reynolds number
tmpvalue_Re = tmpvalue_w * (diameter / nue)
tmpvalue_alpha = np.exp(-np.exp(6.75 - 0.0025 * tmpvalue_Re))
tmpvalue_Lambda = (64 / tmpvalue_Re) * (1 - tmpvalue_alpha) + tmpvalue_alpha * (
-2 * np.log10(2.7 * (np.log10(tmpvalue_Re) ** 1.2 / tmpvalue_Re) + ir / (3.71 * 1000 *
diameter))) ** (-2)
# note p_n is in [bar] instead of [Pa], thus we divide tmpvalue_C by 10**5
# explanation: we have p_i^2 - p_j^2 = C. If p_i is in [Pa] and we want p_i in [bar], this leads to
# (p_i/10^5)^2 - (p_j/10^5)^2 = C/10^10,
# but since p_n entering the computation of C was already converted from [Pa] to [bar], we only divide C by 10^5
tmpvalue_C_bar = tmpvalue_Lambda * 16 * rho_n * T_m * p_n * K_m / (np.pi ** 2 * T_n * 10 ** 5)
# compute final pressure drop coefficient depending on the flow
tmp_value_C_coef = (distances[arc] / rho_n ** 2) * \
(tmpvalue_C_bar * dic_scenario_flows[nodePair][arc] *
abs(dic_scenario_flows[nodePair][arc]) / diameter ** 5)
# save pressure drop for considered diameter, scenario, and arc
dic_pressureDropCoef[(diameter, nodePair)][arc] = tmp_value_C_coef
else:
dic_pressureDropCoef[(diameter, nodePair)][arc] = 0
return dic_pressureDropCoef
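# Worked sketch of the average-pressure approximation used above; the bounds are illustrative values only.
# For p_min = 60 bar and p_max = 70 bar the approximation lies between the two bounds.
def _example_average_pressure_sketch():
    p_min, p_max = 60.0, 70.0                         # [bar]; illustrative pressure bounds
    p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min))
    assert p_min < p_m < p_max                        # ~65.13 bar for these bounds
    return p_m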
def determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureDropCoef, specialScenarioNames,
dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True,
solver='glpk', threads=4, verbose=0):
"""
Model of optimal pipeline sizing (diameter selection) w.r.t. the given scenarios
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_pressureDropCoef: dictionary that contains for every scenario and diameter the
corresponding pressure drops in [bar]
:type dic_pressureDropCoef: dictionary: keys: scenarioName; value: dict: key: arc, value: pressure drop in [bar]
:param specialScenarioNames: list of names of scenarios. In robust case tuples (startNode, endNode).
:type specialScenarioNames: list of tuples in the robust case, otherwise list of time Steps
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param dic_diam_costs: dictionary that contains for every diameter in [m] its costs [Euro/m]
:type dic_diam_costs: dictionary key: diameter, value: non-negative float
:param robust: Bool that is True if we optimize w.r.t. robust scenarios, otherwise False.
:type robust: bool
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:param threads: number of threads used for optimization (if gurobi is used)
:type threads: positive integer
:param verbose: if > 0, parallelization progress is displayed
:type verbose: int
:return dictionary that contains for every arc the optimal diameter in [m]
:rtype dictionary: key: arc, value: optimal diameter
:return dictionary that contains for every scenario the corresponding pressure levels
:rtype dictionary: key: scenarioName, value: dict: key: node, value: pressure level of node
"""
# type and value checks
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
if not isinstance(dic_pressureDropCoef, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(specialScenarioNames, list):
if robust:
for scenario in specialScenarioNames:
if not isinstance(scenario, tuple):
raise TypeError("In the robust case the scenario names have to be tuples")
else:
raise TypeError("The input argument has to be a list")
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
if isinstance(dic_diam_costs, dict):
for diam in dic_diam_costs.keys():
utils.isStrictlyPositiveNumber(diam)
utils.isStrictlyPositiveNumber(dic_diam_costs[diam])
else:
raise TypeError("The input has to be a dictionary")
if not isinstance(robust, bool):
raise TypeError("The input has to be a bool")
utils.isString(solver)
utils.isPositiveNumber(verbose)
# set list of available diameters
diameters = dic_diam_costs.keys()
# build concrete pyomo model
model = py.ConcreteModel()
# sets for nodes, arcs, diameters, scenarios
model.nodes = py.Set(initialize=graph.nodes)
model.arcs = py.Set(initialize=list(distances.keys()), dimen=2)
# diameters assuming that each pipe has the same diameter options
model.diameters = py.Set(initialize=diameters)
# if we have special scenarios, scenario names are tuples, otherwise not
if robust:
# set indices for each scenario by its nodePair = (startnode, endnode)
model.scenarios = py.Set(initialize=specialScenarioNames, dimen=2)
else:
# set indices for each timeStep number
model.scenarios = py.Set(initialize=specialScenarioNames, dimen=1)
# create variables: the binary variables x are shared by all scenarios,
# the pressure variables are different for each scenario
model.x = py.Var(model.arcs, model.diameters, domain=py.Binary)
if robust:
def pressureBounds(model, node, startnode, endnode):
return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2
model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)
else:
def pressureBounds(model, node, timeStep):
return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2
model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)
# objective: minimize the costs
def obj_rule(model):
return sum(
sum(dic_diam_costs[diam] * distances[arc] * model.x[arc, diam] for diam in model.diameters)
for arc in model.arcs)
model.Obj = py.Objective(rule=obj_rule)
# pressure drop constraint for each arc and each scenario
if robust:
def pressure_drop(model, arc0, arc1, scenarioStart, scenarioEnd):
return model.pi[arc1, (scenarioStart, scenarioEnd)] - model.pi[arc0, (scenarioStart, scenarioEnd)] == \
-sum(dic_pressureDropCoef[(diam, (scenarioStart, scenarioEnd))][(arc0, arc1)] *
model.x[arc0, arc1, diam] for diam in model.diameters)
model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_drop)
else:
def pressure_dropNotRobust(model, arc0, arc1, timeStep):
return model.pi[arc1, timeStep] - model.pi[arc0, timeStep] == \
-sum(dic_pressureDropCoef[(diam, timeStep)][(arc0, arc1)] *
model.x[arc0, arc1, diam] for diam in model.diameters)
model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_dropNotRobust)
# ensure that a single diameter per arc is chosen
def selection_diameter(model, arc0, arc1):
return sum(model.x[arc0, arc1, diam] for diam in model.diameters) == 1
model.SelectionDiameter_cons = py.Constraint(model.arcs, rule=selection_diameter)
# Create a solver
opt = SolverFactory(solver)
# Set the specified solver options (if gurobi is used) and solve the optimization problem;
# the solver information is printed.
if (verbose == 2) & (solver == 'gurobi'):
optimizationSpecs = ' LogToConsole=0'
opt.set_options('Threads=' + str(threads) + optimizationSpecs)
results = opt.solve(model, tee=True, keepfiles=False)
else:
results = opt.solve(model, tee=True, report_timing=True, keepfiles=False)
# status of solver
status = results.solver.status
# termination condition
termCondition = results.solver.termination_condition
# write diameter solution to dictionary: key: arc, value: optimal diameter
# write pressure solutions to dictionary; key: scenarioName, value: dict: key: node, value: pressure level in [bar]
dic_arc_diam = {}
dic_scen_node_press = {}
if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:
utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +
'. No output is generated.', 0, 0)
elif termCondition == TerminationCondition.infeasibleOrUnbounded or \
termCondition == TerminationCondition.infeasible or \
termCondition == TerminationCondition.unbounded:
utils.output('Optimization problem is ' + str(termCondition) +
'. No output is generated.', 0, 0)
else:
# If the termination condition is not optimal (e.g. the solver stopped with a feasible but not provably
# optimal solution), show a warning message.
if not termCondition == TerminationCondition.optimal:
warnings.warn('Output is generated for a non-optimal solution.')
# initialize dict with empty dict
for scenario in specialScenarioNames:
dic_scen_node_press[scenario] = {}
for v in model.component_objects(py.Var, active=True):
varobject = getattr(model, str(v))
for index in varobject:
# round because the binary solution values are sometimes only numerically close to one
if str(varobject) == 'x' and round(varobject[index].value) == 1:
dic_arc_diam.update({(index[0], index[1]): index[2]})
elif str(varobject) == 'pi':
if robust:
# sqrt() is needed because the model works with squared pressures due to the transformation
dic_scen_node_press[(index[1], index[2])].update({index[0]: np.sqrt(varobject[index].value)})
else:
# sqrt() is needed because the model works with squared pressures due to the transformation
dic_scen_node_press[(index[1])].update({index[0]: np.sqrt(varobject[index].value)})
return dic_arc_diam, dic_scen_node_press
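# Minimal usage sketch for determineOptimalDiscretePipelineSelection on a single-arc network. Everything below is
# an illustrative assumption, not part of the original module: the graph, pressure bounds, diameters, costs, and
# in particular the pressure drop coefficients, which are made-up numbers rather than values derived from the
# physical formulas above. It assumes networkx, pandas, and the glpk solver are available.
def _example_diameter_selection_sketch():
    import networkx as nx
    import pandas as pd
    toy_graph = nx.Graph()
    toy_graph.add_edge('a', 'b', length=1000.0)
    toy_distances = pd.Series({('a', 'b'): 1000.0})            # [m]
    toy_minPress = {'a': 50.0, 'b': 50.0}                       # [bar]
    toy_maxPress = {'a': 70.0, 'b': 70.0}                       # [bar]
    # illustrative pressure drop per (diameter, scenario) per arc in the squared-pressure units of the model
    toy_dropCoef = {(0.3, ('a', 'b')): {('a', 'b'): 900.0},
                    (0.5, ('a', 'b')): {('a', 'b'): 100.0}}
    toy_diam_costs = {0.3: 50.0, 0.5: 80.0}                     # [Euro/m]
    dic_arc_diam, dic_press = determineOptimalDiscretePipelineSelection(
        toy_graph, toy_distances, toy_dropCoef, [('a', 'b')],
        toy_minPress, toy_maxPress, toy_diam_costs, robust=True, solver='glpk')
    # both diameters are feasible here, so the MIP should pick the cheaper one (0.3 m) for arc ('a', 'b')
    return dic_arc_diam, dic_press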
def _postprocessing(scenario, dic_scenario_flows, graph, **kwargs):
dic_scen_PressLevel = {}
dic_scen_MaxViolPress = math.inf
# copy a list of nodes
tmp_nodes = copy.deepcopy(list(graph.nodes))
# we now set iteratively the pressure level of a single node to its upper pressure bound and then compute the
# unique pressure levels until we find valid pressure levels or have tested all nodes
while tmp_nodes:
# we have not found valid pressure levels for this scenario
# temporary pressure levels
dic_tmp_pressure = {}
for node in list(graph.nodes):
dic_tmp_pressure[node] = None
# choose the node whose pressure level is fixed to the upper pressure bound
current_node = tmp_nodes[0]
validation, tmp_viol = computePressureAtNode(graph=graph, node=current_node, nodeUpperBound=current_node,
dic_scenario_flows=dic_scenario_flows[scenario], dic_node_pressure=dic_tmp_pressure, **kwargs)
# if validation is True, then we have feasible pressure levels; empty the list of nodes that still have to be
# considered
if validation:
tmp_nodes = []
# we have feasible pressure level and save them
dic_scen_PressLevel = dic_tmp_pressure
dic_scen_MaxViolPress = tmp_viol
else:
# remove considered entry from list of nodes that will be considered for fixing the pressure level
tmp_nodes.remove(tmp_nodes[0])
# we update the maximal pressure level violation
if tmp_viol < dic_scen_MaxViolPress:
# save currently best pressure levels
dic_scen_PressLevel = copy.deepcopy(dic_tmp_pressure)
dic_scen_MaxViolPress = tmp_viol
return scenario, dic_scen_PressLevel, dic_scen_MaxViolPress
def postprocessing(graph, distances, dic_arc_diam, dic_scenario_flows, dic_node_minPress, dic_node_maxPress,
threads=1, verbose=0):
"""
Compute "more" accurate pressure levels for the considered scenarios in the network with optimal diameters
Apply the postprocessing of the Master's thesis, with the adaptation that potentially every node is considered
for fixing its pressure level to the upper pressure bound.
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all
arc flows in [kg/s] of the corresponding (special) scenario
:type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param threads: number of threads used for parallelization
:type threads: positive integer
:param verbose: if > 0, parallelization progress is displayed
:type verbose: int
:return: dictionary that contains for every scenario the corresponding pressure levels in [bar]
:rtype: dictionary key: scenarioName, value: dic: key: node, value: pressure level
:return: dictionary that contains for every scenario the maximal pressure bound violation in [bar]
:rtype: dictionary key: scenarioName, value: float = maximal pressure bound violation
"""
# Type and value check
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
else:
raise TypeError("The input has to be a dictionary")
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
# best found pressure levels for scenarios; dic key: scenario, value: dic: key: node, value: pressure level in [bar]
dic_scen_PressLevel = {}
# maximal violation of pressure bounds; zero if no violation exists; dic: key: scenario, value: pressure violation
dic_scen_MaxViolPress = {}
# we compute "precise" pressure levels for every scenario
pool = Pool(threads)
scenarios = [scenario for scenario in dic_scenario_flows.keys()]
for i, values in enumerate(pool.imap(partial(_postprocessing, validation=True, graph=graph, dic_arc_diam=dic_arc_diam,
distances=distances, dic_node_minPress=dic_node_minPress, dic_node_maxPress=dic_node_maxPress, tmp_violation=0,
dic_scenario_flows=dic_scenario_flows), scenarios), 1):
if verbose == 0:
sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(scenarios) * 100)))
dic_scen_PressLevel[values[0]] = values[1]
dic_scen_MaxViolPress[values[0]] = values[2]
pool.close()
pool.join()
return dic_scen_PressLevel, dic_scen_MaxViolPress
def computePressureAtNode(validation, node, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows,
dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure,
ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965, nDigits=6):
"""
Compute pressure levels recursive for given scenario and node that is fixed to its upper pressure level
:param validation: boolean that is False, if the computed pressure levels are infeasible
:type validation: bool
:param node: node of the network that is currently considered for computing the pressure levels
:type node: str
:param nodeUpperBound: node whose pressure level is fixed to the upper bound
:type nodeUpperBound: str
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]
:type: dictionary: key: arc, value: arc flow
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param tmp_violation: violation of the current pressure bounds in [bar]
:type tmp_violation: float
:param dic_node_pressure: dictionary that contains node pressure levels in [bar]
:type dic_node_pressure: dictionary key: node of the network, value: non-negative float
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
:param nDigits: number of digits used in the pandas round function. Is applied to the
specified or determined injection and withdrawal rates.
|br| * the default value is 6
:type nDigits: positive int
:return validation: boolean that is true, if the computed pressure levels are feasible
:rtype: bool
:return maximal violation of the pressure bounds w.r.t. the computed pressure levels in [bar]
:rtype: float
"""
# Type and value check
isBool(validation)
utils.isString(node)
utils.isString(nodeUpperBound)
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
else:
raise TypeError("The input has to be a dictionary")
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
utils.isPositiveNumber(tmp_violation)
if not isinstance(dic_node_pressure, dict):
raise TypeError("The input has to be a dictionary")
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
if not isinstance(T_m, float):
raise TypeError("The input argument has to be a number")
if not isinstance(T_n, float):
raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
utils.isStrictlyPositiveInt(nDigits)
# if node is equal to nodeUpperBound, we fix its pressure level to the upper bound; base case in recursion
if node == nodeUpperBound:
dic_node_pressure[node] = dic_node_maxPress[node]
# list of arcs
arcs = list(distances.keys())
# we now compute the neighbors of the considered node
neighbors = graph.neighbors(node)
# compute pressure levels for neighbor nodes
for neighbor in neighbors:
# check if pressure is already computed
if dic_node_pressure[neighbor] is None:
# check if (node,neighbor) or (neighbor,node) is in graph
if (node, neighbor) in arcs:
# check flow direction for arc (node,neighbor)
if dic_scenario_flows[(node, neighbor)] >= 0.0:
# we know pressure level of beginning node of arc; compute pressure level for end node of arc
dic_node_pressure[neighbor] = computePressureEndnodeArc((node, neighbor), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam, distances,
ir, rho_n, T_m, T_n, p_n, Z_n)
else:
# we know pressure level of endnode
dic_node_pressure[neighbor] = computePressureStartnodeArc((node, neighbor), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam,
distances,
ir, rho_n, T_m, T_n, p_n, Z_n,
tol=10 ** (- nDigits))
else:
# we know that arc (neighbor,node) is contained in the graph
# check flow direction
if dic_scenario_flows[(neighbor, node)] <= 0.0:
# we know pressure of start node
dic_node_pressure[neighbor] = computePressureEndnodeArc((neighbor, node), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam, distances,
ir, rho_n, T_m, T_n, p_n, Z_n)
else:
# we know pressure level of end node
dic_node_pressure[neighbor] = computePressureStartnodeArc((neighbor, node), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam,
distances,
ir, rho_n, T_m, T_n, p_n, Z_n,
tol=10 ** (- nDigits))
# check if new computed pressure level is feasible
if dic_node_pressure[neighbor] == - math.inf:
# pressure violation is really high
tmp_violation = math.inf
return False, tmp_violation
# check if we violate pressure bounds for neighbor node
if dic_node_pressure[neighbor] < dic_node_minPress[neighbor] \
or dic_node_pressure[neighbor] > dic_node_maxPress[neighbor]:
# pressure level is not valid
validation = False
# update pressure bound violation
if dic_node_pressure[neighbor] < dic_node_minPress[neighbor]:
# update violation and violation node if it is bigger
if tmp_violation is None or \
abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) > tmp_violation:
tmp_violation = abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor])
else:
if tmp_violation is None or \
abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) > tmp_violation:
tmp_violation = abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor])
# recursively compute the pressure levels for the neighbors of this neighbor
validation, tmp_violation = computePressureAtNode(validation, neighbor, nodeUpperBound, graph, dic_arc_diam,
distances,
dic_scenario_flows, dic_node_minPress, dic_node_maxPress,
tmp_violation, dic_node_pressure)
return validation, tmp_violation
def computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2,
rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965, tol=10 ** (-4)):
"""
For given arc and pressure level of endNode compute the pressure of the startNode by solving the corresponding
equation system
:param arc: arc of the network for which we know the pressure at the endNode, i.e. the node which receives gas
:type arc: tuple
:param pressureEndNode: pressure level of endNode
:type pressureEndNode: non-negative float
:param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]; note arc flow of arc has to be
positive
:type: dictionary: key: arc, value: arc flow
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
:param tol: tolerance to which accuracy we solve the equation system
|br| * the default value is 10^-4
:type tol: non-negative float
:return: pressure level of startNode in [bar]
:rtype: float
"""
# Type and Value check
if not isinstance(arc, tuple):
raise TypeError("The input has to be a tuple")
utils.isStrictlyPositiveNumber(pressureEndNode)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
isPandasSeriesPositiveNumber(distances)
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
if not isinstance(T_m, float):
raise TypeError("The input argument has to be a number")
if not isinstance(T_n, float):
raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
utils.isStrictlyPositiveNumber(tol)
if dic_scenario_flows[arc] == 0.0:
return pressureEndNode
# define function of nonlinear equation system f(x) = pressure_start^2-pressure_end^2-C
# because then root is our valid pressure level solution, because we know pressure_end
def f(pressure_start):
d = dic_arc_diam[arc]
A = 0.25 * math.pi * d ** 2
rho_in = 0.11922 * pressure_start ** 0.91192 - 0.17264
V_in = abs(dic_scenario_flows[arc]) / rho_in
w_in = V_in / A
eta_in = 1.04298 * 10 ** (-10) * pressure_start ** 1.53560 + 8.79987 * 10 ** (-6)
nue_in = eta_in / rho_in
Re_in = w_in * (d / nue_in)
alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))
Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(
(2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +
ir / (3.71 * 1000 * d))) ** (-2)
C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)
# note pressure_start is in bar
p_m = pressure_start - C_tilde / 10 ** 5
if p_m < 0.0:
# pressure drop too large no valid pressure assignment possible
return -math.inf
Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050
K_m = Z_m / Z_n
# note flow direction is given by startnode endnode so we square the arcflow
C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (
math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * dic_scenario_flows[arc] ** 2
return pressure_start ** 2 - pressureEndNode ** 2 - C
# find root of f; as start value we use pressureEndNode plus a flow-dependent offset
# (a simple alternative would be the fixed guess x = fsolve(f, pressureEndNode + 0.5));
# this guess could be replaced by the pressure drop approximation of the MIP to probably achieve better results
x = fsolve(f, pressureEndNode + 0.5 * (dic_scenario_flows[arc] ** 2) / (dic_arc_diam[arc] ** 5))
# tol has to be a float
assert isinstance(tol, float)
# check whether the residual of the first solution is within the tolerance
if abs(f(x[0])) <= tol:
# value is ok
# because x is an array return first entry, we only have one solution for the nonlinear equation system
return x[0]
else:
# we could not solve the nonlinear equation system to the required tolerance; this can happen if the pressure
# drop is too large or if the start value for the nonlinear equation solver is too far away from the solution
print("Nonlinear equation system in Postprocessing failed. Try another node whose pressure level is"
" set to the upper bound")
return -math.inf
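# Stand-alone sketch of the root-finding step above with a constant pressure drop term C (both values are
# illustrative assumptions); in computePressureStartnodeArc, C itself depends on pressure_start, which is why
# fsolve is used instead of a closed-form square root.
def _example_startnode_pressure_sketch():
    import math
    from scipy.optimize import fsolve
    p_end = 60.0     # [bar]; illustrative end-node pressure
    C = 150.0        # constant pressure drop term; illustrative
    def f(p_start):
        return p_start ** 2 - p_end ** 2 - C
    root = fsolve(f, p_end + 0.5)[0]
    assert abs(root - math.sqrt(p_end ** 2 + C)) < 1e-6    # ~61.24 bar
    return root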
def computePressureEndnodeArc(arc, pressureStartNode, dic_scenario_flows, dic_arc_diam, distances,
ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965):
"""
For given arc and pressure level of startNode compute the pressure of the endNode
:param arc: arc of the network for which we know the pressure at the startNode, i.e. the node from which the gas flows
:type arc: tuple
:param pressureStartNode: pressure level of startNode
:type pressureStartNode: non-negative float
:param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]
:type: dictionary: key: arc, value: arc flow
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
:return: pressure level of endNode in [bar]
:rtype: float
"""
# Type and Value check
if not isinstance(arc, tuple):
raise TypeError("The input has to be a tuple")
utils.isStrictlyPositiveNumber(pressureStartNode)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
isPandasSeriesPositiveNumber(distances)
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
if not isinstance(T_m, float):
raise TypeError("The input argument has to be a number")
if not isinstance(T_n, float):
raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
arcFlow = dic_scenario_flows[arc]
if arcFlow != 0:
d = dic_arc_diam[arc]
A = 0.25 * math.pi * d ** 2
rho_in = 0.11922 * pressureStartNode ** 0.91192 - 0.17264
V_in = abs(arcFlow) / rho_in
w_in = V_in / A
eta_in = 1.04298 * 10 ** (-10) * pressureStartNode ** 1.53560 + 8.79987 * 10 ** (-6)
nue_in = eta_in / rho_in
Re_in = w_in * (d / nue_in)
alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))
Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(
(2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +
ir / (3.71 * 1000 * d))) ** (-2)
C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)
# note pressure_start is in bar
p_m = pressureStartNode - C_tilde / 10 ** 5
if p_m < 0.0:
# pressure drop too large no valid pressure assignment possible
return -math.inf
Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050
K_m = Z_m / Z_n
# note flow direction is given by startnode endnode so we square the arcflow
C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (math.pi ** 2 * T_n * rho_n * 10 ** 5 *
dic_arc_diam[arc] ** 5) * arcFlow ** 2
else:
# flow is zero therefore pressure drop is zero
C = 0
if pressureStartNode ** 2 - C >= 0:
return math.sqrt(pressureStartNode ** 2 - C)
else:
# pressure drop is too big; return a negative value, which is an invalid pressure value
return -math.inf
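# Small sketch of the closed-form relation used above: with a constant pressure drop term C (illustrative value),
# p_end = sqrt(p_start^2 - C) if the radicand is non-negative; otherwise no valid pressure assignment exists (-inf).
def _example_endnode_pressure_sketch():
    import math
    p_start = 60.0   # [bar]; illustrative start-node pressure
    C = 150.0        # constant pressure drop term; illustrative
    radicand = p_start ** 2 - C
    return math.sqrt(radicand) if radicand >= 0 else -math.inf    # ~58.74 bar here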
def _computeTimeStepFlows(index, injectionWithdrawalRates, graph, **kwargs):
# compute flows corresponding to demand by fixing demand for every node to given value and then compute
# flows by LP
dic_nodes_MinCapacity = {}
dic_nodes_MaxCapacity = {}
activeNodes = injectionWithdrawalRates.columns
for node in graph.nodes:
if node in activeNodes:
dic_nodes_MinCapacity[node] = injectionWithdrawalRates.at[index, node]
dic_nodes_MaxCapacity[node] = injectionWithdrawalRates.at[index, node]
else:
dic_nodes_MinCapacity[node] = 0
dic_nodes_MaxCapacity[node] = 0
# compute flows
return index, computeSingleSpecialScenario(dic_nodes_MinCapacity=dic_nodes_MinCapacity,
dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, graph=graph, **kwargs)
def computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, threads=1, verbose=0, solver='glpk'):
"""
Compute for each timeStep and demands given by injectionWithdrawalRates the corresponding flow values
:param injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative) in [kg/s]
:type injectionWithdrawalRates: pandas DataFrame
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param entries: list of entry nodes of the network
:type entries: list of str
:param exits: list of exit nodes of the network
:type exits: list of str
:param threads: number of threads used for parallelization
:type threads: positive integer
:param verbose: if > 0, parallelization progress is displayed
:type verbose: int
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:return: dictionary that contains for every time step the corresponding flows in [kg/s]
:rtype: dictionary key: timeStep, value: dict: key: arc, value: arc flow
"""
# Type and value check
isPandasDataFrameNumber(injectionWithdrawalRates)
isPandasSeriesPositiveNumber(distances)
isNetworkxGraph(graph)
isListOfStrings(entries)
isListOfStrings(exits)
# compute for every time step the corresponding flows; dict: key: timeStep, value: dict: key: arc, value: flow
dic_timeStep_flows = {}
# nodes with nonzero demand are given by columns of dataframe
activeNodes = injectionWithdrawalRates.columns
pool = Pool(threads)
indexList = list(injectionWithdrawalRates.index)
for i, values in enumerate(pool.imap(partial(_computeTimeStepFlows, graph=graph, distances=distances,
entries=entries, exits=exits, startNode=activeNodes[0],
endNode=activeNodes[1], specialScenario=False,
injectionWithdrawalRates=injectionWithdrawalRates,
solver=solver),
indexList), 1):
if verbose == 0:
sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(indexList) * 100)))
dic_timeStep_flows[values[0]] = values[1]
pool.close()
pool.join()
return dic_timeStep_flows
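# Minimal sketch of the Pool/partial/imap parallelization pattern used by computeTimeStepFlows and postprocessing
# above; the worker function and its data are illustrative stand-ins and not part of the original module.
def _example_square_worker(offset, x):
    # stands in for _computeTimeStepFlows: returns (key, result) pairs
    return x, x * x + offset
def _example_pool_pattern_sketch(threads=2):
    from multiprocessing import Pool
    from functools import partial
    results = {}
    with Pool(threads) as pool:
        # partial fixes the shared arguments, imap streams the per-item results back in order
        for key, value in pool.imap(partial(_example_square_worker, 0), range(5)):
            results[key] = value
    return results    # expected: {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}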
def networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress):
"""
If a pipe is longer than maxPipeLength then it will be split into several pipes of equal length,
i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1
# TODO this function is only used for testing
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param maxPipeLength: determines the maximal length of a pipe in [m].
:type maxPipeLength: positive number
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:return: graph of the network corresponding to the distances
:rtype: graph object of networkx
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
:return: dic_node_minPress dictionary that contains for every node of the network its lower pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return dic_node_maxPress dictionary that contains for every node of the network its upper pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
"""
# type and value check
isPandasSeriesPositiveNumber(distances)
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
if maxPipeLength is not None:
utils.isStrictlyPositiveNumber(maxPipeLength)
# if maximal pipeline length is a positive number we apply the refinement
if maxPipeLength is not None:
# we have to check if pipes satisfy maximal pipeline length
# list of new arcs that will be added
newPipes = []
# list of lengths of new added pipes
newPipesLengths = []
# list of split original pipes
splitEdges = []
for edge in distances.index:
# get length of pipeline
pipeLength = distances[edge]
if pipeLength > maxPipeLength:
# compute number of necessary artificial nodes
nArtificialNodes = math.ceil(pipeLength / maxPipeLength) - 1
# compute length of new pipelines
newPipeLength = float(pipeLength / (math.ceil(pipeLength / maxPipeLength)))
# lower and upper pressure bound for new nodes computed by average of nodes of original edge
lowPress = (dic_node_minPress[edge[0]] + dic_node_minPress[edge[1]]) / 2
maxPress = (dic_node_maxPress[edge[0]] + dic_node_maxPress[edge[1]]) / 2
# add first new pipe and its length
newPipes.append((edge[0], "v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])))
# add length of first new pipe
newPipesLengths.append(newPipeLength)
# add lower and upper bound for new artificial node
dic_node_minPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress
dic_node_maxPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress
# add intermediate artificial pipes, its length, and lower/upper pressure bounds
for index in range(1, nArtificialNodes):
newPipes.append(("v" + str(index) + "_" + str(edge[0]) + "_" + str(edge[1]),
"v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])))
newPipesLengths.append(newPipeLength)
dic_node_minPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress
dic_node_maxPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress
# add last new pipe and its length
newPipes.append(("v" + str(nArtificialNodes) + "_" + str(edge[0]) + "_" + str(edge[1]),
edge[1]))
newPipesLengths.append(newPipeLength)
# add edge to split edges
splitEdges.append(edge)
# Now delete edges that have been split
distances = distances.drop(splitEdges)
# Add new edges
distances = distances.append(pd.Series(newPipesLengths, index=newPipes))
# get edges for graph
edges = distances.index
# create empty graph
G = nx.Graph()
# create graph from given edges and add length as edge attribute
for edge in edges:
G.add_edge(edge[0], edge[1], length=distances[edge])
return G, distances, dic_node_minPress, dic_node_maxPress
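# Minimal usage sketch for networkRefinement: a single 2.5 km pipe with a 1 km maximum length should be split into
# three segments of ~833 m via two artificial nodes. All numbers are illustrative; the sketch relies on a pandas
# version providing Series.append, as networkRefinement itself does.
def _example_network_refinement_sketch():
    import pandas as pd
    toy_distances = pd.Series({('a', 'b'): 2500.0})   # [m]; illustrative
    toy_minPress = {'a': 50.0, 'b': 60.0}              # [bar]
    toy_maxPress = {'a': 70.0, 'b': 80.0}              # [bar]
    G, refined, minP, maxP = networkRefinement(toy_distances, 1000.0, toy_minPress, toy_maxPress)
    assert len(refined) == 3 and len(G.nodes) == 4     # two artificial nodes were inserted
    return G, refined, minP, maxP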
def determineDiscretePipelineDesign(robust, injectionWithdrawalRates, distances, dic_node_minPress, dic_node_maxPress,
dic_diameter_costs=None, dic_candidateMergedDiam_costs=None,
gdfEdges=None, regColumn1='nodeIn', regColumn2='nodeOut', solver='glpk',
opexForDiameters=None, economicLifetime=30, interestRate=0.08, costUnit='€', ir=0.2,
rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965,
originalFluidFlows=None, nDigits=6, verbose=0, threads=1):
"""
We compute a robust (depending on parameter robust) optimal pipeline design,
i.e. for a given network, we compute a Steiner tree connecting the injection and withdrawal nodes w.r.t. its total length.
Afterwards, we compute the robust (special) scenarios, see Robinius et al..
We also compute for every timeStep of injectionWithdrawalRates the corresponding flows.
We compute merged diameters according to dic_candidateMergedDiam_costs, i.e. we compute an equivalent single
diameter for two parallel pipes with the same diameter.
If robust is True, then we compute the corresponding pressure drops for every diameter and robust scenario.
If robust is False, then we compute for every timeStep the corresponding pressure drops for every diameter and
timeStep.
If robust is True, then we compute optimal diameters by a MIP for the robust scenarios.
If robust is False, then we compute optimal diameters by a MIP for the timeStep scenarios. Not robust version!
In a postprocessing step, we compute "precise" pressure levels for the robust scenarios and the timeStep scenarios.
Note that if robust is False, then the resulting network may be infeasible for the robust scenarios
that can occur in the network!
:param robust: Bool that is True if we build a robust pipeline network, otherwise False
:type robust: bool
:param injectionWithdrawalRates: the argument is a pandas DataFrame with the index column
denoting the timesteps and the index row denoting the name of the network's nodes.
Injection are denoted with negative floats and withdrawal with positive floats
in [kg/s]. Example:
node1 node2 node3
0 -4 2 2
1 3 -1.5 -1.5
... ... ... ...
8759 0 -1 1.
:type injectionWithdrawalRates: pandas DataFrame with floats
:param distances: the parameter is a pandas Series with the indices being tuples of the
network's nodes and the values being the lengths of the pipelines in [m]. Example:
(node1, node2) 1000
(node2, node3) 50000
(node2, node1) 1000
(node3, node2) 50000
:type distances: pandas Series
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param dic_diameter_costs: dictionary that contains all diameters in [m] as keys and the values are the
corresponding costs in [Euro/m]. Default Value is a preselection of diameters and its costs.
if None, then we chose the following preselection of diameters and costs
dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,
0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,
0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,
0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,
1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}
:type dic_diameter_costs: dict with keys: diameters, values: cost for pipeline; optional
:param dic_candidateMergedDiam_costs: dictionary that contains a set of diameters in [m] as keys and
the values are the corresponding costs in [Euro/m]. These diameters are then used to compute a single equivalent
diameter for two looped (parallel) pipes with the considered diameter.
|br| * the default value is empty dictionary {}
:type dic_candidateMergedDiam_costs: dict with keys: diameters, values: cost for pipeline; optional
:param gdfEdges: GeoDataFrame with the edges of the network and the names of their start and end nodes.
Required for geo-referenced result visualization. Should be obtained from the getRefinedShapeFile
function.
:type gdfEdges: GeoDataFrame or None: optional, default is None
:param regColumn1: name of the column in gdfEdges which holds the name of the injection/ withdrawal node
at the beginning of the line. Required if gdfEdges is specified.
:type regColumn1: string, optional, default is 'nodeIn'
:param regColumn2: name of the column in gdfEdges which holds the name of the injection/ withdrawal node
at the end of the line. Required if gdfEdges is specified.
:type regColumn2: string, optional, default is 'nodeOut'
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
# TODO @Juelich where to use
:param originalFluidFlows: string that specifies the considered fluid
|br| * the default value is None
:type originalFluidFlows: str; optional
:param nDigits: number of digits used in the round function
|br| * the default value is 6
:type nDigits: positive int
:param verbose: defines how verbose the console logging is:\n
- 0: general model logging, warnings and optimization solver logging are displayed.
- 1: warnings are displayed.
- 2: no general model logging or warnings are displayed, the optimization solver logging is set to a
minimum.\n
Note: if required, the optimization solver logging can be separately enabled in the optimizationSpecs
of the optimize function.
|br| * the default value is 0
:type verbose: integer (0, 1 or 2)
:return: tuple (dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels,
dic_timeStep_MaxViolPress, gdfEdges), with:
- dic_arc_optimalDiameters dictionary
- pressure levels of postprocessing of robust scenarios dic_scen_PressLevels
- violation of pressure bounds of robust scenarios in optimized network determined by postprocessing
- dic_scen_MaxViolPress: maximum pressure violation in robust scenarios
- pressure levels of postprocessing of timeSteps dic_timeStep_PressLevels
- violation of pressure bounds of timeStep scenarios in optimized network determined by postprocessing
- dic_timeStep_MaxViolPress: maximum pressure violation in timestep scenarios
- geopandas GeoDataFrame (information about diameters in 'diam' column and number of pipelines in
'nbPipes'); None if kwarg gdfEdges was specified as None
:rtype: return types:
- dic_arc_optimalDiameters: dictionary, key: arcs, values: (numberOfPipes, diameter) note usually numberOfPipes
is 1, but if we have chosen a merged diameter, then we have two parallel pipes with the same diameter,
i.e. numberOfPipes is 2.
- dic_scen_PressLevels: dictionary, key: nodePair, value: dict: key: node, value: pressure level in [bar]
- dic_scen_MaxViolPress: dictionary, key: nodePair, value: non-negative number
(zero means no pressure violation)
- dic_timeStep_PressLevels: dictionary, key: timeStep, value: dict: key: node, value: pressure level in [bar]
- dic_timeStep_MaxViolPress: dictionary, key: timeStep, value: non-negative number
(zero means no pressure violation)
- gdfEdges: geopandas GeoDataFrame; None if kwarg gdfEdges was specified as None
"""
# Do type and value check of input data:
isBool(robust)
isPandasDataFrameNumber(injectionWithdrawalRates)
isPandasSeriesPositiveNumber(distances)
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
# extract diameters for the optimization
if dic_diameter_costs is not None:
if isinstance(dic_diameter_costs, dict):
diameters = list(dic_diameter_costs.keys())
if isinstance(diameters, list):
for diam in diameters:
utils.isStrictlyPositiveNumber(diam)
else:
raise TypeError("The input argument has to be a list")
isDictionaryPositiveNumber(dic_diameter_costs)
if dic_candidateMergedDiam_costs is not None:
if isinstance(dic_candidateMergedDiam_costs, dict):
for diam in dic_candidateMergedDiam_costs.keys():
utils.isStrictlyPositiveNumber(diam)
utils.isPositiveNumber(dic_candidateMergedDiam_costs[diam])
else:
raise TypeError("The input argument has to be a dictionary")
utils.isString(regColumn1), utils.isString(regColumn2)
if gdfEdges is not None:
if isinstance(gdfEdges, gpd.GeoDataFrame):
if (not regColumn1 in gdfEdges.columns) | (not regColumn2 in gdfEdges.columns):
raise ValueError("regColumn1 or regColumn2 not in columns of gdfEdges")
else:
gdfEdges['nodes'] = gdfEdges.apply(lambda x: (x[regColumn1], x[regColumn2]), axis=1)
else:
raise TypeError("gdfEdges has to be a geopandas GeoDataFrame.")
if opexForDiameters is not None:
if isinstance(opexForDiameters, list):
for opex in opexForDiameters:
utils.isPositiveNumber(opex)
else:
raise TypeError("The input argument has to be a list")
utils.isPositiveNumber(interestRate)
utils.isStrictlyPositiveNumber(economicLifetime)
utils.isString(costUnit)
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
if not isinstance(T_m, float):
raise TypeError("The input argument has to be a number")
if not isinstance(T_n, float):
raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
if originalFluidFlows is not None:
utils.isString(originalFluidFlows)
utils.isStrictlyPositiveInt(nDigits)
if dic_diameter_costs is None:
print("No diameters were given for the optimization. Thus, we consider the following default diameters and costs:")
dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,
0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,
0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,
0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,
1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}
print(dic_diameter_costs)
# create graph with respect to distances
utils.output('Creating graph with respect to given distances', verbose, 0)
graph, distances = createNetwork(distances)
# plot graph
if verbose < 1:
if gdfEdges is not None:
gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]
fig, ax = plt.subplots(figsize=(4,4))
gdfEdges.plot(ax=ax, color='k'), ax.axis('off')
else:
utils.output("Original Network Graph:", verbose, 0)
nx.draw(graph, with_labels=True)
plt.show()
# Create a Steiner tree of the network connecting the injection and withdrawal nodes
utils.output('Creating a Steiner tree', verbose, 0)
inner_nodes = list(injectionWithdrawalRates.columns)
graph, distances = createSteinerTree(graph, distances, inner_nodes)
utils.output("Steiner tree:", verbose, 0)
if verbose < 1:
if gdfEdges is not None:
gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]
fig, ax = plt.subplots(figsize=(4,4))
gdfEdges.plot(ax=ax, color='k'), ax.axis('off')
else:
nx.draw(graph, with_labels=True)
plt.show()
# Compute robust scenarios for spanning tree network
utils.output("Compute robust scenario set for tree network (based on " +
str(len(graph.nodes)*len(graph.nodes)-len(graph.nodes)) +
' node combinations). Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_nodePair_flows, entries, exits = generateRobustScenarios(injectionWithdrawalRates, graph, distances,
dic_node_minPress, dic_node_maxPress, solver=solver, threads=threads, verbose=verbose)
utils.output("Number of robust scenarios: " + str(len(dic_nodePair_flows.keys())) , verbose, 0)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
# Compute scenarios for timeSteps
utils.output("Compute scenarios for each timestep. Number of timestep scenarios: "
+ str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_timeStep_flows = computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits,
solver=solver, threads=threads, verbose=verbose)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
# Compute equivalent single diameters for looped (parallel) pipes
utils.output("Compute equivalent single diameters for looped (parallel) pipes", verbose, 0)
# dic_LoopedDiam_costs contains the new computed diameters and its costs
dic_LoopedDiam_costs = None
# dic_newDiam_oldDiam merges new and old diameters
dic_newDiam_oldDiam = None
if dic_candidateMergedDiam_costs is not None:
dic_LoopedDiam_costs, dic_newDiam_oldDiam = computeLargeMergedDiameters(dic_candidateMergedDiam_costs)
# merge all diameters to one dictionary for the optimization model
dic_diameter_costs.update(dic_LoopedDiam_costs)
# Compute pressure drops for each scenario and diameter and the compute optimal diameters
# depending on robust, we do this w.r.t. robust scenarios or every timeStep
# dictionary for the pressure coefficients
dic_pressureCoef = {}
# dictionary for the optimal diameters
dic_arc_diam = {}
if robust:
# we compute the pressure drops for the robust scenarios
utils.output("Pressure drop coefficients for diameters with respect to robust scenarios", verbose, 0)
dic_pressureCoef = determinePressureDropCoef(dic_nodePair_flows, distances, dic_node_minPress,
dic_node_maxPress, list(dic_diameter_costs.keys()))
specialScenarionames = list(dic_nodePair_flows.keys())
# Determine optimal discrete pipeline selection by solving a MIP w.r.t. the robust scenarios
utils.output('Determining optimal robust pipeline design under the consideration of pressure ' +
'losses and robust scenarios', verbose, 0)
# returns dict: key: arc, value: optimal diameter
# returns dict: key: nodePair, value: dic: key: node, value: pressure level
dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,
specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, robust, verbose=verbose,
solver=solver, threads=threads)
else:
# we compute pressure drops for every timeStep scenario; this is the non-robust version
utils.output("Pressure drop coefficients for diameters with respect to timeStep scenarios", verbose, 0)
dic_pressureCoef = determinePressureDropCoef(dic_timeStep_flows, distances, dic_node_minPress,
dic_node_maxPress, list(dic_diameter_costs.keys()))
timeSteps = list(dic_timeStep_flows.keys())
# Determine optimal discrete pipeline selection by solving a MIP w.r.t. the timeStep scenarios
utils.output('Determining optimal pipeline design under the consideration of pressure losses and every time step',
verbose, 0)
utils.output('This network design is not necessarily robust!', verbose, 0)
# returns dict: key: arc, value: optimal diameter
# returns dict: key: timeStep, value: dic: key: node, value: pressure level
dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,
timeSteps, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, False, verbose=verbose,
solver=solver, threads=threads)
if not dic_arc_diam:
utils.output("No feasible diameter selections exits", verbose, 0)
return None
# Do postprocessing: Use a "more" accurate pressure model and apply Postprocessing of master's thesis:
# first do postprocessing for special scenarios
utils.output("Do postprocessing for robust (special) scenarios. Number of scenarios: " + str(len(dic_nodePair_flows)) +
'. Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_scen_PressLevels, dic_scen_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_nodePair_flows,
dic_node_minPress, dic_node_maxPress,
threads=threads, verbose=verbose)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
# print if some of these scenarios are not feasible for the "more" precise pressure model
for scenario in dic_scen_MaxViolPress.keys():
if dic_scen_MaxViolPress[scenario] > 0:
utils.output("Robust Scenario " + str(scenario) + " violates pressure bounds by " +
str(dic_scen_MaxViolPress[scenario]), verbose, 0)
# compute pressure levels for each time step
utils.output("Do postprocessing for each timestep scenarios. Number of scenarios: " +
str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_timeStep_PressLevels, dic_timeStep_MaxViolPress = postprocessing(graph, distances, dic_arc_diam,
dic_timeStep_flows, dic_node_minPress,
dic_node_maxPress,
threads=threads, verbose=verbose)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
for timeStep in dic_timeStep_MaxViolPress.keys():
if dic_timeStep_MaxViolPress[timeStep] > 0:
utils.output("Time Step " + str(timeStep) + " violates pressure bounds by " +
str(dic_timeStep_MaxViolPress[timeStep]), verbose, 0)
# now determine final output, i.e. dictionary: key: arcs, values: (numberOfPipes, diameter)
# note usually numberOfPipes is 1, but if we have chosen a merged diameter, then we have two parallel pipes with
# the same diameter, i.e. numberOfPipes is 2.
dic_arc_optimalDiameters = {}
for arc in dic_arc_diam.keys():
if dic_LoopedDiam_costs is not None:
if dic_arc_diam[arc] in dic_LoopedDiam_costs.keys():
dic_arc_optimalDiameters[arc] = (2, dic_newDiam_oldDiam[dic_arc_diam[arc]])
else:
dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])
else:
dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])
if verbose < 1:
if gdfEdges is not None:
gdfEdges = gdfEdges[gdfEdges.nodes.isin(dic_arc_optimalDiameters)]
gdfEdges['diam'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][1], axis=1)
gdfEdges['nbPipes'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][0], axis=1)
plotOptimizedNetwork(gdfEdges)
else:
# plot network with new diameters
utils.output("Network with optimized diameters, looped pipes are indicated by two colored edges, " +
"Thicker edge means larger diameter", verbose, 0)
finalG = nx.MultiGraph()
for arc in dic_arc_optimalDiameters.keys():
if dic_arc_optimalDiameters[arc][0] == 1:
# we have a single, non-looped pipe
finalG.add_edge(arc[0], arc[1], color='black', weight=5 * dic_arc_optimalDiameters[arc][1])
else:
# we have a looped pipe
finalG.add_edge(arc[0], arc[1], color='r',
weight=10 * dic_arc_optimalDiameters[arc][1])
finalG.add_edge(arc[0], arc[1], color='b',
weight=5 * dic_arc_optimalDiameters[arc][1])
# pos = nx.circular_layout(finalG)
edges = finalG.edges()
colors = []
weight = []
for (u, v, attrib_dict) in list(finalG.edges.data()):
colors.append(attrib_dict['color'])
weight.append(attrib_dict['weight'])
nx.draw(finalG, edgelist=edges, edge_color=colors, width=weight, with_labels=True)
plt.show()
# Add some output which quantifies the difference between the original and the new
# pipeline design (additional input arguments would be required for this)
# TODO @ Juelich just compare original solution to solution dic_arc_optimalDiameters
return dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, \
dic_timeStep_MaxViolPress, gdfEdges
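# Added note (not in the original source): the first returned dictionary maps each arc to a
# (numberOfPipes, diameter) tuple, e.g. {('node1', 'node2'): (1, 0.527)} would describe a single
# 0.527 m pipe between node1 and node2, while a leading 2 denotes two parallel (looped) pipes
# of the stated diameter.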
def plotOptimizedNetwork(gdf_pipes, figsize=(4,4), nodesColumn='nodes', diamColumn='diam',
nbPipesColumn='nbPipes', line_scaling=1, gdf_regions=None, pressureLevels=None, pMin=50, pMax=100,
cmap='Spectral_r', cbxShift=0.32, cbyShift=0.08, cbWidth=0.4, fontsize=10, cbTitle='Pressure [bar]'):
"""Plot optimized network, visualizing chosen pipe diameters and, if selected, pressure levels of
a scenario.
:param gdf_pipes: GeoDataFrame, containing information about the diameters, number of pipes and
routes of the pipeline network
:type gdf_pipes: geopandas GeoDataFrame
:param figsize: figure size, defaults to (4,4)
:type figsize: tuple, optional
:param nodesColumn: name of the column in gdf_pipes containing a tuple (startNode, endNode) with the
name of the nodes being strings, defaults to 'nodes'
:type nodesColumn: str, optional
:param diamColumn: name of the column in gdf_pipes containing the diameters of the pipelines in m,
defaults to 'diam'
:type diamColumn: str, optional
:param nbPipesColumn: name of the column in gdf_pipes containing the number of parallel pipes along
a connection (maximum parallel pipes: 2),
defaults to 'nbPipes'
:type nbPipesColumn: str, optional
:param line_scaling: scaling factor for line width, defaults to 1
:type line_scaling: int, optional
:param gdf_regions: GeoDataFrame for background plotting, defaults to None
:type gdf_regions: geopandas GeoDataFrame, optional
:param pressureLevels: pressure levels at each node for one scenario/ timestep, defaults to None
:type pressureLevels: dictionary or series with keys/ indices being the nodes of the network, optional
:param pMin: minimum pressure of colorbar, defaults to 50
:type pMin: int, optional
:param pMax: maximum pressure of colorbar, defaults to 100
:type pMax: int, optional
:param cmap: colormap name, defaults to 'Spectral_r'
:type cmap: str, optional
:param cbxShift: colorbar x shift, defaults to 0.32
:type cbxShift: float, optional
:param cbyShift: colorbar y shift, defaults to 0.08
:type cbyShift: float, optional
:param cbWidth: colorbar width, defaults to 0.4
:type cbWidth: float, optional
:param fontsize: fontsize of legend and colorbar, defaults to 10
:type fontsize: int, optional
:param cbTitle: colorbar title, defaults to 'Pressure [bar]'
:type cbTitle: str, optional
:return: tuple (fig, ax)
:rtype:
- fig: matplotlib figure
- ax: matplotlib axis
"""
fig, ax = plt.subplots(figsize=figsize)
cmap = mpl.cm.get_cmap(cmap)
if gdf_regions is not None:
gdf_regions.plot(ax=ax, facecolor='lightgrey', edgecolor='lightgrey')
diamMin = gdf_pipes[gdf_pipes[diamColumn] > 0][diamColumn].min()
for i, row in gdf_pipes.iterrows():
lw = row[diamColumn]/diamMin*line_scaling
if pressureLevels is not None:
p = (pressureLevels[row[nodesColumn][0]] + pressureLevels[row[nodesColumn][1]])/2
color = cmap((p-pMin)/(pMax-pMin))
else:
color='k'
if (row[nbPipesColumn] == 1):
gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw, capstyle='round')
else:
gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw*3, capstyle='round')
gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color='white', linewidth=lw)
ax.axis('off')
lines = []
for diam in sorted(gdf_pipes[diamColumn].unique()):
line = plt.Line2D(range(1), range(1), linewidth=diam/diamMin*line_scaling, color='k', marker='_',
label="{:>1.5}".format(str(diam)) + ' m')
lines.append(line)
leg = ax.legend(handles=lines, prop={'size': fontsize}, loc=6, bbox_to_anchor=(1,0.5), title='Diameters')
leg.get_frame().set_edgecolor('white')
if pressureLevels is not None:
sm1 = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=pMin, vmax=pMax))
sm1._A = []
cax = fig.add_axes([cbxShift, cbyShift, cbWidth, 0.03])
cb1 = fig.colorbar(sm1, cax=cax, pad=0.05, aspect=7, fraction=0.07, orientation='horizontal')
cax.tick_params(labelsize=fontsize)
cax.set_xlabel(cbTitle, size=fontsize)
cb1.ax.xaxis.set_label_position('top')
plt.show()
return fig, ax
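# Hedged usage sketch (not part of the original module): assuming gdfEdges has been prepared
# as above with 'nodes', 'diam' and 'nbPipes' columns, and that pressures is a dict mapping
# node names to pressure levels for one scenario (e.g. one entry of dic_scen_PressLevels),
# the optimized network could be plotted roughly like this:
#
#   fig, ax = plotOptimizedNetwork(
#       gdfEdges,
#       figsize=(6, 6),
#       line_scaling=1,
#       pressureLevels=pressures,   # hypothetical scenario pressures; omit to plot in black
#       pMin=50, pMax=100,
#   )
#   fig.savefig('optimizedNetwork.png', dpi=200, bbox_inches='tight')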
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations:
"""SubscriptionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.subscriptions.v2016_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_locations(
self,
subscription_id: str,
**kwargs
) -> AsyncIterable["models.LocationListResult"]:
"""Gets all available geo-locations.
This operation provides all the locations that are available for resource providers; however,
each resource provider may support a subset of this list.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2016_06_01.models.LocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocationListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-06-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_locations.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'} # type: ignore
async def get(
self,
subscription_id: str,
**kwargs
) -> "models.Subscription":
"""Gets details about a specified subscription.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subscription, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2016_06_01.models.Subscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Subscription"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-06-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["models.SubscriptionListResult"]:
"""Gets all subscriptions for a tenant.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubscriptionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2016_06_01.models.SubscriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SubscriptionListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-06-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SubscriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions'} # type: ignore
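# Hedged usage sketch (not part of the generated code): this operations class is normally
# reached through the generated service client rather than instantiated directly. The import
# paths and credential class below are assumptions based on the usual azure-mgmt-resource /
# azure-identity layout and may differ between package versions.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.resource.subscriptions.v2016_06_01.aio import SubscriptionClient
#
#   async def show_locations(subscription_id):
#       async with DefaultAzureCredential() as credential:
#           async with SubscriptionClient(credential) as client:
#               async for location in client.subscriptions.list_locations(subscription_id):
#                   print(location.name)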
|
from array import *
# in an array, all values have to be of the same type
# arrays in python don't have a fixed size, i.e. they are flexible
vls = array('i', [5, 34, 54, 23, 26, 32, 2])  ## 'i' = signed int
# copying an array
newArr = array(vls.typecode, (a for a in vls))  # fetch each value from vls and add it to the new array
for e in newArr:
print(e,end=" ")
# 5 34 54 23 26 32 2
print()
# size of the array via buffer_info()
print(vls.buffer_info())
# e.g. (20113184, 7)
# (memory address, number of elements)
# accessing the array
for i in vls:
print(i)
print(vls[1])
# index-based access
i = 0
while i < len(newArr):
print(newArr[i])
i += 1
# array data type
print(vls.typecode) # properties
## reversing array
vls.reverse()
print(vls)
#array('i', [2, 32, 26, 23, 54, 34, 5])
## manual approach
l = len(vls)
vls = array(vls.typecode, [vls[i-1] for i in range(l, 0, -1)])  # build a reversed copy by indexing from the end
print(vls)
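## alternative (added example): array objects also support extended slicing,
## so a reversed copy can be obtained with a negative step
revCopy = vls[::-1]
print(revCopy)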
## taking values from the user
arr = array('i', [])
l = int(input("Enter the length of the array: "))
for i in range(l):  # loop as many times as the length specified by the user
v = int(input("Enter the next value: "))  # value from the user
arr.append(v)  # append at the end of the array
print(arr)
## searching for the index of a value
## manual method
i = int(input('Enter the value whose index you want: '))
k = 0
found = 0
for e in arr:
if e == i:
found += 1
break  # if the value matches an array element, the loop ends
k += 1
if found != 0:
print(k)
else:
print('sorry, the value you are looking for is not available')
## using Python's built-in method
print(arr.index(i))
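## note (added example): arr.index(i) raises a ValueError when the value is not present,
## so a safer pattern is to catch the exception
try:
    print(arr.index(i))
except ValueError:
    print('sorry, the value you are looking for is not available')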
|
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from os.path import join as pjoin
from memory import ReplayMemory, Transition, State
from model import DRRN
from util import *
import logger
import logging
import traceback
import sentencepiece as spm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DRRN_Agent:
def __init__(self, args):
self.gamma = args.gamma
self.batch_size = args.batch_size
self.sp = spm.SentencePieceProcessor()
self.sp.Load(args.spm_path)
self.network = DRRN(len(self.sp), args.embedding_dim, args.hidden_dim).to(device)
self.memory = ReplayMemory(args.memory_size)
self.save_path = args.output_dir
self.clip = args.clip
self.optimizer = torch.optim.Adam(self.network.parameters(),
lr=args.learning_rate)
def observe(self, state, act, rew, next_state, next_acts, done):
self.memory.push(state, act, rew, next_state, next_acts, done)
def build_state(self, obs, infos):
""" Returns a state representation built from various info sources. """
obs_ids = [self.sp.EncodeAsIds(o) for o in obs]
look_ids = [self.sp.EncodeAsIds(info['look']) for info in infos]
inv_ids = [self.sp.EncodeAsIds(info['inv']) for info in infos]
return [State(ob, lk, inv) for ob, lk, inv in zip(obs_ids, look_ids, inv_ids)]
def encode(self, obs_list):
""" Encode a list of observations """
return [self.sp.EncodeAsIds(o) for o in obs_list]
def act(self, states, poss_acts, sample=True):
""" Returns a string action from poss_acts. """
idxs, values = self.network.act(states, poss_acts, sample)
act_ids = [poss_acts[batch][idx] for batch, idx in enumerate(idxs)]
return act_ids, idxs, values
def update(self):
if len(self.memory) < self.batch_size:
return
transitions = self.memory.sample(self.batch_size)
batch = Transition(*zip(*transitions))
# Compute Q(s', a') for all a'
# TODO: Use a target network???
next_qvals = self.network(batch.next_state, batch.next_acts)
# Take the max over next q-values
next_qvals = torch.tensor([vals.max() for vals in next_qvals], device=device)
# Zero all the next_qvals that are done
next_qvals = next_qvals * (1-torch.tensor(batch.done, dtype=torch.float, device=device))
targets = torch.tensor(batch.reward, dtype=torch.float, device=device) + self.gamma * next_qvals
# Next compute Q(s, a)
# Nest each action in a list - so that it becomes the only admissible cmd
nested_acts = tuple([[a] for a in batch.act])
qvals = self.network(batch.state, nested_acts)
# Combine the qvals: Maybe just do a greedy max for generality
qvals = torch.cat(qvals)
# Compute Huber loss
loss = F.smooth_l1_loss(qvals, targets.detach())
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.network.parameters(), self.clip)
self.optimizer.step()
return loss.item()
def load(self):
try:
self.memory = pickle.load(open(pjoin(self.save_path, 'memory.pkl'), 'rb'))
self.network = torch.load(pjoin(self.save_path, 'model.pt'))
except Exception as e:
print("Error loading model.")
logging.error(traceback.format_exc())
def save(self):
try:
pickle.dump(self.memory, open(pjoin(self.save_path, 'memory.pkl'), 'wb'))
torch.save(self.network, pjoin(self.save_path, 'model.pt'))
except Exception as e:
print("Error saving model.")
logging.error(traceback.format_exc())
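# Hedged usage sketch (not part of the original file): a rough interaction/training loop,
# assuming an args namespace with the fields used in __init__ above and an environment env
# whose outputs match the shapes expected by build_state()/act(). All names below are
# placeholders, not a real API.
#
#   agent = DRRN_Agent(args)
#   states = agent.build_state(obs, infos)
#   for step in range(max_steps):
#       action_strs, action_idxs, _ = agent.act(states, valid_acts)
#       obs, rewards, dones, infos = env.step(action_strs)          # hypothetical env API
#       valid_acts = [info['valid'] for info in infos]              # hypothetical key
#       next_states = agent.build_state(obs, infos)
#       for s, a, r, ns, na, d in zip(states, action_strs, rewards, next_states, valid_acts, dones):
#           agent.observe(s, a, r, ns, na, d)
#       loss = agent.update()
#       states = next_states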
|
"""
This module has all the tools necessary for me to handle a knowledge logic problem
"""
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Tests for ``llnl/util/filesystem.py``"""
import llnl.util.filesystem as fs
import os
import stat
import pytest
@pytest.fixture()
def stage(tmpdir_factory):
"""Creates a stage with the directory structure for the tests."""
s = tmpdir_factory.mktemp('filesystem_test')
with s.as_cwd():
# Create source file hierarchy
fs.touchp('source/1')
fs.touchp('source/a/b/2')
fs.touchp('source/a/b/3')
fs.touchp('source/c/4')
fs.touchp('source/c/d/5')
fs.touchp('source/c/d/6')
fs.touchp('source/c/d/e/7')
# Create symlinks
os.symlink(os.path.abspath('source/1'), 'source/2')
os.symlink('b/2', 'source/a/b2')
os.symlink('a/b', 'source/f')
# Create destination directory
fs.mkdirp('dest')
yield s
class TestCopy:
"""Tests for ``filesystem.copy``"""
def test_file_dest(self, stage):
"""Test using a filename as the destination."""
with fs.working_dir(str(stage)):
fs.copy('source/1', 'dest/1')
assert os.path.exists('dest/1')
def test_dir_dest(self, stage):
"""Test using a directory as the destination."""
with fs.working_dir(str(stage)):
fs.copy('source/1', 'dest')
assert os.path.exists('dest/1')
def check_added_exe_permissions(src, dst):
src_mode = os.stat(src).st_mode
dst_mode = os.stat(dst).st_mode
for perm in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH]:
if src_mode & perm:
assert dst_mode & perm
class TestInstall:
"""Tests for ``filesystem.install``"""
def test_file_dest(self, stage):
"""Test using a filename as the destination."""
with fs.working_dir(str(stage)):
fs.install('source/1', 'dest/1')
assert os.path.exists('dest/1')
check_added_exe_permissions('source/1', 'dest/1')
def test_dir_dest(self, stage):
"""Test using a directory as the destination."""
with fs.working_dir(str(stage)):
fs.install('source/1', 'dest')
assert os.path.exists('dest/1')
check_added_exe_permissions('source/1', 'dest/1')
class TestCopyTree:
"""Tests for ``filesystem.copy_tree``"""
def test_existing_dir(self, stage):
"""Test copying to an existing directory."""
with fs.working_dir(str(stage)):
fs.copy_tree('source', 'dest')
assert os.path.exists('dest/a/b/2')
def test_non_existing_dir(self, stage):
"""Test copying to a non-existing directory."""
with fs.working_dir(str(stage)):
fs.copy_tree('source', 'dest/sub/directory')
assert os.path.exists('dest/sub/directory/a/b/2')
def test_parent_dir(self, stage):
"""Test copying to from a parent directory."""
# Make sure we get the right error if we try to copy a parent into
# a descendent directory.
with pytest.raises(ValueError, matches="Cannot copy"):
with fs.working_dir(str(stage)):
fs.copy_tree('source', 'source/sub/directory')
# Only point with this check is to make sure we don't try to perform
# the copy.
with pytest.raises(IOError, matches="No such file or directory"):
with fs.working_dir(str(stage)):
fs.copy_tree('foo/ba', 'foo/bar')
def test_symlinks_true(self, stage):
"""Test copying with symlink preservation."""
with fs.working_dir(str(stage)):
fs.copy_tree('source', 'dest', symlinks=True)
assert os.path.exists('dest/2')
assert os.path.islink('dest/2')
assert os.path.exists('dest/a/b2')
with fs.working_dir('dest/a'):
assert os.path.exists(os.readlink('b2'))
assert (os.path.realpath('dest/f/2') ==
os.path.abspath('dest/a/b/2'))
assert os.path.realpath('dest/2') == os.path.abspath('dest/1')
def test_symlinks_true_ignore(self, stage):
"""Test copying when specifying relative paths that should be ignored
"""
with fs.working_dir(str(stage)):
ignore = lambda p: p in ['c/d/e', 'a']
fs.copy_tree('source', 'dest', symlinks=True, ignore=ignore)
assert not os.path.exists('dest/a')
assert os.path.exists('dest/c/d')
assert not os.path.exists('dest/c/d/e')
def test_symlinks_false(self, stage):
"""Test copying without symlink preservation."""
with fs.working_dir(str(stage)):
fs.copy_tree('source', 'dest', symlinks=False)
assert os.path.exists('dest/2')
assert not os.path.islink('dest/2')
class TestInstallTree:
"""Tests for ``filesystem.install_tree``"""
def test_existing_dir(self, stage):
"""Test installing to an existing directory."""
with fs.working_dir(str(stage)):
fs.install_tree('source', 'dest')
assert os.path.exists('dest/a/b/2')
def test_non_existing_dir(self, stage):
"""Test installing to a non-existing directory."""
with fs.working_dir(str(stage)):
fs.install_tree('source', 'dest/sub/directory')
assert os.path.exists('dest/sub/directory/a/b/2')
def test_symlinks_true(self, stage):
"""Test installing with symlink preservation."""
with fs.working_dir(str(stage)):
fs.install_tree('source', 'dest', symlinks=True)
assert os.path.exists('dest/2')
assert os.path.islink('dest/2')
def test_symlinks_false(self, stage):
"""Test installing without symlink preservation."""
with fs.working_dir(str(stage)):
fs.install_tree('source', 'dest', symlinks=False)
assert os.path.exists('dest/2')
assert not os.path.islink('dest/2')
def test_move_transaction_commit(tmpdir):
fake_library = tmpdir.mkdir('lib').join('libfoo.so')
fake_library.write('Just some fake content.')
old_md5 = fs.hash_directory(str(tmpdir))
with fs.replace_directory_transaction(str(tmpdir.join('lib'))):
fake_library = tmpdir.mkdir('lib').join('libfoo.so')
fake_library.write('Other content.')
new_md5 = fs.hash_directory(str(tmpdir))
assert old_md5 != fs.hash_directory(str(tmpdir))
assert new_md5 == fs.hash_directory(str(tmpdir))
def test_move_transaction_rollback(tmpdir):
fake_library = tmpdir.mkdir('lib').join('libfoo.so')
fake_library.write('Just some fake content.')
h = fs.hash_directory(str(tmpdir))
try:
with fs.replace_directory_transaction(str(tmpdir.join('lib'))):
assert h != fs.hash_directory(str(tmpdir))
fake_library = tmpdir.mkdir('lib').join('libfoo.so')
fake_library.write('Other content.')
raise RuntimeError('')
except RuntimeError:
pass
assert h == fs.hash_directory(str(tmpdir))
@pytest.mark.regression('10601')
@pytest.mark.regression('10603')
def test_recursive_search_of_headers_from_prefix(
installation_dir_with_headers
):
# Try to inspect recursively from <prefix> and ensure we don't get
# subdirectories of the '<prefix>/include' path
prefix = str(installation_dir_with_headers)
header_list = fs.find_all_headers(prefix)
# Check that the header files we expect are all listed
assert os.path.join(prefix, 'include', 'ex3.h') in header_list
assert os.path.join(prefix, 'include', 'boost', 'ex3.h') in header_list
assert os.path.join(prefix, 'path', 'to', 'ex1.h') in header_list
assert os.path.join(prefix, 'path', 'to', 'subdir', 'ex2.h') in header_list
# Check that when computing directories we exclude <prefix>/include/boost
include_dirs = header_list.directories
assert os.path.join(prefix, 'include') in include_dirs
assert os.path.join(prefix, 'include', 'boost') not in include_dirs
assert os.path.join(prefix, 'path', 'to') in include_dirs
assert os.path.join(prefix, 'path', 'to', 'subdir') in include_dirs
@pytest.mark.parametrize('list_of_headers,expected_directories', [
(['/pfx/include/foo.h', '/pfx/include/subdir/foo.h'], ['/pfx/include']),
(['/pfx/include/foo.h', '/pfx/subdir/foo.h'],
['/pfx/include', '/pfx/subdir']),
(['/pfx/include/subdir/foo.h', '/pfx/subdir/foo.h'],
['/pfx/include', '/pfx/subdir'])
])
def test_computation_of_header_directories(
list_of_headers, expected_directories
):
hl = fs.HeaderList(list_of_headers)
assert hl.directories == expected_directories
def test_headers_directory_setter():
hl = fs.HeaderList(
['/pfx/include/subdir/foo.h', '/pfx/include/subdir/bar.h']
)
# Set directories using a list
hl.directories = ['/pfx/include/subdir']
assert hl.directories == ['/pfx/include/subdir']
# If it's a single directory it's fine to not wrap it into a list
# when setting the property
hl.directories = '/pfx/include/subdir'
assert hl.directories == ['/pfx/include/subdir']
# Paths are normalized, so it doesn't matter how many backslashes etc.
# are present in the original directory being used
hl.directories = '/pfx/include//subdir/'
assert hl.directories == ['/pfx/include/subdir']
# Setting an empty list is allowed and returns an empty list
hl.directories = []
assert hl.directories == []
# Setting directories to None also returns an empty list
hl.directories = None
assert hl.directories == []
|
import abc
import csv
from collections import namedtuple, defaultdict, OrderedDict, Counter
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity as sim
from sklearn.pipeline import Pipeline
STOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'}
Synset = namedtuple('Synset', 'id synonyms hypernyms bag')
class Inventory(object):
"""Sense inventory representation and loader."""
synsets = {}
index = defaultdict(list)
def __init__(self, inventory_path):
"""
During the construction, BaseWSD parses the given sense inventory file.
"""
def field_to_bag(field):
return {word: freq for record in field.split(', ')
for word, freq in (self.lexeme(record),)
if record}
with open(inventory_path, 'r', encoding='utf-8', newline='') as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
id = row[0]
synonyms = field_to_bag(row[2])
hypernyms = field_to_bag(row[4])
self.synsets[id] = Synset(
id=id,
synonyms=synonyms,
hypernyms=hypernyms,
bag={**synonyms, **hypernyms}
)
for word in self.synsets[id].bag:
self.index[word].append(id)
def lexeme(self, record):
"""
Parse the sense representations like 'word#sid:freq'.
Actually, we do not care about the sid field because
we use synset identifiers instead.
"""
if '#' in record:
word, tail = record.split('#', 1)
else:
word, tail = record, None
if tail:
if ':' in tail:
sid, tail = tail.split(':', 1)
else:
sid, tail = tail, None
if tail:
freq = float(tail)
else:
freq = 1
return word, freq
Span = namedtuple('Span', 'token pos lemma index')
class BaseWSD(object):
"""
Base class for word sense disambiguation routines. Should not be used directly.
Descendant classes must implement the disambiguate_word() method.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, inventory):
self.inventory = inventory
def lemmatize(self, sentence):
"""
This method transforms the given sentence into a dict that
maps the word indices to their lemmas. It also excludes those
words whose part of speech is in the stop list.
"""
return {i: lemma for i, (_, lemma, pos) in enumerate(sentence)
if pos not in STOP_POS}
@abc.abstractmethod
def disambiguate_word(self, sentence, index):
"""
Return word sense identifier for the given word in the sentence.
"""
if not sentence or not isinstance(sentence, list):
raise ValueError('sentence should be a list')
if not isinstance(index, int) or index < 0 or index >= len(sentence):
raise ValueError('index should be in [0...%d]' % len(sentence))
def disambiguate(self, sentence):
"""
Return word sense identifiers corresponding to the words
in the given sentence.
"""
result = OrderedDict()
for index, span in enumerate(sentence):
# here, span is (token, pos, lemma), but we also need index
span = Span(*span, index)
result[span] = self.disambiguate_word(sentence, index)
return result
class OneBaseline(BaseWSD):
"""
A simple baseline that treats every word as monosemous. Not thread-safe.
"""
counter = {}
def __init__(self):
super().__init__(None)
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
word, _, _ = sentence[index]
if word not in self.counter:
self.counter[word] = len(self.counter)
return str(self.counter[word])
class SingletonsBaseline(BaseWSD):
"""
A simple baseline that puts every instance into a different cluster. Not thread-safe.
"""
counter = 0
def __init__(self):
super().__init__(None)
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
self.counter += 1
return str(self.counter)
class SparseWSD(BaseWSD):
"""
A simple sparse word sense disambiguation.
"""
sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])
def __init__(self, inventory):
super().__init__(inventory)
self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()])
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
lemmas = self.lemmatize(sentence)
if index not in lemmas:
return
svector = self.sparse.transform(Counter(lemmas.values())) # sentence vector
def search(query):
"""
Map synset identifiers to the cosine similarity value.
This function calls the function query(id) that retrieves
the corresponding dict of words.
"""
return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0)
for id in self.inventory.index[lemmas[index]]})
candidates = search(lambda id: self.inventory.synsets[id].synonyms)
# give the hypernyms a chance if nothing is found
if not candidates:
candidates = search(lambda id: self.inventory.synsets[id].bag)
if not candidates:
return
for id, _ in candidates.most_common(1):
return id
class DenseWSD(BaseWSD):
"""
A word sense disambiguation approach that is based on SenseGram.
"""
class densedict(dict):
"""
A handy dict that transforms a synset into its dense representation.
"""
def __init__(self, synsets, sensegram):
self.synsets = synsets
self.sensegram = sensegram
def __missing__(self, id):
value = self[id] = self.sensegram(self.synsets[id].bag.keys())
return value
def __init__(self, inventory, wv):
super().__init__(inventory)
self.wv = wv
self.dense = self.densedict(self.inventory.synsets, self.sensegram)
def sensegram(self, words):
"""
This is a simple implementation of SenseGram.
It just averages the embeddings corresponding to the given words.
"""
vectors = self.words_vec(set(words))
if not vectors:
return
return np.mean(np.vstack(tuple(vectors.values())), axis=0).reshape(1, -1)
def words_vec(self, words, use_norm=False):
"""
Return a dict that maps the given words to their embeddings.
"""
if callable(getattr(self.wv, 'words_vec', None)):
return self.wv.words_vec(words, use_norm)
return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv}
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
lemmas = self.lemmatize(sentence)
if index not in lemmas:
return
svector = self.sensegram(lemmas.values()) # sentence vector
if svector is None:
return
# map synset identifiers to the cosine similarity value
candidates = Counter({id: sim(svector, self.dense[id]).item(0)
for id in self.inventory.index[lemmas[index]]
if self.dense[id] is not None})
if not candidates:
return
for id, _ in candidates.most_common(1):
return id
class LeskWSD(BaseWSD):
"""
A word sense disambiguation approach that is based on Lesk method.
"""
def __init__(self, inventory):
super().__init__(inventory)
def disambiguate_word(self, sentence, word_index):
super().disambiguate_word(sentence, word_index)
lemmas = self.lemmatize(sentence)
if word_index not in lemmas:
return
mentions_dict = dict()
for synset_number in self.inventory.index[lemmas[word_index]]:
mentions_dict[synset_number] = 0
for context_word in lemmas.values():
if context_word != lemmas[word_index]:
if context_word in self.inventory.synsets[synset_number].synonyms:
mentions_dict[synset_number] = mentions_dict[synset_number] + 1
elif context_word in self.inventory.synsets[synset_number].hypernyms:
mentions_dict[synset_number] = mentions_dict[synset_number] + \
self.inventory.synsets[synset_number].hypernyms[context_word]
if len(mentions_dict) > 0:
return max(mentions_dict, key=mentions_dict.get)
else:
return
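# Hedged usage sketch (not part of the original module): assuming an inventory TSV in the
# format parsed by Inventory above, and sentences given as triples whose second and third
# fields are unpacked as lemma and pos by lemmatize(), disambiguation could look like this
# (the file name and sentence are placeholders):
#
#   inventory = Inventory('inventory.tsv')
#   wsd = SparseWSD(inventory)
#   sentence = [('castles', 'castle', 'S'), ('stand', 'stand', 'V'), ('on', 'on', 'PR')]
#   for span, sense_id in wsd.disambiguate(sentence).items():
#       print(span.token, sense_id)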
|
#!/usr/bin/env python3
from password_manager import User, Credentials
def create_user(fname, lname, uname, email, password):
'''
Function that creates a new user
'''
new_user = User(fname, lname, uname, email, password)
return new_user
def save_users(user):
'''
Function that saves a new user
'''
user.save_user()
def del_user(user):
'''
Function that deletes a user
'''
user.delete_contact()
def find_user(username):
'''
Function that finds a user by username and returns the user
'''
return User.find_by_username(username)
def display_users():
'''
Function that returns all the saved users
'''
return User.display_users()
def check_existing_users(username):
'''
Function that checks if a user is existing and returns a Boolean
'''
return User.user_exist(username)
def main():
print("Hi there. Welcome to your password manager app. Kindly let me know your name.")
user_name = input()
print(f"Hi {user_name}! What would you like to do today?")
print("\n")
while True:
print("Use the following numbers: 1 - Create new user account, 2 - Display user accounts, 3 - Find account, 4 - Delete account, 5 - Exit")
number = input()
if number == '1':
print("New User Account Creation")
print("-"*10)
print("First name ...")
f_name = input()
print("Last name ...")
l_name = input()
print("Username ...")
u_name = input()
print("Email address ...")
e_address = input()
print("Password ...")
password = input()
save_users(create_user(f_name, l_name, u_name, e_address, password)) #Create and save new user details
print("\n")
print(f"New User {u_name} created")
print('\n')
elif number == '2':
if display_users():
print("User Accounts:")
print('\n')
for user in display_users():
print(f"{user.first_name} {user.last_name} {user.user_name} {user.email}")
print('\n')
else:
print('\n')
print("Please create an account")
elif number == '3':
print("Enter the username you want to search for:")
search_username = input()
if check_existing_users(search_username):
search_user = find_user(search_username)
print(f"{search_user.first_name} {search_user.last_name}")
print("-"*10)
print(f"Username....{search_user.user_name}")
print(f"Email address....{search_user.email}")
else:
print("That user does not exist.")
elif number == '4':
print("Enter the username of the account you want to delete:")
delete_username = input()
if check_existing_users(delete_username):
del_user(find_user(delete_username))
print(f"User {delete_username} deleted")
else:
print("That user does not exist.")
elif number == '5':
print("Bye. Come again soon.")
break
else:
print("Please use the correct number")
if __name__ == '__main__':
main()
|
import sys
import os
import torch
import pandas as pd
import datetime
from argparse import ArgumentParser
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
from icecream import ic
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
from network.ecgresnet_auxout import ECGResNet_AuxOut
from utils.helpers import create_results_directory
from utils.focalloss_weights import FocalLoss
class ECGResNetEnsemble_AuxOutSystem(pl.LightningModule):
"""
This class implements the ECGResNet with ensemble and auxiliary output in PyTorch Lightning.
It can estimate the epistemic and aleatoric uncertainty of its predictions.
"""
def __init__(self, in_channels, n_grps, N,
num_classes, dropout, first_width, stride,
dilation, learning_rate, ensemble_size, n_logit_samples, loss_weights=None,
**kwargs):
"""
Initializes the ECGResNetEnsemble_AuxOutSystem
Args:
in_channels: number of channels of input
n_grps: number of ResNet groups
N: number of blocks per groups
num_classes: number of classes of the classification problem
dropout: probability of an argument to get zeroed in the dropout layer
first_width: width of the first input
stride: tuple with stride value per block per group
dilation: spacing between the kernel points of the convolutional layers
learning_rate: the learning rate of the model
ensemble_size: the number of models that make up the ensemble
n_logit_samples: number of logit samples of the auxiliary output
loss_weights: array of weights for the loss term
"""
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.num_classes = num_classes
self.ensemble_size = ensemble_size
self.n_logit_samples = n_logit_samples
self.IDs = torch.empty(0).type(torch.LongTensor)
self.predicted_labels = torch.empty(0).type(torch.LongTensor)
self.correct_predictions = torch.empty(0).type(torch.BoolTensor)
self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.total_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.models = []
self.optimizers = []
for i in range(self.ensemble_size):
self.models.append(ECGResNet_AuxOut(in_channels,
n_grps, N, num_classes,
dropout, first_width,
stride, dilation)
)
if loss_weights is not None:
weights = torch.tensor(loss_weights, dtype = torch.float)
else:
weights = loss_weights
self.loss = FocalLoss(gamma=1, weights = weights)
def forward(self, x, model_idx):
"""Performs a forward through a single ensemble member.
Args:
x (tensor): Input data.
model_idx (int): Index of the ensemble member.
Returns:
output1: Output at the auxiliary point of the ensemble member
output2: Output at the end of the ensemble member
output2_log_var: The log variance of the ensemble_member
"""
output1, output2_mean, output2_log_var = self.models[model_idx](x)
return output1, output2_mean, output2_log_var
def training_step(self, batch, batch_idx, optimizer_idx):
"""Performs a training step for all ensemble members.
Args:
batch (dict): Output of the dataloader.
batch_idx (int): Index no. of this batch.
Returns:
tensor: Total loss for this step.
"""
data, target = batch['waveform'], batch['label']
losses = []
for model_idx in range(self.ensemble_size):
# Make prediction
output1, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
train_loss1 = self.loss(output1, target)
train_loss2 = self.loss(x_i, target)
total_train_loss = (0.3 * train_loss1) + train_loss2
# Update weights for each model using individual optimizers
self.manual_backward(total_train_loss, self.optimizers[model_idx])
self.optimizers[model_idx].step()
self.optimizers[model_idx].zero_grad()
losses.append(total_train_loss.item())
self.log('model_{}_train_loss'.format(model_idx), total_train_loss)
average_train_loss = np.mean(losses)
self.log('average_train_loss', average_train_loss)
return {'loss': average_train_loss}
def validation_step(self, batch, batch_idx):
prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
data, target = batch['waveform'], batch['label']
# Predict for each model
for model_idx in range(self.ensemble_size):
# Make prediction
_, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
prediction_individual[:, model_idx] = x_i
# Calculate mean over predictions from individual ensemble members
prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
val_loss = self.loss(prediction_ensemble_mean, target)
acc = FM.accuracy(prediction_ensemble_mean, target)
# loss is tensor. The Checkpoint Callback is monitoring 'checkpoint_on'
metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
self.log('val_acc', acc.item())
self.log('val_loss', val_loss.item())
return metrics
def test_step(self, batch, batch_idx, save_to_csv=False):
prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
data, target = batch['waveform'], batch['label']
# Predict for each model
for model_idx, model in enumerate(self.models):
# Make prediction
_, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
prediction_individual[:, model_idx] = x_i.data
# Take exponent to get the variance
output2_var = output2_log_var.exp()
aleatoric_var[:, model_idx] = output2_var.data
# Calculate mean and variance over predictions from individual ensemble members
prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
prediction_ensemble_var = torch.var(prediction_individual, dim=1)
# Get the average aleatoric uncertainty for each prediction
prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1)
# Select the predicted labels
predicted_labels = prediction_ensemble_mean.argmax(dim=1)
test_loss = self.loss(prediction_ensemble_mean, target)
acc = FM.accuracy(prediction_ensemble_mean, target)
# Get the epistemic variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
# Get the aleatoric variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
total_var = predicted_labels_var + predicted_labels_aleatoric_var
# Log and save metrics
self.log('test_acc', acc.item())
self.log('test_loss', test_loss.item())
self.IDs = torch.cat((self.IDs, batch['id']), 0)
self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)
self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)
self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)
self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0)
self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)
return {'test_loss': test_loss.item(), 'test_acc': acc.item()}
def configure_optimizers(self):
"""
Initialize an optimizer for each model in the ensemble
"""
for i in range(self.ensemble_size):
self.optimizers.append(optim.Adam(self.models[i].parameters(), lr=self.learning_rate))
return self.optimizers
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--model_name', type=str, default='ensemble_none')
parser.add_argument('--ensemble_size', type=int, default=5)
parser.add_argument('--ensembling_method', type=bool, default=True)
parser.add_argument('--n_logit_samples', type=int, default=100)
return parser
def save_results(self):
"""
Combine results into single dataframe and save to disk as .csv file
"""
results = pd.concat([
pd.DataFrame(self.IDs.numpy(), columns= ['ID']),
pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),
pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),
pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']),
pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']),
pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']),
], axis=1)
create_results_directory()
results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
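# Hedged usage sketch (not part of the original file): wiring the system into a PyTorch
# Lightning trainer. The dataloaders and most hyperparameter values below are placeholders;
# only the constructor arguments themselves come from __init__ above, and the flag for
# disabling automatic optimization differs between Lightning versions.
#
#   parser = ArgumentParser()
#   parser = ECGResNetEnsemble_AuxOutSystem.add_model_specific_args(parser)
#   args = parser.parse_args()
#   model = ECGResNetEnsemble_AuxOutSystem(
#       in_channels=8, n_grps=3, N=4, num_classes=5, dropout=(0.1, 0.1),
#       first_width=32, stride=(1, 2), dilation=4, learning_rate=1e-3,
#       ensemble_size=args.ensemble_size, n_logit_samples=args.n_logit_samples,
#   )
#   trainer = pl.Trainer(max_epochs=10, automatic_optimization=False)
#   trainer.fit(model, train_dataloader, val_dataloader)
#   trainer.test(model, test_dataloaders=test_dataloader)
#   model.save_results()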
|
"""
Setup authentication from various providers
"""
import json
import os
import subprocess
import shutil
from hubploy.config import get_config
from ruamel.yaml import YAML
yaml = YAML(typ='rt')
def registry_auth(deployment):
"""
Do appropriate registry authentication for given deployment
"""
config = get_config(deployment)
if 'images' in config and 'registry' in config['images']:
registry = config['images']['registry']
provider = registry.get('provider')
if provider == 'gcloud':
registry_auth_gcloud(
deployment, **registry['gcloud']
)
elif provider == 'aws':
registry_auth_aws(
deployment, **registry['aws']
)
elif provider == 'azure':
registry_auth_azure(
deployment, **registry['azure']
)
else:
raise ValueError(
f'Unknown provider {provider} found in hubploy.yaml')
def registry_auth_gcloud(deployment, project, service_key):
"""
Setup GCR authentication with a service_key
This changes *global machine state* on where docker can push to!
"""
service_key_path = os.path.join(
'deployments', deployment, 'secrets', service_key
)
subprocess.check_call([
'gcloud', 'auth',
'activate-service-account',
'--key-file', os.path.abspath(service_key_path)
])
subprocess.check_call([
'gcloud', 'auth', 'configure-docker'
])
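# Hedged configuration sketch (not from the original docstrings): mirroring the azure examples
# further down, a gcloud registry section in hubploy.yaml would presumably look like this,
# with the key names taken from the function signature above and the values being placeholders:
#
#   images:
#     registry:
#       provider: gcloud
#       gcloud:
#         project: my-gcp-project
#         service_key: gcr-key.json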
def registry_auth_aws(deployment, project, zone, service_key):
"""
Setup AWS authentication to ECR container registry
This changes *global machine state* on where docker can push to!
"""
service_key_path = os.path.join(
'deployments', deployment, 'secrets', service_key
)
if not os.path.isfile(service_key_path):
raise FileNotFoundError(
f'The service_key file {service_key_path} does not exist')
# move credentials to standard location
cred_dir = os.path.expanduser('~/.aws')
if not os.path.isdir(cred_dir):
os.mkdir(cred_dir)
shutil.copyfile(service_key_path, os.path.join(cred_dir, 'credentials'))
registry = f'{project}.dkr.ecr.{zone}.amazonaws.com'
# amazon-ecr-credential-helper is installed in .circleci/config.yaml;
# this adds the necessary entry to authenticate docker with ECR
docker_config_dir = os.path.expanduser('~/.docker')
os.makedirs(docker_config_dir, exist_ok=True)
docker_config = os.path.join(docker_config_dir, 'config.json')
if os.path.exists(docker_config):
with open(docker_config, 'r') as f:
config = json.load(f)
else:
config = {'credHelpers': {}}
config['credHelpers'][registry] = 'ecr-login'
with open(docker_config, 'w') as f:
json.dump(config, f)
def registry_auth_azure(deployment, resource_group, registry, auth_file):
"""
Azure authentication for ACR
In hubploy.yaml include:
registry:
provider: azure
azure:
resource_group: resource_group_name
registry: registry_name
auth_file: azure_auth_file.yaml
The auth_file (e.g. azure_auth_file.yaml) should have the following
keys: appId, tenant, password. This is the format produced
by the az command when creating a service principal.
See https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal
"""
# parse Azure auth file
auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file)
with open(auth_file_path) as f:
auth = yaml.load(f)
# log in
subprocess.check_call([
'az', 'login', '--service-principal',
'--user', auth['appId'],
'--tenant', auth['tenant'],
'--password', auth['password']
])
# log in to ACR
subprocess.check_call([
'az', 'acr', 'login',
'--name', registry
])
def cluster_auth(deployment):
"""
Do appropriate cluster authentication for given deployment
"""
config = get_config(deployment)
if 'cluster' in config:
cluster = config['cluster']
provider = cluster.get('provider')
if provider == 'gcloud':
cluster_auth_gcloud(
deployment, **cluster['gcloud']
)
elif provider == 'aws':
cluster_auth_aws(
deployment, **cluster['aws']
)
elif provider == 'azure':
cluster_auth_azure(
deployment, **cluster['azure']
)
else:
raise ValueError(
f'Unknown provider {provider} found in hubploy.yaml')
def cluster_auth_gcloud(deployment, project, cluster, zone, service_key):
"""
Setup GKE authentication with service_key
This changes *global machine state* on what current kubernetes cluster is!
"""
service_key_path = os.path.join(
'deployments', deployment, 'secrets', service_key
)
subprocess.check_call([
'gcloud', 'auth',
'activate-service-account',
'--key-file', os.path.abspath(service_key_path)
])
subprocess.check_call([
'gcloud', 'container', 'clusters',
f'--zone={zone}',
f'--project={project}',
'get-credentials', cluster
])
def cluster_auth_aws(deployment, project, cluster, zone, service_key):
"""
Setup AWS authentication with service_key
This changes *global machine state* on what current kubernetes cluster is!
"""
# move credentials to standard location
service_key_path = os.path.join(
'deployments', deployment, 'secrets', service_key
)
cred_dir = os.path.expanduser('~/.aws')
if not os.path.isdir(cred_dir):
os.mkdir(cred_dir)
shutil.copyfile(service_key_path, os.path.join(cred_dir, 'credentials'))
subprocess.check_call(['aws2', 'eks', 'update-kubeconfig',
'--name', cluster, '--region', zone])
def cluster_auth_azure(deployment, resource_group, cluster, auth_file):
"""
Azure authentication for AKS
In hubploy.yaml include:
cluster:
provider: azure
azure:
resource_group: resource_group_name
cluster: cluster_name
auth_file: azure_auth_file.yaml
    The service principal file referenced by auth_file should have the
    following keys: appId, tenant, password. This is the format produced
    by the az command when creating a service principal.
"""
# parse Azure auth file
auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file)
with open(auth_file_path) as f:
        auth = yaml.safe_load(f)
# log in
subprocess.check_call([
'az', 'login', '--service-principal',
'--user', auth['appId'],
'--tenant', auth['tenant'],
'--password', auth['password']
])
# get cluster credentials
subprocess.check_call([
'az', 'aks', 'get-credentials',
'--name', cluster,
'--resource-group', resource_group
])
|
import uuid
from fastapi import Depends, FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.security import OAuth2PasswordRequestForm
from pydantic import BaseSettings, BaseModel, UUID4
from fastapi_login import LoginManager
from fastapi_login.exceptions import InvalidCredentialsException
class Settings(BaseSettings):
    secret: str  # automatically taken from the environment variable
class UserCreate(BaseModel):
email: str
password: str
class User(UserCreate):
id: UUID4
DEFAULT_SETTINGS = Settings(_env_file=".env")
DB = {
"users": {}
}
TOKEN_URL = "/auth/token"
app = FastAPI()
manager = LoginManager(DEFAULT_SETTINGS.secret, TOKEN_URL)
@manager.user_loader
def get_user(email: str):
return DB["users"].get(email)
@app.get("/")
def index():
with open("./templates/index.html", 'r') as f:
return HTMLResponse(content=f.read())
@app.post("/auth/register")
def register(user: UserCreate):
if user.email in DB["users"]:
raise HTTPException(status_code=400, detail="A user with this email already exists")
else:
db_user = User(**user.dict(), id=uuid.uuid4())
# PLEASE hash your passwords in real world applications
DB["users"][db_user.email] = db_user
return {"detail": "Successfull registered"}
@app.post(TOKEN_URL)
def login(data: OAuth2PasswordRequestForm = Depends()):
email = data.username
password = data.password
user = get_user(email) # we are using the same function to retrieve the user
if not user:
raise InvalidCredentialsException # you can also use your own HTTPException
elif password != user.password:
raise InvalidCredentialsException
access_token = manager.create_access_token(
data=dict(sub=email)
)
return {'access_token': access_token, 'token_type': 'bearer'}
@app.get("/private")
def private_route(user=Depends(manager)):
return {"detail": f"Welcome {user.email}"}
if __name__ == "__main__":
import uvicorn
uvicorn.run("app:app")
|
import json
import os
import sys
import shutil
import tempfile
import unittest
import ray
import ray.cloudpickle as cloudpickle
from ray.rllib import _register_all
from ray import tune
from ray.tune.logger import NoopLogger
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.function_runner import with_parameters, wrap_function, \
FuncCheckpointUtil
from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION
def creator_generator(logdir):
def logger_creator(config):
return NoopLogger(config, logdir)
return logger_creator
class FuncCheckpointUtilTest(unittest.TestCase):
def setUp(self):
self.logdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.logdir)
def testEmptyCheckpoint(self):
checkpoint_dir = FuncCheckpointUtil.mk_null_checkpoint_dir(self.logdir)
assert FuncCheckpointUtil.is_null_checkpoint(checkpoint_dir)
def testTempCheckpointDir(self):
checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)
assert FuncCheckpointUtil.is_temp_checkpoint_dir(checkpoint_dir)
def testConvertTempToPermanent(self):
checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)
new_checkpoint_dir = FuncCheckpointUtil.create_perm_checkpoint(
checkpoint_dir, self.logdir, step=4)
assert new_checkpoint_dir == TrainableUtil.find_checkpoint_dir(
new_checkpoint_dir)
assert os.path.exists(new_checkpoint_dir)
assert not FuncCheckpointUtil.is_temp_checkpoint_dir(
new_checkpoint_dir)
tmp_checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(
self.logdir)
assert tmp_checkpoint_dir != new_checkpoint_dir
class FunctionCheckpointingTest(unittest.TestCase):
def setUp(self):
self.logdir = tempfile.mkdtemp()
self.logger_creator = creator_generator(self.logdir)
def tearDown(self):
shutil.rmtree(self.logdir)
def testCheckpointReuse(self):
"""Test that repeated save/restore never reuses same checkpoint dir."""
def train(config, checkpoint_dir=None):
if checkpoint_dir:
count = sum("checkpoint-" in path
for path in os.listdir(checkpoint_dir))
assert count == 1, os.listdir(checkpoint_dir)
for step in range(20):
with tune.checkpoint_dir(step=step) as checkpoint_dir:
path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(step))
open(path, "a").close()
tune.report(test=step)
wrapped = wrap_function(train)
checkpoint = None
for i in range(5):
new_trainable = wrapped(logger_creator=self.logger_creator)
if checkpoint:
new_trainable.restore(checkpoint)
for i in range(2):
result = new_trainable.train()
checkpoint = new_trainable.save()
new_trainable.stop()
assert result[TRAINING_ITERATION] == 10
def testCheckpointReuseObject(self):
"""Test that repeated save/restore never reuses same checkpoint dir."""
def train(config, checkpoint_dir=None):
if checkpoint_dir:
count = sum("checkpoint-" in path
for path in os.listdir(checkpoint_dir))
assert count == 1, os.listdir(checkpoint_dir)
for step in range(20):
with tune.checkpoint_dir(step=step) as checkpoint_dir:
path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(step))
open(path, "a").close()
tune.report(test=step)
wrapped = wrap_function(train)
checkpoint = None
for i in range(5):
new_trainable = wrapped(logger_creator=self.logger_creator)
if checkpoint:
new_trainable.restore_from_object(checkpoint)
for i in range(2):
result = new_trainable.train()
checkpoint = new_trainable.save_to_object()
new_trainable.stop()
self.assertTrue(result[TRAINING_ITERATION] == 10)
def testCheckpointReuseObjectWithoutTraining(self):
"""Test that repeated save/restore never reuses same checkpoint dir."""
def train(config, checkpoint_dir=None):
if checkpoint_dir:
count = sum("checkpoint-" in path
for path in os.listdir(checkpoint_dir))
assert count == 1, os.listdir(checkpoint_dir)
for step in range(20):
with tune.checkpoint_dir(step=step) as checkpoint_dir:
path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(step))
open(path, "a").close()
tune.report(test=step)
wrapped = wrap_function(train)
new_trainable = wrapped(logger_creator=self.logger_creator)
for i in range(2):
result = new_trainable.train()
checkpoint = new_trainable.save_to_object()
new_trainable.stop()
new_trainable2 = wrapped(logger_creator=self.logger_creator)
new_trainable2.restore_from_object(checkpoint)
new_trainable2.stop()
new_trainable2 = wrapped(logger_creator=self.logger_creator)
new_trainable2.restore_from_object(checkpoint)
result = new_trainable2.train()
new_trainable2.stop()
self.assertTrue(result[TRAINING_ITERATION] == 3)
def testReuseNullCheckpoint(self):
def train(config, checkpoint_dir=None):
assert not checkpoint_dir
for step in range(10):
tune.report(test=step)
# Create checkpoint
wrapped = wrap_function(train)
checkpoint = None
new_trainable = wrapped(logger_creator=self.logger_creator)
new_trainable.train()
checkpoint = new_trainable.save()
new_trainable.stop()
# Use the checkpoint a couple of times
for i in range(3):
new_trainable = wrapped(logger_creator=self.logger_creator)
new_trainable.restore(checkpoint)
new_trainable.stop()
# Make sure the result is still good
new_trainable = wrapped(logger_creator=self.logger_creator)
new_trainable.restore(checkpoint)
result = new_trainable.train()
checkpoint = new_trainable.save()
new_trainable.stop()
self.assertTrue(result[TRAINING_ITERATION] == 1)
def testMultipleNullCheckpoints(self):
def train(config, checkpoint_dir=None):
assert not checkpoint_dir
for step in range(10):
tune.report(test=step)
wrapped = wrap_function(train)
checkpoint = None
for i in range(5):
new_trainable = wrapped(logger_creator=self.logger_creator)
if checkpoint:
new_trainable.restore(checkpoint)
result = new_trainable.train()
checkpoint = new_trainable.save()
new_trainable.stop()
self.assertTrue(result[TRAINING_ITERATION] == 1)
def testMultipleNullMemoryCheckpoints(self):
def train(config, checkpoint_dir=None):
assert not checkpoint_dir
for step in range(10):
tune.report(test=step)
wrapped = wrap_function(train)
checkpoint = None
for i in range(5):
new_trainable = wrapped(logger_creator=self.logger_creator)
if checkpoint:
new_trainable.restore_from_object(checkpoint)
result = new_trainable.train()
checkpoint = new_trainable.save_to_object()
new_trainable.stop()
assert result[TRAINING_ITERATION] == 1
def testFunctionNoCheckpointing(self):
def train(config, checkpoint_dir=None):
if checkpoint_dir:
assert os.path.exists(checkpoint_dir)
for step in range(10):
tune.report(test=step)
wrapped = wrap_function(train)
new_trainable = wrapped(logger_creator=self.logger_creator)
result = new_trainable.train()
checkpoint = new_trainable.save()
new_trainable.stop()
new_trainable2 = wrapped(logger_creator=self.logger_creator)
new_trainable2.restore(checkpoint)
result = new_trainable2.train()
self.assertEquals(result[TRAINING_ITERATION], 1)
checkpoint = new_trainable2.save()
new_trainable2.stop()
def testFunctionRecurringSave(self):
"""This tests that save and restore are commutative."""
def train(config, checkpoint_dir=None):
if checkpoint_dir:
assert os.path.exists(checkpoint_dir)
for step in range(10):
if step % 3 == 0:
with tune.checkpoint_dir(step=step) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
with open(path, "w") as f:
f.write(json.dumps({"step": step}))
tune.report(test=step)
wrapped = wrap_function(train)
new_trainable = wrapped(logger_creator=self.logger_creator)
new_trainable.train()
checkpoint_obj = new_trainable.save_to_object()
new_trainable.restore_from_object(checkpoint_obj)
checkpoint = new_trainable.save()
new_trainable.stop()
new_trainable2 = wrapped(logger_creator=self.logger_creator)
new_trainable2.restore(checkpoint)
new_trainable2.train()
new_trainable2.stop()
def testFunctionImmediateSave(self):
"""This tests that save and restore are commutative."""
def train(config, checkpoint_dir=None):
if checkpoint_dir:
assert os.path.exists(checkpoint_dir)
for step in range(10):
with tune.checkpoint_dir(step=step) as checkpoint_dir:
print(checkpoint_dir)
path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(step))
open(path, "w").close()
tune.report(test=step)
wrapped = wrap_function(train)
new_trainable = wrapped(logger_creator=self.logger_creator)
new_trainable.train()
new_trainable.train()
checkpoint_obj = new_trainable.save_to_object()
new_trainable.stop()
new_trainable2 = wrapped(logger_creator=self.logger_creator)
new_trainable2.restore_from_object(checkpoint_obj)
checkpoint_obj = new_trainable2.save_to_object()
new_trainable2.train()
result = new_trainable2.train()
assert sum("tmp" in path for path in os.listdir(self.logdir)) == 1
new_trainable2.stop()
assert sum("tmp" in path for path in os.listdir(self.logdir)) == 0
assert result[TRAINING_ITERATION] == 4
class FunctionApiTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024)
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def testCheckpointError(self):
def train(config, checkpoint_dir=False):
pass
with self.assertRaises(ValueError):
tune.run(train, checkpoint_freq=1)
with self.assertRaises(ValueError):
tune.run(train, checkpoint_at_end=True)
def testCheckpointFunctionAtEnd(self):
def train(config, checkpoint_dir=False):
for i in range(10):
tune.report(test=i)
with tune.checkpoint_dir(step=10) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
with open(checkpoint_path, "w") as f:
f.write("hello")
[trial] = tune.run(train).trials
assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log"))
def testCheckpointFunctionAtEndContext(self):
def train(config, checkpoint_dir=False):
for i in range(10):
tune.report(test=i)
with tune.checkpoint_dir(step=10) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
with open(checkpoint_path, "w") as f:
f.write("hello")
[trial] = tune.run(train).trials
assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log"))
def testVariousCheckpointFunctionAtEnd(self):
def train(config, checkpoint_dir=False):
for i in range(10):
with tune.checkpoint_dir(step=i) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
with open(checkpoint_path, "w") as f:
f.write("hello")
tune.report(test=i)
with tune.checkpoint_dir(step=i) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log2")
with open(checkpoint_path, "w") as f:
f.write("goodbye")
[trial] = tune.run(train, keep_checkpoints_num=3).trials
assert os.path.exists(
os.path.join(trial.checkpoint.value, "ckpt.log2"))
def testReuseCheckpoint(self):
def train(config, checkpoint_dir=None):
itr = 0
if checkpoint_dir:
with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f:
itr = int(f.read()) + 1
for i in range(itr, config["max_iter"]):
with tune.checkpoint_dir(step=i) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
with open(checkpoint_path, "w") as f:
f.write(str(i))
tune.report(test=i, training_iteration=i)
[trial] = tune.run(
train,
config={
"max_iter": 5
},
).trials
last_ckpt = trial.checkpoint.value
assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log"))
analysis = tune.run(train, config={"max_iter": 10}, restore=last_ckpt)
trial_dfs = list(analysis.trial_dataframes.values())
assert len(trial_dfs[0]["training_iteration"]) == 5
def testRetry(self):
def train(config, checkpoint_dir=None):
restored = bool(checkpoint_dir)
itr = 0
if checkpoint_dir:
with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f:
itr = int(f.read()) + 1
for i in range(itr, 10):
if i == 5 and not restored:
raise Exception("try to fail me")
with tune.checkpoint_dir(step=i) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
with open(checkpoint_path, "w") as f:
f.write(str(i))
tune.report(test=i, training_iteration=i)
analysis = tune.run(train, max_failures=3)
last_ckpt = analysis.trials[0].checkpoint.value
assert os.path.exists(os.path.join(last_ckpt, "ckpt.log"))
trial_dfs = list(analysis.trial_dataframes.values())
assert len(trial_dfs[0]["training_iteration"]) == 10
def testEnabled(self):
def train(config, checkpoint_dir=None):
is_active = tune.is_session_enabled()
if is_active:
tune.report(active=is_active)
return is_active
assert train({}) is False
analysis = tune.run(train)
t = analysis.trials[0]
assert t.last_result["active"]
def testBlankCheckpoint(self):
def train(config, checkpoint_dir=None):
restored = bool(checkpoint_dir)
itr = 0
if checkpoint_dir:
with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f:
itr = int(f.read()) + 1
for i in range(itr, 10):
if i == 5 and not restored:
raise Exception("try to fail me")
with tune.checkpoint_dir(step=itr) as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
with open(checkpoint_path, "w") as f:
f.write(str(i))
tune.report(test=i, training_iteration=i)
analysis = tune.run(train, max_failures=3)
trial_dfs = list(analysis.trial_dataframes.values())
assert len(trial_dfs[0]["training_iteration"]) == 10
def testWithParameters(self):
class Data:
def __init__(self):
self.data = [0] * 500_000
data = Data()
data.data[100] = 1
def train(config, data=None):
data.data[101] = 2 # Changes are local
tune.report(metric=len(data.data), hundred=data.data[100])
trial_1, trial_2 = tune.run(
with_parameters(train, data=data), num_samples=2).trials
self.assertEquals(data.data[101], 0)
self.assertEquals(trial_1.last_result["metric"], 500_000)
self.assertEquals(trial_1.last_result["hundred"], 1)
self.assertEquals(trial_2.last_result["metric"], 500_000)
self.assertEquals(trial_2.last_result["hundred"], 1)
self.assertTrue(str(trial_1).startswith("train_"))
# With checkpoint dir parameter
def train(config, checkpoint_dir="DIR", data=None):
data.data[101] = 2 # Changes are local
tune.report(metric=len(data.data), cp=checkpoint_dir)
trial_1, trial_2 = tune.run(
with_parameters(train, data=data), num_samples=2).trials
self.assertEquals(data.data[101], 0)
self.assertEquals(trial_1.last_result["metric"], 500_000)
self.assertEquals(trial_1.last_result["cp"], "DIR")
self.assertEquals(trial_2.last_result["metric"], 500_000)
self.assertEquals(trial_2.last_result["cp"], "DIR")
self.assertTrue(str(trial_1).startswith("train_"))
def testWithParameters2(self):
class Data:
def __init__(self):
import numpy as np
self.data = np.random.rand((2 * 1024 * 1024))
def train(config, data=None):
tune.report(metric=len(data.data))
trainable = tune.with_parameters(train, data=Data())
dumped = cloudpickle.dumps(trainable)
assert sys.getsizeof(dumped) < 100 * 1024
def testReturnAnonymous(self):
def train(config):
return config["a"]
trial_1, trial_2 = tune.run(
train, config={
"a": tune.grid_search([4, 8])
}).trials
self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4)
self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8)
def testReturnSpecific(self):
def train(config):
return {"m": config["a"]}
trial_1, trial_2 = tune.run(
train, config={
"a": tune.grid_search([4, 8])
}).trials
self.assertEquals(trial_1.last_result["m"], 4)
self.assertEquals(trial_2.last_result["m"], 8)
def testYieldAnonymous(self):
def train(config):
for i in range(10):
yield config["a"] + i
trial_1, trial_2 = tune.run(
train, config={
"a": tune.grid_search([4, 8])
}).trials
self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4 + 9)
self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8 + 9)
def testYieldSpecific(self):
def train(config):
for i in range(10):
yield {"m": config["a"] + i}
trial_1, trial_2 = tune.run(
train, config={
"a": tune.grid_search([4, 8])
}).trials
self.assertEquals(trial_1.last_result["m"], 4 + 9)
self.assertEquals(trial_2.last_result["m"], 8 + 9)
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes api."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import uuidutils
from six.moves import http_client
import webob
from webob import exc
from cinder.api import api_utils
from cinder.api import common
from cinder.api.contrib import scheduler_hints
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volumes
from cinder.api.v2.views import volumes as volume_views
from cinder.api import validation
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder.image import glance
from cinder import objects
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
_view_builder_class = volume_views.ViewBuilder
def __init__(self, ext_mgr):
self.volume_api = cinder_volume.API()
self.group_api = group_api.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
req.cache_db_volume(vol)
api_utils.add_visible_admin_metadata(vol)
return self._view_builder.detail(req, vol)
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
cascade = utils.get_bool_param('cascade', req.params)
LOG.info("Delete volume with id: %s", id)
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume, cascade=cascade)
return webob.Response(status_int=http_client.ACCEPTED)
def index(self, req):
"""Returns a summary list of volumes."""
return self._get_volumes(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._get_volumes(req, is_detail=True)
def _get_volumes(self, req, is_detail):
"""Returns a list of volumes, transformed through view builder."""
context = req.environ['cinder.context']
params = req.params.copy()
marker, limit, offset = common.get_pagination_params(params)
sort_keys, sort_dirs = common.get_sort_params(params)
filters = params
# NOTE(wanghao): Always removing glance_metadata since we support it
# only in API version >= VOLUME_LIST_GLANCE_METADATA.
filters.pop('glance_metadata', None)
api_utils.remove_invalid_filter_options(
context,
filters,
self._get_volume_filter_options())
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in sort_keys:
sort_keys[sort_keys.index('name')] = 'display_name'
if 'name' in filters:
filters['display_name'] = filters.pop('name')
self.volume_api.check_volume_filters(filters)
volumes = self.volume_api.get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
viewable_admin_meta=True,
offset=offset)
for volume in volumes:
api_utils.add_visible_admin_metadata(volume)
req.cache_db_volumes(volumes.objects)
if is_detail:
volumes = self._view_builder.detail_list(req, volumes)
else:
volumes = self._view_builder.summary_list(req, volumes)
return volumes
def _image_uuid_from_ref(self, image_ref, context):
# If the image ref was generated by nova api, strip image_ref
# down to an id.
image_uuid = None
try:
image_uuid = image_ref.split('/').pop()
except AttributeError:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
image_service = glance.get_default_image_service()
# First see if this is an actual image ID
if uuidutils.is_uuid_like(image_uuid):
try:
image = image_service.show(context, image_uuid)
if 'id' in image:
return image['id']
except Exception:
# Pass and see if there is a matching image name
pass
# Could not find by ID, check if it is an image name
try:
params = {'filters': {'name': image_ref}}
images = list(image_service.detail(context, **params))
if len(images) > 1:
msg = _("Multiple matches found for '%s', use an ID to be more"
" specific.") % image_ref
raise exc.HTTPConflict(explanation=msg)
for img in images:
return img['id']
except exc.HTTPConflict:
raise
except Exception:
# Pass the other exception and let default not found error
# handling take care of it
pass
msg = _("Invalid image identifier or unable to "
"access requested image.")
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(http_client.ACCEPTED)
@validation.schema(volumes.create, mv.V2_BASE_VERSION)
def create(self, req, body):
"""Creates a new volume."""
LOG.debug('Create volume request body: %s', body)
context = req.environ['cinder.context']
# NOTE (pooja_jadhav) To fix bug 1774155, scheduler hints is not
# loaded as a standard extension. If user passes
# OS-SCH-HNT:scheduler_hints in the request body, then it will be
# validated in the create method and this method will add
# scheduler_hints in body['volume'].
body = scheduler_hints.create(req, body)
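        # Illustrative request body (values are placeholders): after the call
        # above, a payload such as
        #   {"volume": {"name": "vol1", "size": 1},
        #    "OS-SCH-HNT:scheduler_hints": {"same_host": ["<volume-uuid>"]}}
        # has its hints folded into body['volume']['scheduler_hints'] before
        # the code below pulls fields out of the volume dict.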
volume = body['volume']
kwargs = {}
self.validate_name_and_description(volume, check_length=False)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in volume:
volume['display_name'] = volume.pop('name')
# NOTE(thingee): v2 API allows description instead of
# display_description
if 'description' in volume:
volume['display_description'] = volume.pop('description')
if 'image_id' in volume:
volume['imageRef'] = volume.pop('image_id')
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
# Not found exception will be handled at the wsgi level
kwargs['volume_type'] = (
objects.VolumeType.get_by_name_or_id(context, req_volume_type))
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
# Not found exception will be handled at the wsgi level
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
# Not found exception will be handled at the wsgi level
kwargs['source_volume'] = \
self.volume_api.get_volume(context,
source_volid)
else:
kwargs['source_volume'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
LOG.info("Create volume of %s GB", size)
image_ref = volume.get('imageRef')
if image_ref is not None:
image_uuid = self._image_uuid_from_ref(image_ref, context)
kwargs['image_id'] = image_uuid
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
kwargs['multiattach'] = utils.get_bool_param('multiattach', volume)
if kwargs.get('multiattach', False):
msg = ("The option 'multiattach' "
"is deprecated and will be removed in a future "
"release. The default behavior going forward will "
"be to specify multiattach enabled volume types.")
versionutils.report_deprecated_feature(LOG, msg)
try:
new_volume = self.volume_api.create(
context, size, volume.get('display_name'),
volume.get('display_description'), **kwargs)
except exception.VolumeTypeDefaultMisconfiguredError as err:
raise webob.exc.HTTPInternalServerError(explanation=err.msg)
retval = self._view_builder.detail(req, new_volume)
return retval
def _get_volume_filter_options(self):
"""Return volume search options allowed by non-admin."""
return common.get_enabled_resource_filters('volume')['volume']
@validation.schema(volumes.update, mv.V2_BASE_VERSION,
mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES))
@validation.schema(volumes.update_volume_v353,
mv.SUPPORT_VOLUME_SCHEMA_CHANGES)
def update(self, req, id, body):
"""Update a volume."""
context = req.environ['cinder.context']
update_dict = body['volume']
self.validate_name_and_description(update_dict, check_length=False)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in update_dict:
update_dict['display_name'] = update_dict.pop('name')
# NOTE(thingee): v2 API allows description instead of
# display_description
if 'description' in update_dict:
update_dict['display_description'] = update_dict.pop('description')
# Not found and Invalid exceptions will be handled at the wsgi level
try:
volume = self.volume_api.get(context, id, viewable_admin_meta=True)
volume_utils.notify_about_volume_usage(context, volume,
'update.start')
self.volume_api.update(context, volume, update_dict)
except exception.InvalidVolumeMetadataSize as error:
raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
volume.update(update_dict)
api_utils.add_visible_admin_metadata(volume)
volume_utils.notify_about_volume_usage(context, volume,
'update.end')
return self._view_builder.detail(req, volume)
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
|
import os
import random
import subprocess
import numpy as np
import torch
import time
try:
import torch_xla
import torch_xla.core.xla_model as xm
XLA = True
except ModuleNotFoundError:
XLA = False
def freeze_module(module):
    # Disable gradient updates for every parameter in the module.
    for param in module.parameters():
        param.requires_grad = False
def fit_state_dict(state_dict, model):
'''
Ignore size mismatch when loading state_dict
'''
    for name, param in model.named_parameters():
        if name not in state_dict:
            # Missing keys are left for load_state_dict(strict=False) to report.
            continue
        new_param = state_dict[name]
        if new_param.size() != param.size():
            print(f'Size mismatch in {name}: {new_param.shape} -> {param.shape}')
            state_dict.pop(name)
def get_device(arg):
    # xm.xla_device() returns a torch.device, so a single isinstance check
    # covers CUDA, CPU and XLA devices (xm.xla_device is a function, not a
    # type, and cannot be used as the second argument of isinstance).
    if isinstance(arg, torch.device):
        device = arg
elif arg is None or isinstance(arg, (list, tuple)):
if XLA:
device = xm.xla_device()
else:
device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
elif isinstance(arg, str):
if arg == 'xla' and XLA:
device = xm.xla_device()
else:
device = torch.device(arg)
if isinstance(arg, (list, tuple)):
if isinstance(arg[0], int):
device_ids = list(arg)
elif isinstance(arg[0], str) and arg[0].isnumeric():
device_ids = [ int(a) for a in arg ]
else:
raise ValueError(f'Invalid device: {arg}')
else:
if device.type == 'cuda':
assert torch.cuda.is_available()
if device.index is None:
device_count = torch.cuda.device_count()
if device_count > 1:
device_ids = list(range(device_count))
else:
device_ids = [0]
else:
device_ids = [device.index]
else:
device_ids = [device.index]
return device, device_ids
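# Illustrative return values (assuming a CUDA build with two visible GPUs and
# no torch_xla installed):
#   get_device(None)       -> (device('cuda'), [0, 1])
#   get_device('cuda:1')   -> (device('cuda:1'), [1])
#   get_device([0, 1])     -> (device('cuda'), [0, 1])
#   get_device(['0', '1']) -> (device('cuda'), [0, 1])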
def seed_everything(random_state=0, deterministic=False):
random.seed(random_state)
os.environ['PYTHONHASHSEED'] = str(random_state)
np.random.seed(random_state)
torch.manual_seed(random_state)
torch.cuda.manual_seed(random_state)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
torch.backends.cudnn.deterministic = False
def get_gpu_memory():
"""
Code borrowed from:
https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4
Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().split('\n')]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def get_time(time_format='%H:%M:%S'):
return time.strftime(time_format, time.localtime())
|
import os
import resources_portal.models # noqa
from flask import Flask
from flask_migrate import Migrate
from flask_restful import Api
from resources_portal.db import db
from resources_portal.views import user
migrate = Migrate()
def initialize_routes(api: Api):
api.add_resource(user.UsersApi, "/users")
api.add_resource(user.UserApi, "/users/<user_id>")
def set_database_URI(app: Flask):
database_URI_template = "postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
app.config["SQLALCHEMY_DATABASE_URI"] = database_URI_template.format(
DB_USER=app.config["DB_USER"],
DB_PASSWORD=app.config["DB_PASSWORD"],
DB_HOST=os.environ["DB_HOST"],
DB_PORT=app.config["DB_PORT"],
DB_NAME=app.config["DB_NAME"],
)
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
app.config.from_envvar("RESOURCES_PORTAL_CONFIG_FILE")
set_database_URI(app)
api = Api(app)
initialize_routes(api)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
migrate.init_app(app, db)
from resources_portal.schemas import ma
ma.init_app(app)
return app
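# A minimal sketch of using the factory locally (module path and config values
# are assumptions; RESOURCES_PORTAL_CONFIG_FILE and DB_HOST must point at a
# real config file and database for create_app() to succeed):
#
#   from resources_portal import create_app
#   app = create_app()
#   app.run(debug=True)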
|
from CommonServerPython import *
reload(sys)
sys.setdefaultencoding('utf-8') # pylint: disable=E1101
requests.packages.urllib3.disable_warnings()
URL = demisto.getParam('server')
if URL[-1] != '/':
URL += '/'
if not demisto.getParam('proxy'):
    # Use pop() so a missing variable does not raise a KeyError.
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
VALIDATE_CERT = not demisto.params().get('insecure', True)
ID_AND_API_KEY = demisto.getParam('credentials')['identifier'] + ':' + demisto.getParam('credentials')['password']
ENCODED_AUTH_KEY = base64.b64encode(ID_AND_API_KEY.encode("utf-8"))
MSSP_ACCOUNT_ID = demisto.getParam('mssp_sub_account_id')
HEADERS = {'Authorization': 'Basic {}'.format(ENCODED_AUTH_KEY.decode()), 'Content-Type': 'application/json',
'Account-Id': demisto.getParam('credentials')['identifier']}
# Change the Account-Id to the sub account id, so all actions will be on the sub account.
if MSSP_ACCOUNT_ID:
HEADERS['Account-Id'] = MSSP_ACCOUNT_ID
IOC_TYPE_TO_DBOT_TYPE = {
'IpAddresses': 'ip',
'Urls': 'url',
'Domains': 'domain',
'Hashes': 'hash'
}
DEFAULT_TIME_RANGE = '1 day'
SEVERITY_LEVEL = {
'All': 0,
'Low': 1,
'Medium': 2,
'High': 3
}
def http_request(method, path, json_data=None, params=None, json_response=False):
"""
Send the request to IntSights and return the JSON response
"""
try:
response = requests.request(method, URL + path, headers=HEADERS, json=json_data,
params=params, verify=VALIDATE_CERT)
except requests.exceptions.SSLError:
        raise Exception('Connection error in the API call to IntSights.\nCheck the "Trust any certificate" (insecure) parameter.')
except requests.ConnectionError:
raise Exception('Connection error in the API call to IntSights.\nCheck your Server URL parameter.')
if response.status_code < 200 or response.status_code > 299:
if not (response.text == 'SeverityNotChanged' or response.text == 'TagExist'
or response.text == 'IocBlocklistStatusNotChanged'):
return_error('Error in API call to IntSights service %s - [%d] %s' %
(path, response.status_code, response.text))
if response.status_code == 204:
return [] # type: ignore
if json_response:
try:
return response.json()
except ValueError:
raise Exception('Error in API call to IntSights service - check your configured URL address')
return response
def convert_iso_string_to_python_date(date_in_iso_format):
iso_format = "%Y-%m-%dT%H:%M:%S"
date_in_python_format = datetime.strptime(date_in_iso_format, iso_format)
return date_in_python_format
def convert_python_date_to_unix_millisecond(python_date_object):
timestamp_in_unix_millisecond = date_to_timestamp(python_date_object, 'datetime.datetime')
return timestamp_in_unix_millisecond
def increase_iso_by_x_days(date_in_iso_format, num_of_days):
date_in_python_format = convert_iso_string_to_python_date(date_in_iso_format)
new_date_in_python_format = date_in_python_format + timedelta(days=int(num_of_days))
new_date_in_iso_format = new_date_in_python_format.isoformat()
return new_date_in_iso_format
def remove_milliseconds_from_iso(date_in_iso_format):
date_parts_arr = date_in_iso_format.split('.')
date_in_iso_without_milliseconds = date_parts_arr[0]
return date_in_iso_without_milliseconds
def increase_timestamp_by_x_days(date_in_unix_ms_timestamp, num_of_days):
date_in_iso = timestamp_to_datestring(date_in_unix_ms_timestamp)
date_in_iso_without_ms = remove_milliseconds_from_iso(date_in_iso)
date_in_iso_plus_x_days = increase_iso_by_x_days(date_in_iso_without_ms, num_of_days)
timestamp_in_unix_ms_plus_x_days = date_to_timestamp(date_in_iso_plus_x_days)
return timestamp_in_unix_ms_plus_x_days
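# Worked example (illustrative values): 1600000000000 ms is
# 2020-09-13T12:26:40 UTC; adding 2 days gives 2020-09-15T12:26:40, so
#   increase_timestamp_by_x_days(1600000000000, 2) == 1600172800000
# (the round trip through ISO strings simply drops the milliseconds part).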
def update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp):
params['foundDateFrom'] = oldest_day_to_search_in_unix_timestamp
params['foundDateTo'] = now_date_in_unix_timestamp
params['sourceDateFrom'] = oldest_day_to_search_in_unix_timestamp
params['sourceDateTo'] = now_date_in_unix_timestamp
def update_params_with_delta_arg(params, time_delta_in_days_int):
now_date_in_iso = datetime.utcnow().isoformat()
now_date_in_iso_without_ms = remove_milliseconds_from_iso(now_date_in_iso)
now_date_in_unix_timestamp = date_to_timestamp(now_date_in_iso_without_ms)
oldest_day_to_search_in_unix_timestamp = increase_timestamp_by_x_days(now_date_in_unix_timestamp,
-1 * time_delta_in_days_int)
update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp)
del params['time-delta']
def update_params_dict_according_to_delta_arg(params, time_delta_in_days_int):
if 'foundDateFrom' in params or 'foundDateTo' in params:
demisto.debug(
"ERROR in get_alerts() - can't use found-date-to or found-date-from arguments with time-delta argument")
return_error("Error: can't assign delta when assigned both found-date-to or found-date-from")
else:
update_params_with_delta_arg(params, time_delta_in_days_int)
return params
def handle_filters(found_date_from=None):
"""
Apply filters to alert list
"""
args_camel_case = {
'alert-type': 'alertType',
'source-type': 'sourceType',
'network-type': 'networkType',
'source-date-from': 'sourceDateFrom',
'source-date-to': 'sourceDateTo',
'found-date-from': 'foundDateFrom',
'found-date-to': 'foundDateTo',
'is-flagged': 'isFlagged',
'is-closed': 'isClosed',
'source-ID': 'sourceId',
'first-seen-from': 'firstSeenFrom',
'first-seen-to': 'firstSeenTo',
'last-seen-from': 'lastSeenFrom',
'last-seen-to': 'lastSeenTo',
'value': 'iocValue',
}
params = {}
for key in demisto.args():
if demisto.getArg(key):
params[args_camel_case.get(key) or key] = demisto.getArg(key)
if demisto.getArg('time-delta'):
time_delta_in_days = demisto.getArg('time-delta')
update_params_dict_according_to_delta_arg(params, int(time_delta_in_days))
elif found_date_from:
params['foundDateFrom'] = found_date_from
return params
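# Illustrative mapping (argument values are placeholders): with demisto args
#   {'alert-type': 'Phishing', 'is-closed': 'false', 'time-delta': '7'}
# handle_filters() returns something like
#   {'alertType': 'Phishing', 'isClosed': 'false',
#    'foundDateFrom': <now - 7d in ms>, 'foundDateTo': <now in ms>,
#    'sourceDateFrom': <now - 7d in ms>, 'sourceDateTo': <now in ms>}
# i.e. hyphenated demisto arguments are renamed to the camelCase keys the
# IntSights API expects, and time-delta is expanded into the four date params.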
def get_alerts_helper(params):
demisto.info("Executing get_alerts with params: {}".format(params))
response = http_request('GET', 'public/v1/data/alerts/alerts-list', params=params, json_response=True)
alerts_human_readable = []
alerts_context = []
for alert_id in response:
alert_human_readable, alert_context = get_alert_by_id_helper(alert_id)
alerts_human_readable.append(alert_human_readable)
alerts_context.append(alert_context)
return alerts_human_readable, alerts_context
def extract_mail(replies):
if not replies:
return ''
mails = []
for reply in replies:
mails.append(reply.get('Email'))
return '\n'.join(mails)
def extract_remediation(remediations):
    if not remediations:
        return ''
    remedies = []
    string_format = "{0} - Status: {1}"
    for remedy in remediations:
remedies.append(string_format.format(remedy.get('Value'), remedy.get('Status')))
return '\n'.join(remedies)
def hash_identifier(hash_val):
if md5Regex.match(hash_val):
return 'MD5'
if sha1Regex.match(hash_val):
return 'SHA1'
if sha256Regex.match(hash_val):
return 'SHA256'
return 'Unknown'
def extract_tags(tags):
pretty_tags = []
string_format = "ID: {0} - Name: {1}"
for tag in tags:
pretty_tags.append(string_format.format(tag.get('_id'), tag.get('Name')))
return pretty_tags
def get_alerts():
"""
Gets all alerts and returns as a list.
"""
alerts_human_readable, alerts_context = get_alerts_helper(handle_filters())
headers = ['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL',
'SourceEmail', 'SourceNetworkType', 'IsClosed', 'Closed', 'IsFlagged', 'Images', 'Tags',
'Description', 'Title', 'TakedownStatus', 'SubType']
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alerts_context},
'Contents': alerts_context,
'HumanReadable': tableToMarkdown('IntSights Alerts', alerts_human_readable, headers=headers, removeNull=False),
'ContentsFormat': formats['json']
})
def alert_to_readable(alert, parse_tags):
"""
Convert alert to readable format
"""
is_closed = demisto.get(alert, 'IsClosed')
if is_closed is None:
is_closed = demisto.get(alert, 'Closed.IsClosed')
readable = {
'ID': demisto.get(alert, '_id'),
'Severity': demisto.get(alert, 'Details.Severity'),
'Type': demisto.get(alert, 'Details.Type'),
'FoundDate': demisto.get(alert, 'FoundDate'),
'SourceType': demisto.get(alert, 'Details.Source.Type'),
'SourceURL': demisto.get(alert, 'Details.Source.URL'),
'SourceEmail': demisto.get(alert, 'Details.Source.Email'),
'SourceNetworkType': demisto.get(alert, 'Details.Source.NetworkType'),
'IsClosed': is_closed,
'IsFlagged': demisto.get(alert, 'IsFlagged'),
'Assets': demisto.get(alert, 'Assets'),
'Images': demisto.get(alert, 'Details.Images'),
'Description': demisto.get(alert, 'Details.Description'),
'Title': demisto.get(alert, 'Details.Title'),
'TakedownStatus': demisto.get(alert, 'TakedownStatus'),
'SubType': demisto.get(alert, 'Details.SubType'),
}
tags = demisto.get(alert, 'Details.Tags')
if parse_tags:
readable['Tags'] = extract_tags(tags)
else:
readable['Tag'] = []
for tag in tags:
readable['Tag'].append({'ID': tag.get('_id'), 'Name': tag.get('Name')})
return readable
def get_alert_by_id_helper(alert_id):
"""
Helper for getting details by ID
"""
response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True)
return alert_to_readable(response, True), alert_to_readable(response, False)
def get_alert_by_id():
"""
Get alert details by id
"""
alert_id = demisto.getArg('alert-id')
activity_hr, activity_ctx = get_alert_by_id_helper(alert_id)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': activity_ctx},
'Contents': activity_hr,
'HumanReadable': tableToMarkdown('IntSights Alert Details', [activity_hr],
['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL',
'SourceEmail', 'SourceNetworkType', 'IsClosed', 'IsFlagged',
'Images', 'Tags', 'Description', 'Title', 'TakedownStatus', 'SubType']),
'ContentsFormat': formats['json']
})
def get_alert_image():
"""
Retrieves the alert image by image_id
"""
image_id = demisto.getArg('image-id')
response = http_request('GET', 'public/v1/data/alerts/alert-image/' + image_id)
demisto.results(fileResult(image_id + '-image.jpeg', response.content))
def ask_analyst():
"""
Send question to an analyst about the requested alert
"""
alert_id = demisto.getArg('alert-id')
question = demisto.getArg('question')
http_request('POST', 'public/v1/data/alerts/ask-the-analyst/' + alert_id, json_data={'Question': question})
question_details = {'ID': alert_id, 'Question': question}
title = 'IntSights Ask the Analyst: ' \
'Your question has been successfully sent to an analyst about the requested alert'
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': question_details},
'Contents': question_details,
'HumanReadable': tableToMarkdown(title, [question_details], ['ID', 'Question']),
'ContentsFormat': formats['json']
}
)
def get_alert_activity():
"""
Retrieves the alert activity by alert-id
"""
alert_id = demisto.getArg('alert-id')
response = http_request('GET', 'public/v1/data/alerts/activity-log/' + alert_id, json_response=True)
alert = {'ID': alert_id, 'Activities': []}
if not response:
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert},
'Contents': response,
'HumanReadable': 'Alert {} does not have activities.'.format(alert_id),
'ContentsFormat': formats['json']
})
else:
human_readable_arr = []
for activity in response:
alert['Activities'].append({
'ID': demisto.get(activity, '_id'),
'Type': demisto.get(activity, 'Type'),
'Initiator': demisto.get(activity, 'Initiator'),
'CreatedDate': demisto.get(activity, 'CreatedDate'),
'UpdateDate': demisto.get(activity, 'UpdateDate'),
'RemediationBlocklistUpdate': demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate'),
'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')},
'Mail': {'Replies': demisto.get(activity, 'AdditionalInformation.Mail.Replies')},
'ReadBy': demisto.get(activity, 'ReadBy')
})
human_readable_arr.append({
'ID': demisto.get(activity, '_id'),
'Type': demisto.get(activity, 'Type'),
'Initiator': demisto.get(activity, 'Initiator'),
'CreatedDate': demisto.get(activity, 'CreatedDate'),
'UpdateDate': demisto.get(activity, 'UpdateDate'),
'RemediationBlocklistUpdate': extract_remediation(
demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate'))
if demisto.get(activity, 'AdditionalInformation') else '',
'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')},
'Mail': extract_mail(
demisto.get(activity, 'AdditionalInformation.Mail.Replies'))
if demisto.get(activity, 'AdditionalInformation.Mail') else '',
'ReadBy': demisto.get(activity, 'ReadBy')
})
headers = ['ID', 'Type', 'Initiator', 'CreatedDate', 'UpdateDate',
'RemediationBlocklistUpdate', 'AskTheAnalyst', 'Mail', 'ReadBy']
        human_readable = tableToMarkdown('IntSights Alert {} Activity Log'.format(alert_id),
                                         t=human_readable_arr, headers=headers)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert},
'Contents': response,
'HumanReadable': human_readable,
'ContentsFormat': formats['json']
})
def change_severity():
"""
Change severity of an alert
"""
alert_id = demisto.getArg('alert-id')
severity = demisto.getArg('severity')
http_request('PATCH', 'public/v1/data/alerts/change-severity/' + alert_id, json_data={'Severity': severity})
severity_details = {'ID': alert_id, 'Severity': severity}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': severity_details},
'Contents': severity_details,
'HumanReadable': tableToMarkdown(
'IntSights Update Alert Severity: The Alert severity has been successfully updated.', [severity_details],
['ID', 'Severity']),
'ContentsFormat': formats['json']
})
def get_assignee_id(assignee_email):
response = http_request('GET', 'public/v1/account/users-details', json_response=True)
for user in response:
if assignee_email == user.get('Email', ''):
return user.get('_id')
raise Exception('user not found')
def assign_alert():
"""
Assign alert to an Assignee ID
"""
alert_id = demisto.getArg('alert-id')
assignee_email = demisto.getArg('assignee-email')
is_mssp = demisto.getArg('is-mssp-optional')
assignee_id = get_assignee_id(assignee_email)
assign_details = {'ID': alert_id, 'Assignees.AssigneeID': assignee_id}
url = 'public/v1/data/alerts/assign-alert/' + alert_id
if is_mssp:
url += '?IsMssp=' + is_mssp
http_request('PATCH', url, json_data={'AssigneeID': assignee_id})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': assign_details},
'Contents': assign_details,
'HumanReadable': tableToMarkdown(
'IntSights Assign Alert: The Alert has been successfully assigned to assigneeID', [assign_details],
['ID', 'Assignees.AssigneeID']),
'ContentsFormat': formats['json']
})
def unassign_alert():
"""
Unassign an alert
"""
alert_id = demisto.getArg('alert-id')
http_request('PATCH', 'public/v1/data/alerts/unassign-alert/' + alert_id)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id}},
'Contents': {'ID': alert_id},
'HumanReadable': 'Alert id: ' + alert_id + ' successfully unassigned',
'ContentsFormat': formats['json']
})
def close_alert():
"""
Close an alert
"""
alert_id = demisto.getArg('alert-id')
reason = demisto.getArg('reason')
free_text = demisto.getArg('free-text')
is_hidden = demisto.getArg('is-hidden') == 'True'
rate = demisto.getArg('rate')
close_details = {'ID': alert_id, 'Close Reason': reason, 'Closed FreeText': free_text, 'Closed Rate': rate,
'IsHidden': is_hidden}
close_details_context = {'ID': alert_id, 'Closed': {'Reason': reason, 'FreeText': free_text, 'Rate': rate},
'IsHidden': is_hidden}
url = 'public/v1/data/alerts/close-alert/' + alert_id
json_data = {'Reason': reason}
if free_text:
json_data['FreeText'] = free_text
if is_hidden:
json_data['IsHidden'] = is_hidden
if rate:
json_data['Rate'] = rate
http_request('PATCH', url, json_data)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': close_details},
'Contents': close_details_context,
'HumanReadable': tableToMarkdown('IntSights Close Alert: The Alert has successfully been closed.',
[close_details],
['ID', 'Close Reason', 'Closed FreeText', 'Closed Rate', 'IsHidden']),
'ContentsFormat': formats['json']
})
def send_mail():
"""
Send email with the alert details and a question
"""
alert_id = demisto.getArg('alert-id')
emails = argToList(demisto.getArg('emails'))
content = demisto.getArg('content')
http_request('POST', 'public/v1/data/alerts/send-mail/' + alert_id, {'Emails': emails, 'Content': content})
context = {
'ID': alert_id,
'EmailID': emails,
'Question': content
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': 'Email with content (' + content + ') sent to emails',
'ContentsFormat': formats['json']
})
def get_tag_id(alert_id, tag_name):
response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True)
details = response.get('Details', {})
tags = details.get('Tags', [])
for tag in tags:
if tag.get('Name', '') == tag_name:
return tag.get('_id', '')
return 'Not found'
def add_tag():
"""
Adds a tag to the alert
"""
alert_id = demisto.getArg('alert-id')
tag_name = demisto.getArg('tag-name')
http_request('PATCH', 'public/v1/data/alerts/add-tag/' + alert_id, json_data={'TagName': tag_name})
tag_info = {
'TagName': tag_name,
'ID': get_tag_id(alert_id, tag_name)
}
context = {
'ID': alert_id,
'Tags': tag_info
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': 'Tag (' + tag_name + ') added to alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def remove_tag():
"""
Removes a tag from an alert
"""
alert_id = demisto.getArg('alert-id')
tag_id = demisto.getArg('tag-id')
http_request('PATCH', 'public/v1/data/alerts/remove-tag/' + alert_id, json_data={'TagID': tag_id})
context = {
'ID': alert_id,
'Tags': {'ID': tag_id}
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': 'Tag id: ' + tag_id + ' removed from alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def add_comment():
"""
Adds a comment to an alert
"""
alert_id = demisto.getArg('alert-id')
comment = demisto.getArg('comment')
http_request('PATCH', 'public/v1/data/alerts/add-comment/' + alert_id, json_data={'Comment': comment})
context = {
'ID': alert_id,
'Comment': comment
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
        'HumanReadable': 'Successfully added comment "' + comment + '" to alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def ioc_to_readable(ioc_data):
"""
Convert IOC to readable format
"""
ioc_context = {
'ID': demisto.get(ioc_data, '_id'),
'SourceID': demisto.get(ioc_data, 'SourceID'),
'AccountID': demisto.get(ioc_data, 'AccountID'),
'Type': demisto.get(ioc_data, 'Type'),
'Value': demisto.get(ioc_data, 'Value'),
'FirstSeen': demisto.get(ioc_data, 'FirstSeen'),
'LastSeen': demisto.get(ioc_data, 'LastSeen'),
'Domain': demisto.get(ioc_data, 'Domain'),
'Status': demisto.get(ioc_data, 'Status'),
'Severity': demisto.get(ioc_data, 'Severity'),
'SourceName': demisto.get(ioc_data, 'Source.Name'),
'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'),
'Flags': {'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa')},
'Enrichment': {
'Status': demisto.get(ioc_data, 'Enrichment.Status'),
'Data': demisto.get(ioc_data, 'Enrichment.Data'),
'Date': demisto.get(ioc_data, 'Enrichment.Data') # Backwards compatibility issue
}
}
ioc_readable = {
'ID': demisto.get(ioc_data, '_id'),
'SourceID': demisto.get(ioc_data, 'SourceID'),
'AccountID': demisto.get(ioc_data, 'AccountID'),
'Type': demisto.get(ioc_data, 'Type'),
'Value': demisto.get(ioc_data, 'Value'),
'FirstSeen': demisto.get(ioc_data, 'FirstSeen'),
'LastSeen': demisto.get(ioc_data, 'LastSeen'),
'Domain': demisto.get(ioc_data, 'Domain'),
'Status': demisto.get(ioc_data, 'Status'),
'Severity': demisto.get(ioc_data, 'Severity').get('Value'),
'SourceName': demisto.get(ioc_data, 'Source.Name'),
'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'),
'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa'),
'Enrichment Status': demisto.get(ioc_data, 'Enrichment.Status'),
'Enrichment Data': demisto.get(ioc_data, 'Enrichment.Data')
}
dbot_score = {
'Indicator': ioc_context['Value'],
'Type': IOC_TYPE_TO_DBOT_TYPE[ioc_context['Type']],
'Vendor': 'IntSights',
'Score': translate_severity(ioc_readable['Severity'])
}
malicious_dict = {
'Vendor': 'IntSights',
'Description': 'IntSights severity level is High'
}
domain = {}
if ioc_context['Domain']:
domain['Name'] = ioc_context['Domain']
if translate_severity(ioc_readable['Severity']) == 3:
domain['Malicious'] = malicious_dict
ip_info = {}
if ioc_context['Type'] == 'IpAddresses':
ip_info['Address'] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
ip_info['Malicious'] = malicious_dict
url_info = {}
if ioc_context['Type'] == 'Urls':
url_info['Data'] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
url_info['Malicious'] = malicious_dict
hash_info = {}
if ioc_context['Type'] == 'Hashes':
hash_info['Name'] = ioc_context['Value']
hash_info[hash_identifier(ioc_context['Value'])] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
hash_info['Malicious'] = malicious_dict
return ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info
def search_for_ioc():
"""
Search for IOC by value
"""
response = http_request('GET', 'public/v1/iocs/ioc-by-value', params=handle_filters(), json_response=True)
if response:
ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(response)
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Iocs(val.ID === obj.ID)': ioc_context,
'DBotScore': dbot_score,
'Domain': domain,
'IP': ip_info,
'URL': url_info,
'File': hash_info
},
'Contents': response,
'HumanReadable': tableToMarkdown('IOC Information', [ioc_readable],
['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen',
'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName',
'SourceConfidence', 'IsInAlexa', 'Enrichment Status',
'Enrichment Data']),
'ContentsFormat': formats['json']
}
)
else:
results_for_no_content('IOC Information')
def results_for_no_content(cmd_name):
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {'IntSights': {}},
'Contents': {},
'HumanReadable': '### {} \n\n Could not get any results.'.format(cmd_name),
'ContentsFormat': formats['json']
}
)
def translate_severity(sev):
"""
Translate alert severity to demisto
"""
if sev in ['Medium', 'High']:
return 3
if sev == 'Low':
return 2
return 0
def fetch_incidents():
"""
Fetch incidents for Demisto
"""
last_run = demisto.getLastRun()
demisto.info("IntSight fetch last run time is: {}".format(str(last_run)))
if not last_run or 'time' not in last_run:
fetch_delta, _ = parse_date_range(demisto.params().get('fetch_delta', DEFAULT_TIME_RANGE), to_timestamp=True)
else:
fetch_delta = last_run.get('time')
current_fetch = fetch_delta
alert_type = demisto.getParam('type')
min_severity_level = demisto.params().get('severity_level', 'All')
if min_severity_level not in SEVERITY_LEVEL:
raise Exception("Minimum Alert severity level to fetch incidents incidents from, allowed values are: All,"
" Low, Medium, High. (Setting to All will fetch all incidents)")
_, alerts_context = get_alerts_helper(handle_filters(fetch_delta))
incidents = []
for alert in alerts_context:
if SEVERITY_LEVEL[min_severity_level] <= SEVERITY_LEVEL[alert.get('Severity', 'Low')]:
if not alert_type or alert_type.lower() == alert.get('Type', '').lower():
incidents.append({
'name': '{type} - {id}'.format(type=alert.get('Type', 'Type not found'), id=alert.get('ID')),
'occurred': alert.get('FoundDate'),
'severity': translate_severity(alert.get('Severity')),
'rawJSON': json.dumps(alert)
})
alert_timestamp = date_to_timestamp(alert.get('FoundDate'), date_format='%Y-%m-%dT%H:%M:%S.%fZ')
if alert_timestamp > current_fetch:
current_fetch = alert_timestamp
demisto.incidents(incidents)
demisto.setLastRun({'time': current_fetch + 1000})
def get_iocs():
"""
Gets all IOCs with the given filters
"""
response = http_request('GET', 'public/v1/iocs/complete-iocs-list', params=handle_filters(), json_response=True)
domains = []
ip_infos = []
url_infos = []
hash_infos = []
dbot_scores = []
iocs_context = []
iocs_readable = []
for indicator in response:
ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(indicator)
iocs_context.append(ioc_context)
iocs_readable.append(ioc_readable)
dbot_scores.append(dbot_score)
domains.append(domain)
ip_infos.append(ip_info)
url_infos.append(url_info)
hash_infos.append(hash_info)
headers = ['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen',
'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence',
'IsInAlexa', 'Enrichment Status', 'Enrichment Data']
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Iocs': iocs_context,
'DBotScore': dbot_scores,
'Domain': domains,
'IP': ip_infos,
'URL': url_infos,
'File': hash_infos
},
'Contents': response,
'HumanReadable': tableToMarkdown('IOC Information', t=iocs_readable, headers=headers),
'ContentsFormat': formats['json']
}
)
def takedown_request():
"""
Request alert takedown
"""
alert_id = demisto.getArg('alert-id')
http_request('PATCH', 'public/v1/data/alerts/takedown-request/' + alert_id)
context = {
'ID': alert_id,
}
human_readable = '### IntSights Alert Takedown\n' \
'The Alert Takedown request has been sent successfully for {}'.format(str(alert_id))
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': human_readable,
'ContentsFormat': formats['json']
})
def get_alert_takedown_status():
"""
Get an alert's takedown status
"""
alert_id = demisto.getArg('alert-id')
response = http_request('GET', 'public/v1/data/alerts/takedown-status/' + alert_id)
context = {
'ID': alert_id,
'TakedownStatus': response.text
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': tableToMarkdown('IntSights Alert Takedown Status', [context], ['ID', 'TakedownStatus']),
'ContentsFormat': formats['json']
})
def update_ioc_blocklist_status():
alert_id = demisto.getArg('alert-id')
types = argToList(demisto.getArg('type'))
values = argToList(demisto.getArg('value'))
statuses = argToList(demisto.getArg('blocklist-status'))
if len(types) != len(values) or len(types) != len(statuses):
return_error('The lists must be of equal length. For each IOC, provide an entry in each list.')
data = []
for count, type_ in enumerate(types):
data.append({
'Type': type_,
'Value': values[count],
'BlocklistStatus': statuses[count]
})
http_request('PATCH', 'public/v1/data/alerts/change-iocs-blocklist-status/' + alert_id, json_data={'Iocs': data})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': statuses}},
'Contents': {'ID': alert_id, 'Status': statuses},
'HumanReadable': tableToMarkdown('IntSights Update IOC BlockList Status for ' + alert_id, data,
['BlocklistStatus']),
'ContentsFormat': formats['json']
})
def get_ioc_blocklist_status():
alert_id = demisto.getArg('alert-id')
response = http_request('GET', 'public/v1/data/alerts/blocklist-status/' + alert_id, json_response=True)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': [ioc.get('Status') for ioc in response]}},
'Contents': response,
'HumanReadable': tableToMarkdown('IntSights Blocklist Status for ' + alert_id, response, ['Status']),
'ContentsFormat': formats['json']
})
def get_mssp_sub_accounts():
account_id = demisto.getParam('credentials')['identifier']
accounts = http_request('GET', 'public/v1/mssp/customers', json_response=True)
if not accounts:
return_error("intsights-mssp-get-sub-accounts failed to return data.")
# Fix accounts _id keys
for account in accounts:
account["ID"] = account["_id"]
del account["_id"]
if len(accounts) < 1:
return_error('Current MSSP Account has no sub accounts.')
account_ids = [i["ID"] for i in accounts]
if MSSP_ACCOUNT_ID not in account_ids:
demisto.log("[DEBUG] - MSSP sub accounts:" + str(accounts))
return_error('Entered sub account id ({}) is not part of this mssp account'.format(MSSP_ACCOUNT_ID))
for i, account in enumerate(account_ids):
# Call account
HEADERS['Account-Id'] = account
account_ua = http_request('GET', 'public/v1/account/used-assets', json_response=True)
if not account_ua:
continue
accounts[i].update(account_ua)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.MsspAccount(val.ID === obj.ID)': accounts},
'HumanReadable': tableToMarkdown('IntSights MSSP accounts used assets ' + account_id, accounts,
["ID", 'CompanyName', "Status", "AssetsLimit", "AssetsCount"]),
'Contents': accounts,
'ContentsFormat': formats['json']
})
# Restore the header
HEADERS['Account-Id'] = MSSP_ACCOUNT_ID
def test_module():
http_request('GET', 'public/v1/api/version')
if demisto.params().get('isFetch'):
min_severity_level = demisto.params().get('severity_level', 'All')
if min_severity_level not in SEVERITY_LEVEL:
return_error("Minimum Alert severity level to fetch incidents incidents from, allowed values are: "
"All, Low, Medium, High. (Setting to All will fetch all incidents)")
demisto.results('ok')
try:
if demisto.command() == 'test-module':
test_module()
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'intsights-mssp-get-sub-accounts':
get_mssp_sub_accounts()
elif demisto.command() == 'intsights-get-alerts':
get_alerts()
elif demisto.command() == 'intsights-get-alert-image':
get_alert_image()
elif demisto.command() == 'intsights-get-alert-activities':
get_alert_activity()
elif demisto.command() == 'intsights-assign-alert':
assign_alert()
elif demisto.command() == 'intsights-unassign-alert':
unassign_alert()
elif demisto.command() == 'intsights-send-mail':
send_mail()
elif demisto.command() == 'intsights-ask-the-analyst':
ask_analyst()
elif demisto.command() == 'intsights-add-tag-to-alert':
add_tag()
elif demisto.command() == 'intsights-remove-tag-from-alert':
remove_tag()
elif demisto.command() == 'intsights-add-comment-to-alert':
add_comment()
elif demisto.command() == 'intsights-update-alert-severity':
change_severity()
elif demisto.command() == 'intsights-get-alert-by-id':
get_alert_by_id()
elif demisto.command() == 'intsights-get-ioc-by-value':
search_for_ioc()
elif demisto.command() == 'intsights-get-iocs':
get_iocs()
elif demisto.command() == 'intsights-alert-takedown-request':
takedown_request()
elif demisto.command() == 'intsights-get-alert-takedown-status':
get_alert_takedown_status()
elif demisto.command() == 'intsights-get-ioc-blocklist-status':
get_ioc_blocklist_status()
elif demisto.command() == 'intsights-update-ioc-blocklist-status':
update_ioc_blocklist_status()
elif demisto.command() == 'intsights-close-alert':
close_alert()
else:
raise Exception('Unrecognized command: ' + demisto.command())
except Exception as err:
return_error(str(err))
|
"""
This module contains common reusable functions.
"""
from traceback import print_stack
from configparser import ConfigParser
from SupportLibraries.ui_helpers import UIHelpers
class BaseHelpers(UIHelpers):
"""
This class includes basic reusable base_helpers.
"""
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def load_properties_file(self):
"""
This method loads the properties/ini file
:return: this method returns config reader instance.
"""
config = None
try:
# noinspection PyBroadException
config = ConfigParser()
config.read('test.ini')
except Exception as ex:
self.log.error("Failed to load ini/properties file.", ex)
print_stack()
return config
|
#!/usr/bin/env python
import sys, os, pwd, grp, signal, time, base64
from resource_management import *
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, Directory, File
from resource_management.core.shell import call
from resource_management.core.system import System
from resource_management.libraries.functions.default import default
def airflow_make_systemd_scripts_webserver(env):
import params
env.set_params(params)
confFileText = format("""[Unit]
Description=Airflow webserver daemon
After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service
Wants=postgresql.service mysql.service redis.service rabbitmq-server.service
[Service]
EnvironmentFile=/etc/sysconfig/airflow
User={airflow_user}
Group={airflow_group}
Type=simple
ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow webserver -D --pid /usr/local/airflow/airflow-webserver.pid --stderr /var/log/airflow/webserver.err --stdout /var/log/airflow/webserver.out -l /var/log/airflow/webserver.log
PIDFile=/usr/local/airflow/airflow-webserver.pid
Restart=always
RestartSec=5s
SyslogIdentifier=airflow-webserver
[Install]
WantedBy=multi-user.target
""")
with open("/etc/systemd/system/multi-user.target.wants/airflow-webserver.service", 'w') as configFile:
configFile.write(confFileText)
configFile.close()
confFileText = format("""AIRFLOW_HOME={airflow_home}
AIRFLOW_CONFIG={airflow_home}/airflow.cfg
PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
""")
with open("/etc/sysconfig/airflow", 'w') as configFile:
configFile.write(confFileText)
configFile.close()
Execute("systemctl daemon-reload")
def airflow_make_systemd_scripts_scheduler(env):
import params
env.set_params(params)
confFileText = format("""[Unit]
Description=Airflow scheduler daemon
After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service
Wants=postgresql.service mysql.service redis.service rabbitmq-server.service
[Service]
EnvironmentFile=/etc/sysconfig/airflow
User={airflow_user}
Group={airflow_group}
Type=simple
ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow scheduler -D --pid /usr/local/airflow/airflow-scheduler.pid --stderr /var/log/airflow/scheduler.err --stdout /var/log/airflow/scheduler.out -l /var/log/airflow/scheduler.log
PIDFile=/usr/local/airflow/airflow-scheduler.pid
Restart=always
RestartSec=5s
SyslogIdentifier=airflow-scheduler
[Install]
WantedBy=multi-user.target
""")
with open("/etc/systemd/system/multi-user.target.wants/airflow-scheduler.service", 'w') as configFile:
configFile.write(confFileText)
configFile.close()
confFileText = format("""AIRFLOW_HOME={airflow_home}
AIRFLOW_CONFIG={airflow_home}/airflow.cfg
PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
""")
with open("/etc/sysconfig/airflow", 'w') as configFile:
configFile.write(confFileText)
configFile.close()
Execute("systemctl daemon-reload")
def airflow_make_systemd_scripts_worker(env):
import params
env.set_params(params)
confFileText = format("""[Unit]
Description=Airflow worker daemon
After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service
Wants=postgresql.service mysql.service redis.service rabbitmq-server.service
[Service]
EnvironmentFile=/etc/sysconfig/airflow
User={airflow_user}
Group={airflow_group}
Type=simple
ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow worker -D --pid /usr/local/airflow/airflow-worker.pid --stderr /var/log/airflow/worker.err --stdout /var/log/airflow/worker.out -l /var/log/airflow/worker.log
PIDFile=/usr/local/airflow/airflow-worker.pid
Restart=always
RestartSec=5s
SyslogIdentifier=airflow-worker
[Install]
WantedBy=multi-user.target
""")
with open("/etc/systemd/system/multi-user.target.wants/airflow-worker.service", 'w') as configFile:
configFile.write(confFileText)
configFile.close()
confFileText = format("""AIRFLOW_HOME={airflow_home}
AIRFLOW_CONFIG={airflow_home}/airflow.cfg
PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
""")
with open("/etc/sysconfig/airflow", 'w') as configFile:
configFile.write(confFileText)
configFile.close()
Execute("systemctl daemon-reload")
def airflow_generate_config_for_section(sections):
"""
    Generate the value block for each section of airflow.cfg.
    This allows custom-site configuration from Ambari to be added to the cfg file.
"""
result = {}
for section, data in sections.items():
section_config = ""
for key, value in data.items():
section_config += format("{key} = {value}\n")
result[section] = section_config
return result
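# Illustrative example (hypothetical section data, not taken from a real Ambari config):
#   airflow_generate_config_for_section({"core": {"dags_folder": "/usr/local/airflow/dags"}})
#   returns {"core": "dags_folder = /usr/local/airflow/dags\n"}; airflow_configure() below
#   then wraps each value in a "[section]" header when writing airflow.cfg.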
def airflow_configure(env):
import params
env.set_params(params)
airflow_config_file = ""
airflow_config = airflow_generate_config_for_section({
"core" : params.config['configurations']['airflow-core-site'],
"cli" : params.config['configurations']['airflow-cli-site'],
"api" : params.config['configurations']['airflow-api-site'],
"operators" : params.config['configurations']['airflow-operators-site'],
"webserver" : params.config['configurations']['airflow-webserver-site'],
"email" : params.config['configurations']['airflow-email-site'],
"smtp" : params.config['configurations']['airflow-smtp-site'],
"celery" : params.config['configurations']['airflow-celery-site'],
"dask" : params.config['configurations']['airflow-dask-site'],
"scheduler" : params.config['configurations']['airflow-scheduler-site'],
"ldap" : params.config['configurations']['airflow-ldap-site'],
"mesos" : params.config['configurations']['airflow-mesos-site'],
"kerberos" : params.config['configurations']['airflow-kerberos-site'],
"github_enterprise" : params.config['configurations']['airflow-githubenterprise-site'],
"admin" : params.config['configurations']['airflow-admin-site'],
"lineage" : params.config['configurations']['airflow-lineage-site'],
"atlas" : params.config['configurations']['airflow-atlas-site'],
"hive" : params.config['configurations']['airflow-hive-site'],
"celery_broker_transport_options" : params.config['configurations']['airflow-celerybrokertransportoptions-site'],
"elasticsearch" : params.config['configurations']['airflow-elasticsearch-site'],
"kubernetes" : params.config['configurations']['airflow-kubernetes-site'],
"kubernetes_secrets" : params.config['configurations']['airflow-kubernetessecrets-site']
})
for section, value in airflow_config.items():
airflow_config_file += format("[{section}]\n{value}\n")
with open(params.airflow_home + "/airflow.cfg", 'w') as configFile:
configFile.write(airflow_config_file)
configFile.close()
|
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for testing gjslint components."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import StringIO
from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
def TokenizeSource(source):
"""Convert a source into a string of tokens.
Args:
source: A source file as a string or file-like object (iterates lines).
Returns:
The first token of the resulting token stream.
"""
if isinstance(source, basestring):
source = StringIO.StringIO(source)
tokenizer = javascripttokenizer.JavaScriptTokenizer()
return tokenizer.TokenizeFile(source)
def TokenizeSourceAndRunEcmaPass(source):
"""Tokenize a source and run the EcmaMetaDataPass on it.
Args:
source: A source file as a string or file-like object (iterates lines).
Returns:
The first token of the resulting token stream.
"""
start_token = TokenizeSource(source)
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
ecma_pass.Process(start_token)
return start_token
def ParseFunctionsAndComments(source, error_handler=None):
"""Run the tokenizer and tracker and return comments and functions found.
Args:
source: A source file as a string or file-like object (iterates lines).
error_handler: An error handler.
Returns:
The functions and comments as a tuple.
"""
start_token = TokenizeSourceAndRunEcmaPass(source)
tracker = javascriptstatetracker.JavaScriptStateTracker()
if error_handler is not None:
tracker.DocFlagPass(start_token, error_handler)
functions = []
comments = []
for token in start_token:
tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
function = tracker.GetFunction()
if function and function not in functions:
functions.append(function)
comment = tracker.GetDocComment()
if comment and comment not in comments:
comments.append(comment)
tracker.HandleAfterToken(token)
return functions, comments
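# Illustrative usage (the JavaScript snippet below is made up for demonstration):
#   functions, comments = ParseFunctionsAndComments(
#       "/** Adds two numbers. */\nfunction add(a, b) { return a + b; }")
#   # "functions" holds the tracked function objects, "comments" their doc comments.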
|
from .traffic_junction_hard import TrafficJunctionHard
|
"""
This file contains all the settings that defines the development server.
SECURITY WARNING: don't run with debug turned on in production!
"""
import logging
from typing import List
from server.settings.components import config
from server.settings.components.common import INSTALLED_APPS, MIDDLEWARE
# Setting the development status:
DEBUG = True
ALLOWED_HOSTS = [
config('DOMAIN_NAME'),
'localhost',
'0.0.0.0', # noqa: S104
'127.0.0.1',
'[::1]',
]
# Installed apps for development only:
INSTALLED_APPS += (
'debug_toolbar',
'nplusone.ext.django',
'django_migration_linter',
'django_test_migrations.contrib.django_checks.AutoNames',
'django_test_migrations.contrib.django_checks.DatabaseConfiguration',
'extra_checks',
)
# Static files:
# https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS: List[str] = []
# Django debug toolbar:
# https://django-debug-toolbar.readthedocs.io
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
# https://github.com/bradmontgomery/django-querycount
# Prints how many queries were executed, useful for the APIs.
'querycount.middleware.QueryCountMiddleware',
)
def _custom_show_toolbar(request):
"""Only show the debug toolbar to users with the superuser flag."""
return DEBUG and request.user.is_superuser
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK':
'server.settings.environments.development._custom_show_toolbar',
}
# This makes the debug toolbar work with django-csp,
# since `ddt` loads some scripts from `ajax.googleapis.com`:
CSP_SCRIPT_SRC = ("'self'", 'ajax.googleapis.com')
CSP_IMG_SRC = ("'self'", 'data:')
CSP_CONNECT_SRC = ("'self'",)
# nplusone
# https://github.com/jmcarp/nplusone
# Should be the first in line:
MIDDLEWARE = ( # noqa: WPS440
'nplusone.ext.django.NPlusOneMiddleware',
) + MIDDLEWARE
# Logging N+1 requests:
NPLUSONE_RAISE = True # comment out if you want to allow N+1 requests
NPLUSONE_LOGGER = logging.getLogger('django')
NPLUSONE_LOG_LEVEL = logging.WARN
NPLUSONE_WHITELIST = [
{'model': 'admin.*'},
]
# django-test-migrations
# https://github.com/wemake-services/django-test-migrations
# Set of badly named migrations to ignore:
DTM_IGNORED_MIGRATIONS = frozenset((
('axes', '*'),
))
# django-extra-checks
# https://github.com/kalekseev/django-extra-checks
EXTRA_CHECKS = {
'checks': [
# Forbid `unique_together`:
'no-unique-together',
        # FileField/ImageField must have a non-empty `upload_to` argument:
        'field-file-upload-to',
# Use the indexes option instead:
'no-index-together',
# Each model must be registered in admin:
'model-admin',
# Text fields shouldn't use `null=True`:
'field-text-null',
# Prefer using BooleanField(null=True) instead of NullBooleanField:
'field-boolean-null',
# Don't pass `null=False` to model fields (this is django default)
'field-null',
# ForeignKey fields must specify db_index explicitly if used in
# other indexes:
{'id': 'field-foreign-key-db-index', 'when': 'indexes'},
        # If a field is nullable (`null=True`),
        # then the `default=None` argument is redundant and should be removed:
'field-default-null',
# Fields with choices must have companion CheckConstraint
# to enforce choices on database level
'field-choices-constraint',
],
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import auto, Enum
from typing import NamedTuple, Optional
class Parameter(NamedTuple):
class Kind(Enum):
ARG = auto()
VARARG = auto()
KWARG = auto()
name: str
annotation: Optional[str]
kind: Kind
def __eq__(self, other: "Parameter") -> bool:
if not isinstance(other, self.__class__):
return False
return self.name == other.name
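# Note: the equality above compares only the parameter name, so two Parameters that differ
# in annotation or kind still compare equal, e.g.
#   Parameter("x", "int", Parameter.Kind.ARG) == Parameter("x", "str", Parameter.Kind.KWARG)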
|
from django.db import models
from django_encrypted_json.fields import EncryptedValueJsonField
# Create your models here.
class TestModel(models.Model):
    json = EncryptedValueJsonField(default=dict)
optional_json = EncryptedValueJsonField(blank=True, null=True)
partial_encrypt = EncryptedValueJsonField(
blank=True, null=True, skip_keys=('test', ))
partial_encrypt_w_default = EncryptedValueJsonField(
        blank=True, skip_keys=('test', ), default=list)
|
import asyncio
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # noqa
from runners.support.agent import DemoAgent, default_genesis_txns
from runners.support.utils import (
log_json,
log_msg,
log_status,
log_timer,
prompt,
prompt_loop,
require_indy,
)
LOGGER = logging.getLogger(__name__)
class AerLingusAgent(DemoAgent):
def __init__(self, http_port: int, admin_port: int, **kwargs):
super().__init__(
"Aer Lingus Agent",
http_port,
admin_port,
prefix="AerLingus",
extra_args=[
"--auto-accept-invites",
"--auto-accept-requests"
],
**kwargs,
)
self.connection_id = None
self._connection_ready = asyncio.Future()
self.cred_state = {}
# TODO define a dict to hold credential attributes based on
# the credential_definition_id
self.cred_attrs = {}
async def detect_connection(self):
await self._connection_ready
@property
def connection_ready(self):
return self._connection_ready.done() and self._connection_ready.result()
async def handle_connections(self, message):
if message["connection_id"] == self.connection_id:
if message["state"] == "active" and not self._connection_ready.done():
self.log("Connected")
self._connection_ready.set_result(True)
async def handle_issue_credential(self, message):
state = message["state"]
credential_exchange_id = message["credential_exchange_id"]
prev_state = self.cred_state.get(credential_exchange_id)
if prev_state == state:
return # ignore
self.cred_state[credential_exchange_id] = state
self.log(
"Credential: state =",
state,
", credential_exchange_id =",
credential_exchange_id,
)
if state == "request_received":
# TODO issue credentials based on the credential_definition_id
pass
async def handle_present_proof(self, message):
state = message["state"]
presentation_exchange_id = message["presentation_exchange_id"]
self.log(
"Presentation: state =",
state,
", presentation_exchange_id =",
presentation_exchange_id,
)
if state == "presentation_received":
# TODO handle received presentations
pass
async def handle_basicmessages(self, message):
self.log("Received message:", message["content"])
async def main(start_port: int, show_timing: bool = False):
genesis = await default_genesis_txns()
if not genesis:
print("Error retrieving ledger genesis transactions")
sys.exit(1)
agent = None
try:
log_status("#1 Provision an agent and wallet, get back configuration details")
agent = AerLingusAgent(
start_port, start_port + 1, genesis_data=genesis, timing=show_timing
)
await agent.listen_webhooks(start_port + 2)
await agent.register_did()
with log_timer("Startup duration:"):
await agent.start_process()
log_msg("Admin url is at:", agent.admin_url)
log_msg("Endpoint url is at:", agent.endpoint)
# Create a schema
log_status("#3 Create a new schema on the ledger")
with log_timer("Publish schema duration:"):
pass
# TODO define schema
# version = format(
# "%d.%d.%d"
# % (
# random.randint(1, 101),
# random.randint(1, 101),
# random.randint(1, 101),
# )
# )
# (
# schema_id,
# credential_definition_id,
# ) = await agent.register_schema_and_creddef(
# "employee id schema",
# version,
# ["employee_id", "name", "date", "position"],
# )
# with log_timer("Generate invitation duration:"):
# # Generate an invitation
# log_status(
# "#5 Create a connection to alice and print out the invite details"
# )
# connection = await agent.admin_POST("/connections/create-invitation")
# agent.connection_id = connection["connection_id"]
# log_json(connection, label="Invitation response:")
# log_msg("*****************")
# log_msg(json.dumps(connection["invitation"]), label="Invitation:", color=None)
# log_msg("*****************")
# log_msg("Waiting for connection...")
# await agent.detect_connection()
async for option in prompt_loop(
"(1) Issue Credential, (2) Send Proof Request, "
+ "(3) Send Message (X) Exit? [1/2/3/X] "
):
if option in "xX":
break
elif option == "1":
log_status("#13 Issue credential offer to X")
# TODO credential offers
elif option == "2":
log_status("#20 Request proof of degree from alice")
# TODO presentation requests
elif option == "3":
msg = await prompt("Enter message: ")
await agent.admin_POST(
f"/connections/{agent.connection_id}/send-message", {"content": msg}
)
if show_timing:
timing = await agent.fetch_timing()
if timing:
for line in agent.format_timing(timing):
log_msg(line)
finally:
terminated = True
try:
if agent:
await agent.terminate()
except Exception:
LOGGER.exception("Error terminating agent:")
terminated = False
await asyncio.sleep(0.1)
if not terminated:
os._exit(1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs an Aer Lingus demo agent.")
parser.add_argument(
"-p",
"--port",
type=int,
default=8040,
metavar=("<port>"),
help="Choose the starting port number to listen on",
)
parser.add_argument(
"--timing", action="store_true", help="Enable timing information"
)
args = parser.parse_args()
require_indy()
try:
asyncio.get_event_loop().run_until_complete(main(args.port, args.timing))
except KeyboardInterrupt:
os._exit(1)
|
# -*- coding: UTF-8 -*-
import os, sqlite3
from PySide2.QtWidgets import QComboBox
from moduels.component.NormalValue import 常量
# Add preset dialog
class Combo_EngineList(QComboBox):
def __init__(self):
super().__init__()
        self.initElements()  # Initialize the widgets first
        self.initSlots()  # Then connect the widgets to their signal slots
        self.initLayouts()  # Then lay out the widgets
        self.initValues()  # Finally set the widgets' values
def initElements(self):
pass
def initSlots(self):
pass
def initLayouts(self):
pass
def initValues(self):
self.初始化列表()
def mousePressEvent(self, e):
self.列表更新()
self.showPopup()
def 初始化列表(self):
self.列表项 = []
数据库连接 = 常量.数据库连接
cursor = 数据库连接.cursor()
result = cursor.execute(f'''select 引擎名称 from {常量.语音引擎表单名} order by id;''').fetchall()
if len(result) != 0:
for item in result:
self.列表项.append(item[0])
self.addItems(self.列表项)
# if not os.path.exists(常量.音效文件路径): os.makedirs(常量.音效文件路径)
# with os.scandir(常量.音效文件路径) as 目录条目:
# for entry in 目录条目:
# if not entry.name.startswith('.') and entry.is_dir():
# self.列表项.append(entry.name)
def 列表更新(self):
新列表 = []
数据库连接 = 常量.数据库连接
cursor = 数据库连接.cursor()
result = cursor.execute(f'''select 引擎名称 from {常量.语音引擎表单名} order by id;''').fetchall()
if len(result) != 0:
for item in result:
新列表.append(item[0])
if self.列表项 == 新列表: return True
self.clear()
self.列表项 = 新列表
self.addItems(self.列表项)
|
"""Example 003: List envelopes in the user's account"""
from flask import render_template, url_for, redirect, session, flash, request
from os import path
import json
from app import app, ds_config, views
from datetime import datetime, timedelta
from docusign_esign import *
from docusign_esign.rest import ApiException
eg = "eg003" # reference (and url) for this example
def controller():
"""Controller router using the HTTP method"""
if request.method == 'GET':
return get_controller()
elif request.method == 'POST':
return create_controller()
else:
return render_template('404.html'), 404
def create_controller():
"""
1. Check the token
2. Call the worker method
3. Show results
"""
minimum_buffer_min = 3
if views.ds_token_ok(minimum_buffer_min):
# 2. Call the worker method
args = {
'account_id': session['ds_account_id'],
'base_path': session['ds_base_path'],
'ds_access_token': session['ds_access_token'],
}
try:
results = worker(args)
except ApiException as err:
error_body_json = err and hasattr(err, 'body') and err.body
# we can pull the DocuSign error code and message from the response body
error_body = json.loads(error_body_json)
error_code = error_body and 'errorCode' in error_body and error_body['errorCode']
error_message = error_body and 'message' in error_body and error_body['message']
# In production, may want to provide customized error messages and
# remediation advice to the user.
return render_template('error.html',
err=err,
error_code=error_code,
error_message=error_message
)
return render_template("example_done.html",
title="List envelopes results",
h1="List envelopes results",
message="Results from the Envelopes::listStatusChanges method:",
json=json.dumps(json.dumps(results.to_dict()))
)
else:
flash('Sorry, you need to re-authenticate.')
# We could store the parameters of the requested operation
# so it could be restarted automatically.
# But since it should be rare to have a token issue here,
# we'll make the user re-enter the form data after
# authentication.
session['eg'] = url_for(eg)
return redirect(url_for('ds_must_authenticate'))
# ***DS.snippet.0.start
def worker(args):
"""
1. Call the envelope status change method to list the envelopes
that have changed in the last 10 days
"""
# Exceptions will be caught by the calling function
api_client = ApiClient()
api_client.host = args['base_path']
api_client.set_default_header("Authorization", "Bearer " + args['ds_access_token'])
envelope_api = EnvelopesApi(api_client)
# The Envelopes::listStatusChanges method has many options
# See https://developers.docusign.com/esign-rest-api/reference/Envelopes/Envelopes/listStatusChanges
# The list status changes call requires at least a from_date OR
# a set of envelopeIds. Here we filter using a from_date.
    # Here we set the from_date to filter envelopes changed in the last 10 days
    # Use ISO 8601 date format
from_date = (datetime.utcnow() - timedelta(days=10)).isoformat()
results = envelope_api.list_status_changes(args['account_id'], from_date = from_date)
return results
# ***DS.snippet.0.end
def get_controller():
"""responds with the form for the example"""
if views.ds_token_ok():
return render_template("eg003_list_envelopes.html",
title="List changed envelopes",
source_file=path.basename(__file__),
source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__),
documentation=ds_config.DS_CONFIG['documentation'] + eg,
show_doc=ds_config.DS_CONFIG['documentation'],
)
else:
# Save the current operation so it will be resumed after authentication
session['eg'] = url_for(eg)
return redirect(url_for('ds_must_authenticate'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-22 12:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('prov_vo', '0005_add_activitydescription_entitydescription'),
]
operations = [
migrations.CreateModel(
name='UsedDescription',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('role', models.CharField(blank=True, max_length=128, null=True)),
('activityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityDescription')),
('entityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.EntityDescription')),
],
),
migrations.CreateModel(
name='WasGeneratedByDescription',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('role', models.CharField(blank=True, max_length=128, null=True)),
('activityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityDescription')),
('entityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.EntityDescription')),
],
),
migrations.AlterField(
model_name='entity',
name='rights',
field=models.CharField(blank=True, choices=[('voprov:public', 'voprov:public'), ('voprov:secure', 'voprov:secure'), ('voprov:proprietary', 'voprov:proprietary')], max_length=128, null=True),
),
migrations.AlterField(
model_name='hadmember',
name='collection',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Collection'),
),
migrations.AlterField(
model_name='hadmember',
name='entity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ecollection', to='prov_vo.Entity'),
),
migrations.AlterField(
model_name='hadstep',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='activityFlow', to='prov_vo.Activity'),
),
migrations.AlterField(
model_name='hadstep',
name='activityFlow',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityFlow'),
),
migrations.AlterField(
model_name='used',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'),
),
migrations.AlterField(
model_name='used',
name='entity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'),
),
migrations.AlterField(
model_name='wasassociatedwith',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'),
),
migrations.AlterField(
model_name='wasassociatedwith',
name='agent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Agent'),
),
migrations.AlterField(
model_name='wasattributedto',
name='agent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Agent'),
),
migrations.AlterField(
model_name='wasattributedto',
name='entity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'),
),
migrations.AlterField(
model_name='wasderivedfrom',
name='generatedEntity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'),
),
migrations.AlterField(
model_name='wasderivedfrom',
name='usedEntity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='generatedEntity', to='prov_vo.Entity'),
),
migrations.AlterField(
model_name='wasgeneratedby',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'),
),
migrations.AlterField(
model_name='wasgeneratedby',
name='entity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'),
),
migrations.AlterField(
model_name='wasinformedby',
name='informant',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='informed', to='prov_vo.Activity'),
),
migrations.AlterField(
model_name='wasinformedby',
name='informed',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'),
),
migrations.AddField(
model_name='used',
name='description',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.UsedDescription'),
),
migrations.AddField(
model_name='wasgeneratedby',
name='description',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.WasGeneratedByDescription'),
),
]
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
from django.db import models
from account.models import UserAccount
# Trip data
class Trip(models.Model):
    # Source website
    strSource = models.CharField(max_length=255, null=False)
    # Original URL
    strOriginUrl = models.CharField(db_index=True, max_length=255, null=False)
    # Main image URL
    strImageUrl = models.TextField(null=False)
    # Update status (out-of-date, up-to-date)
    strUpdateStatus = models.CharField(max_length=255, null=True)
    # Update time
    dtUpdateTime = models.DateTimeField(null=True)
    # Title
    strTitle = models.CharField(max_length=255, null=True)
    # Location
    strLocation = models.TextField(null=True)
    # Cost (USD)
    intUsdCost = models.IntegerField(null=True)
    # Guide language
    strGuideLanguage = models.CharField(max_length=255, null=True)
    # Review stars (1-5)
    intReviewStar = models.IntegerField(null=True)
    # Number of reviewers
    intReviewVisitor = models.IntegerField(null=True)
    # Main attractions
    strAttrations = models.CharField(max_length=255, null=True)
    # Introduction
    strIntroduction = models.TextField(null=True)
    # Trip start date
    dtDatetimeFrom = models.DateTimeField(null=True)
    # Trip end date
    dtDatetimeTo = models.DateTimeField(null=True)
    # Total trip duration (hours)
    intDurationHour = models.IntegerField(null=True)
    # Trip style
    strStyle = models.CharField(max_length=255, null=True)
    # Special option number
    intOption = models.IntegerField(null=True)
# Trips favorited by a user
class FavoriteTrip(models.Model):
    # User account ForeignKey
    fkUserAccount = models.ForeignKey(UserAccount, null=False, on_delete=models.CASCADE)
    # Trip ForeignKey
    fkTrip = models.ForeignKey(Trip, null=False, on_delete=models.CASCADE)
    # Settings (JSON format)
    strJsonSetting = models.TextField(null=True)
# User-defined trip plan
class CustomizedTripPlan(models.Model):
    # User account ForeignKey
    fkUserAccount = models.ForeignKey(UserAccount, null=False, on_delete=models.CASCADE)
    # Trip plan name
    strName = models.CharField(max_length=255, null=True)
    # Cover image
    strImageUrl = models.TextField(null=True)
    # Full trip plan start date
    dtDatetimeFrom = models.DateTimeField(null=True)
    # Full trip plan end date
    dtDatetimeTo = models.DateTimeField(null=True)
# User-defined trip plan item
class CustomizedTripPlanItem(models.Model):
    # Trip plan ForeignKey
    fkCustomizedTripPlan = models.ForeignKey(CustomizedTripPlan, null=False, on_delete=models.CASCADE)
    # Item title
    strTitle = models.CharField(max_length=255, null=True)
    # Original URL
    strOriginUrl = models.TextField(null=True)
    # Main image URL
    strImageUrl = models.TextField(null=True)
    # Location
    strLocation = models.TextField(null=True)
    # Cost (USD)
    intUsdCost = models.IntegerField(null=True)
    # Total duration (hours)
    intDurationHour = models.IntegerField(null=True)
    # Comment
    strComment = models.TextField(null=True)
    # Longitude
    strLongitude = models.CharField(max_length=255, null=True)
    # Latitude
    strLatitude = models.CharField(max_length=255, null=True)
    # Planned start date
    dtDatetimeFrom = models.DateTimeField(null=True)
    # Planned end date
    dtDatetimeTo = models.DateTimeField(null=True)
# Exchange rate data
class ExRate(models.Model):
    # Currency name
    strCurrencyName = models.CharField(max_length=255, null=False)
    # Exchange rate to the US dollar
    fUSDollar = models.FloatField(null=False)
    # Update time
    dtUpdateTime = models.DateTimeField(null=False)
|
import inspect
import logging
import os
import re
import subprocess
from typing import Dict, Any
from pyhttpd.certs import CertificateSpec
from pyhttpd.conf import HttpdConf
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
log = logging.getLogger(__name__)
class H2TestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
self.add_source_dir(os.path.dirname(inspect.getfile(H2TestSetup)))
self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl"])
def make(self):
super().make()
self._add_h2test()
self._setup_data_1k_1m()
def _add_h2test(self):
local_dir = os.path.dirname(inspect.getfile(H2TestSetup))
p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'],
capture_output=True,
cwd=os.path.join(local_dir, 'mod_h2test'))
rv = p.returncode
if rv != 0:
log.error(f"compiling md_h2test failed: {p.stderr}")
raise Exception(f"compiling md_h2test failed: {p.stderr}")
modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf')
with open(modules_conf, 'a') as fd:
# load our test module which is not installed
fd.write(f"LoadModule h2test_module \"{local_dir}/mod_h2test/.libs/mod_h2test.so\"\n")
def _setup_data_1k_1m(self):
s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
with open(os.path.join(self.env.gen_dir, "data-1k"), 'w') as f:
for i in range(10):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-10k"), 'w') as f:
for i in range(100):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-100k"), 'w') as f:
for i in range(1000):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-1m"), 'w') as f:
for i in range(10000):
f.write(f"{i:09d}-{s90}")
class H2TestEnv(HttpdTestEnv):
def __init__(self, pytestconfig=None):
super().__init__(pytestconfig=pytestconfig)
self.add_httpd_conf([
"H2MinWorkers 1",
"H2MaxWorkers 64",
"Protocols h2 http/1.1 h2c",
])
self.add_httpd_log_modules(["http2", "proxy_http2", "h2test"])
self.add_cert_specs([
CertificateSpec(domains=[
f"push.{self._http_tld}",
f"hints.{self._http_tld}",
f"ssl.{self._http_tld}",
f"pad0.{self._http_tld}",
f"pad1.{self._http_tld}",
f"pad2.{self._http_tld}",
f"pad3.{self._http_tld}",
f"pad8.{self._http_tld}",
]),
CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
'AH02032',
'AH01276',
'AH01630',
'AH00135',
'AH02261', # Re-negotiation handshake failed (our test_101)
'AH03490', # scoreboard full, happens on limit tests
])
self.httpd_error_log.add_ignored_patterns([
re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
re.compile(r'.*:tls_post_process_client_hello:.*'),
re.compile(r'.*:tls_process_client_certificate:.*'),
re.compile(r'.*have incompatible TLS configurations.'),
])
def setup_httpd(self, setup: HttpdTestSetup = None):
super().setup_httpd(setup=H2TestSetup(env=self))
class H2Conf(HttpdConf):
def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None):
super().__init__(env=env, extras=HttpdConf.merge_extras(extras, {
f"cgi.{env.http_tld}": [
"SSLOptions +StdEnvVars",
"AddHandler cgi-script .py",
]
}))
def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=None,
ssl_module=None, with_certificates=None):
super().start_vhost(domains=domains, port=port, doc_root=doc_root,
with_ssl=with_ssl, ssl_module=ssl_module,
with_certificates=with_certificates)
if f"noh2.{self.env.http_tld}" in domains:
protos = ["http/1.1"]
elif port == self.env.https_port or with_ssl is True:
protos = ["h2", "http/1.1"]
else:
protos = ["h2c", "http/1.1"]
if f"test2.{self.env.http_tld}" in domains:
protos = reversed(protos)
self.add(f"Protocols {' '.join(protos)}")
return self
def add_vhost_noh2(self):
domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"]
self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
return self
def add_vhost_test1(self, proxy_self=False, h2proxy_self=False):
return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self)
def add_vhost_test2(self):
return super().add_vhost_test2()
|
from dgl import BatchedDGLGraph
from dgl.nn.pytorch.conv import GINConv
from torch import nn
from models.GNN.GNNModelBase import GNNModelBase
from models.utils import TypeConditionalLinear
class GIN(GNNModelBase):
"""
Graph Isomorphism Network as described in https://arxiv.org/pdf/1810.00826.pdf
"""
def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):
super().__init__(**kwargs)
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
apply_func_layers = sum(
[[nn.Linear(self.hidden_dim, self.hidden_dim),
self.get_act(),
self.get_norm(self.hidden_dim),
nn.Dropout(self.p_dropout)] for _ in
range(n_apply_func_layers)],
[])
apply_func = nn.Sequential(*apply_func_layers)
self.layers.append(GINConv(apply_func=apply_func,
aggregator_type=aggregator_type,
init_eps=init_eps,
learn_eps=learn_eps))
def gnn_forward(self, g: BatchedDGLGraph):
feats = g.ndata['h']
for layer in self.layers:
feats = layer(graph=g, feat=feats)
readout = self.readout(g, feats)
out = self.fcout(readout)
return out
class RelationalGIN(GNNModelBase):
"""
Version of GIN that passes edge-type-conditional messages
"""
def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):
super().__init__(**kwargs)
self.n_relations = 2 * len(
self.db_info['edge_type_to_int']) - 1 # there are negative edge types for the reverse edges
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
apply_func_layers = sum(
[[nn.Linear(self.hidden_dim, self.hidden_dim),
self.get_act(),
self.get_norm(self.hidden_dim),
nn.Dropout(self.p_dropout)] for _ in
range(n_apply_func_layers)],
[])
apply_func = nn.Sequential(*apply_func_layers)
self.layers.append(RelationalGINConv(apply_func=apply_func,
activation=self.get_act(),
aggregator_type=aggregator_type,
hidden_dim=self.hidden_dim,
init_eps=init_eps,
learn_eps=learn_eps,
num_rels=self.n_relations))
def gnn_forward(self, g: BatchedDGLGraph):
feats = g.ndata['h']
etypes = g.edata['edge_types'] + self.n_relations // 2
for layer in self.layers:
feats = layer(graph=g, feat=feats, etypes=etypes)
readout = self.readout(g, feats)
out = self.fcout(readout)
return out
class RelationalGINConv(GINConv):
def __init__(self, apply_func, activation, aggregator_type, hidden_dim, init_eps=0, learn_eps=False, num_rels=0):
super().__init__(apply_func, aggregator_type, init_eps, learn_eps)
self.num_rels = num_rels
self.act = activation
self.edge_message_layer = TypeConditionalLinear(hidden_dim, hidden_dim, num_rels)
def message_func(self, edges):
msg = edges.src['h']
msg = self.edge_message_layer(msg, edges.data['type'])
msg = self.act(msg)
return {'msg': msg}
def forward(self, graph, feat, etypes):
graph = graph.local_var()
graph.ndata['h'] = feat
graph.edata['type'] = etypes
graph.update_all(self.message_func, self._reducer('msg', 'neigh'))
rst = (1 + self.eps) * feat + graph.ndata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
class ERGIN(RelationalGIN):
"""
GIN using different linear mappings for each node and edge type
"""
def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):
super().__init__(n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs)
self.n_node_types = len(self.db_info['node_type_to_int'])
self.act = self.get_act()
self.layers = nn.ModuleList()
self.apply_func_blocks = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(RelationalGINConv(apply_func=None,
activation=self.get_act(),
aggregator_type=aggregator_type,
hidden_dim=self.hidden_dim,
init_eps=init_eps,
learn_eps=learn_eps,
num_rels=self.n_relations))
self.apply_func_blocks.append(
nn.ModuleList([nn.ModuleDict({'tcl': TypeConditionalLinear(self.hidden_dim,
self.hidden_dim,
self.n_node_types),
'act': self.get_act(),
'norm': self.get_norm(self.hidden_dim),
'do': nn.Dropout(self.p_dropout)
})
for _ in range(n_apply_func_layers)])
)
def gnn_forward(self, g: BatchedDGLGraph):
feats = g.ndata['h']
ntypes = g.ndata['node_types']
etypes = g.edata['edge_types'] + self.n_relations // 2
for layer, apply_func_blocks in zip(self.layers, self.apply_func_blocks):
feats = layer(graph=g, feat=feats, etypes=etypes)
for block in apply_func_blocks:
feats = block['tcl'](feats, ntypes)
feats = block['act'](feats)
feats = block['norm'](feats)
feats = block['do'](feats)
readout = self.readout(g, feats)
out = self.fcout(readout)
return out
|