|
async def get_location_count() -> int:
return 234
async def get_locations_used() -> int:
return 230
|
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import TYPE_CHECKING, Union, Optional, List, Dict, Any
import discord
from .base import DatabaseChecker
from .punishments import Punisher
if TYPE_CHECKING:
from .punishments import Punishment
from discord.ext import commands
__all__ = ("UnbanFailure", "BanManager")
class UnbanFailure(Exception):
"""Raises an exception when the user tries to unban a discord.User without passing the guild."""
class BanManager(DatabaseChecker, Punisher):
"""
A BanManager that manages guild bans.
"""
__slots__ = ("bot",)
def __init__(self, bot: commands.Bot):
super().__init__(
[
{
"guild": "snowflake",
"member": "snowflake",
"reason": "string",
"timestamp": "snowflake",
}
],
["bans"],
)
self.bot = bot
self.add_event(self._on_database_connect, "on_database_connect")
async def _on_database_connect(self):
self.bot.loop.create_task(self.__check_bans())
@DatabaseChecker.uses_database
async def get_banned_members(self) -> List[Dict[str, Any]]:
"""
|coro|
This function returns the ban records of members whose unban timestamp has passed and who should therefore be unbanned.
:return: The list of ban records of members due to be unbanned.
:rtype: List[Dict[str, Any]]
"""
return [
x
for x in await self.database.select(self.tables["bans"], [], fetchall=True)
if x["timestamp"] <= datetime.utcnow().timestamp()
]
async def __check_bans(self) -> None:
"""
|coro|
A loop that ensures that members are unbanned when they need to.
:return: None
:rtype: None
"""
await self.bot.wait_until_ready()
while not self.bot.is_closed():
for banned_member in await self.get_banned_members():
guild = self.bot.get_guild(banned_member["guild"])
if guild is None:
continue
user = await self.bot.fetch_user(banned_member["member"])
if await self.unban(user, guild):
await self.call_event("on_unban", user, banned_member["reason"])
await asyncio.sleep(300)
async def punish(
self, ctx: commands.Context, member: discord.Member, punishment: Punishment
) -> None:
try:
self.bot.loop.create_task(
self.ban(
member,
punishment.punishment_reason,
punishment.punishment_time.total_seconds(),
)
)
except discord.errors.Forbidden as e:
raise e
else:
await self.call_event("on_punishment", ctx, member, punishment)
@staticmethod
async def get_ban(
member: Union[discord.Member, discord.User], guild: discord.Guild
) -> Optional[discord.User]:
"""
|coro|
This function returns the user object of the member if they are banned from the guild.
:param member: The banned member or user.
:type member: Union[discord.Member, discord.User]
:param guild: The guild.
:type guild: discord.Guild
:return: The user object if found.
:rtype: Optional[discord.User]
"""
banned = await guild.bans()
for x in banned:
if x.user.id == member.id:
return x.user
@DatabaseChecker.uses_database
async def unban(
self, member: Union[discord.Member, discord.User], guild: Optional[discord.Guild] = None
) -> bool:
"""
|coro|
Unbans the member from the guild.
:param Union[discord.Member, discord.User] member: The member or user to unban.
:param discord.Guild guild: The guild to unban the member from.
:return: A bool representing if the unban was successful.
:rtype: bool
:raises: UnbanFailure: Cannot unban a discord.User without a guild.
"""
if isinstance(member, discord.User) and not guild:
raise UnbanFailure("Cannot unban a discord.User without a guild.")
guild = guild if guild is not None else member.guild
await self.database.delete(
self.tables["bans"], {"guild": guild.id, "member": member.id}
)
if user := await self.get_ban(member, guild):
    await guild.unban(user)
    return True
return False
async def __handle_unban(
self, time_of_ban: Union[int, float], member: discord.Member, reason: str
) -> None:
"""
|coro|
A function that handles the member's unban. It runs as a separate task from the ban method so that the ban call is not blocked while waiting.
:param Union[int, float] time_of_ban: The number of seconds to wait before unbanning the member.
:param discord.Member member: The member to unban.
:param str reason: The reason for the ban.
:return: None
:rtype: None
"""
await asyncio.sleep(time_of_ban)
if await self.unban(member):
await self.call_event("on_unban", member, reason)
@DatabaseChecker.uses_database
async def ban(
self,
member: discord.Member,
reason: str = "No reason provided.",
time_of_ban: Union[int, float] = 0,
) -> None:
"""
|coro|
Bans the member from the guild.
:param member: The member to ban.
:type member: discord.Member
:param reason: The reason for the ban.
:type reason: str
:param time_of_ban: The duration of the ban in seconds; 0 or less makes the ban permanent.
:type time_of_ban: Union[int, float]
:return: None
:rtype: None
"""
await member.ban(reason=reason)
if time_of_ban <= 0:
return
await self.database.insert(
self.tables["bans"],
{
"guild": member.guild.id,
"member": member.id,
"reason": reason,
"timestamp": datetime.utcnow().timestamp() + time_of_ban,
},
)
self.bot.loop.create_task(self.__handle_unban(time_of_ban, member, reason))
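# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how this manager might be wired into a bot. It
# assumes the database connection is set up elsewhere by the surrounding
# library (which fires "on_database_connect"); the command name and token
# placeholder below are purely illustrative.
#
# bot = commands.Bot(command_prefix="!")
# ban_manager = BanManager(bot)
#
# @bot.command()
# async def tempban(ctx, member: discord.Member, minutes: int, *, reason="No reason provided."):
#     await ban_manager.ban(member, reason, minutes * 60)
#     await ctx.send(f"{member} was banned for {minutes} minutes.")
#
# bot.run("YOUR_TOKEN")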
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import JavabitTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(JavabitTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[1], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[3], node0_address, 50)
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 50})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
# coding: utf-8
# In[1]:
import numpy as np
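# Solve the standard 8x8 DLT system A h = b for the homography that maps the
# four points u onto the four points v (H[2][2] is fixed to 1).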
def get_homograph(u,v):
A = np.array([[u[0][0], u[0][1], 1, 0, 0, 0, -1 * u[0][0] * v[0][0], -1 * u[0][1] * v[0][0]],
[0, 0, 0, u[0][0], u[0][1], 1, -1 * u[0][0] * v[0][1], -1 * u[0][1] * v[0][1]],
[u[1][0], u[1][1], 1, 0, 0, 0, -1 * u[1][0] * v[1][0], -1 * u[1][1] * v[1][0]],
[0, 0, 0, u[1][0], u[1][1], 1, -1 * u[1][0] * v[1][1], -1 * u[1][1] * v[1][1]],
[u[2][0], u[2][1], 1, 0, 0, 0, -1 * u[2][0] * v[2][0], -1 * u[2][1] * v[2][0]],
[0, 0, 0, u[2][0], u[2][1], 1, -1 * u[2][0] * v[2][1], -1 * u[2][1] * v[2][1]],
[u[3][0], u[3][1], 1, 0, 0, 0, -1 * u[3][0] * v[3][0], -1 * u[3][1] * v[3][0]],
[0, 0, 0, u[3][0], u[3][1], 1, -1 * u[3][0] * v[3][1], -1 * u[3][1] * v[3][1]]
])
b = np.array([[v[0][0]],
[v[0][1]],
[v[1][0]],
[v[1][1]],
[v[2][0]],
[v[2][1]],
[v[3][0]],
[v[3][1]]
])
tmp = np.dot(np.linalg.inv(A), b)
H = np.array([[tmp[0][0], tmp[1][0], tmp[2][0]],
[tmp[3][0], tmp[4][0], tmp[5][0]],
[tmp[6][0], tmp[7][0], 1]
])
return H
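# Bilinearly interpolate the value of img at the non-integer location (new_x, new_y).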
def interpolation(img, new_x, new_y):
fx = round(new_x - int(new_x), 2)
fy = round(new_y - int(new_y), 2)
p = np.zeros((3,))
p += (1 - fx) * (1 - fy) * img[int(new_y), int(new_x)]
p += (1 - fx) * fy * img[int(new_y) + 1, int(new_x)]
p += fx * (1 - fy) * img[int(new_y), int(new_x) + 1]
p += fx * fy * img[int(new_y) + 1, int(new_x) + 1]
return p
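# Forward warping: push every pixel inside the bounding box of the corner points u
# through the u -> v homography and write it onto the canvas (rounding to integer
# destination pixels, so the result can contain holes).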
def forward_warping(u,v,input_image,canvas):
matrix = get_homograph(u,v)
i0_max = u[0:4,0:1].max()
i0_min = u[0:4,0:1].min()
i1_max = u[0:4,1:2].max()
i1_min = u[0:4,1:2].min()
i0_range = i0_max-i0_min
i1_range = i1_max-i1_min
for i in range(i1_range):
for j in range(i0_range):
tmp2 = np.dot(matrix, np.array([[j+i0_min, i+i1_min, 1]]).T)
x, y = int(tmp2[0][0] / tmp2[2][0]), int(tmp2[1][0] / tmp2[2][0])
canvas[y][x] = input_image[i+i1_min][j+i0_min]
return canvas
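# Warp by pulling: for every pixel position inside the bounding box of the corner
# points u, map it through the u -> v homography and sample input_image at the
# mapped location with bilinear interpolation.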
def backward_warping(u,v,input_image,canvas):
matrix = get_homograph(u,v) # v: output, u: input
i0_max = u[0:4,0:1].max()
i0_min = u[0:4,0:1].min()
i1_max = u[0:4,1:2].max()
i1_min = u[0:4,1:2].min()
i0_range = i0_max-i0_min
i1_range = i1_max-i1_min
for j in range(i1_range):
for i in range(i0_range):
new_pos = np.dot(matrix, np.array([[i+i0_min, j+i1_min, 1]]).T)
new_x, new_y = new_pos[0][0] / new_pos[2][0], new_pos[1][0] / new_pos[2][0]
res = interpolation(input_image, new_x, new_y)
canvas[j+i1_min][i+i0_min] = res
return canvas
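# --- Usage sketch (not part of the original notebook) ---
# A self-contained example with synthetic data; the image sizes and corner
# points below are illustrative only.
if __name__ == '__main__':
    src = np.random.randint(0, 255, (200, 300, 3)).astype(np.float64)  # source image, H=200, W=300
    canvas = np.zeros((400, 400, 3))                                   # destination canvas
    u = np.array([[0, 0], [299, 0], [299, 199], [0, 199]])             # source corners (x, y)
    v = np.array([[50, 50], [350, 60], [340, 360], [60, 350]])         # destination corners (x, y)
    warped = forward_warping(u, v, src, canvas)
    print(warped.shape)  # (400, 400, 3)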
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: FlatGeobuf driver test suite.
# Author: Björn Harrtell <bjorn@wololo.org>
#
###############################################################################
# Copyright (c) 2018-2019, Björn Harrtell <bjorn@wololo.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
from osgeo import ogr
from osgeo import gdal
import gdaltest
import ogrtest
import pytest
### utils
def verify_flatgeobuf_copy(name, fids, names):
if gdaltest.features is None:
print('Missing features collection')
return False
fname = os.path.join('tmp', name + '.fgb')
ds = ogr.Open(fname)
if ds is None:
print('Can not open \'' + fname + '\'')
return False
lyr = ds.GetLayer(0)
if lyr is None:
print('Missing layer')
return False
######################################################
# Test attributes
ret = ogrtest.check_features_against_list(lyr, 'FID', fids)
if ret != 1:
print('Wrong values in \'FID\' field')
return False
lyr.ResetReading()
ret = ogrtest.check_features_against_list(lyr, 'NAME', names)
if ret != 1:
print('Wrong values in \'NAME\' field')
return False
######################################################
# Test geometries
lyr.ResetReading()
for i in range(len(gdaltest.features)):
orig_feat = gdaltest.features[i]
feat = lyr.GetNextFeature()
if feat is None:
print('Failed trying to read feature')
return False
if ogrtest.check_feature_geometry(feat, orig_feat.GetGeometryRef(),
max_error=0.001) != 0:
print('Geometry test failed')
gdaltest.features = None
return False
gdaltest.features = None
lyr = None
return True
def copy_shape_to_flatgeobuf(name, wkbType, compress=None, options=[]):
if gdaltest.flatgeobuf_drv is None:
return False
if compress is not None:
if compress[0:5] == '/vsig':
dst_name = os.path.join('/vsigzip/', 'tmp', name + '.fgb' + '.gz')
elif compress[0:4] == '/vsiz':
dst_name = os.path.join('/vsizip/', 'tmp', name + '.fgb' + '.zip')
elif compress == '/vsistdout/':
dst_name = compress
else:
return False
else:
dst_name = os.path.join('tmp', name + '.fgb')
ds = gdaltest.flatgeobuf_drv.CreateDataSource(dst_name)
if ds is None:
return False
######################################################
# Create layer
lyr = ds.CreateLayer(name, None, wkbType, options)
if lyr is None:
return False
######################################################
# Setup schema (all test shapefiles use a common schema)
ogrtest.quick_create_layer_def(lyr,
[('FID', ogr.OFTReal),
('NAME', ogr.OFTString)])
######################################################
# Copy in shp
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
src_name = os.path.join('data', name + '.shp')
shp_ds = ogr.Open(src_name)
shp_lyr = shp_ds.GetLayer(0)
feat = shp_lyr.GetNextFeature()
gdaltest.features = []
while feat is not None:
gdaltest.features.append(feat)
dst_feat.SetFrom(feat)
lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
shp_lyr = None
lyr = None
ds = None
return True
### tests
def test_ogr_flatgeobuf_1():
gdaltest.flatgeobuf_drv = ogr.GetDriverByName('FlatGeobuf')
if gdaltest.flatgeobuf_drv is not None:
return
pytest.fail()
def test_ogr_flatgeobuf_2():
fgb_ds = ogr.Open('data/testfgb/poly.fgb')
fgb_lyr = fgb_ds.GetLayer(0)
# test expected spatial filter feature count consistency
c = fgb_lyr.GetFeatureCount()
assert c == 10
c = fgb_lyr.SetSpatialFilterRect(478315.531250, 4762880.500000, 481645.312500, 4765610.500000)
c = fgb_lyr.GetFeatureCount()
assert c == 10
c = fgb_lyr.SetSpatialFilterRect(878315.531250, 4762880.500000, 881645.312500, 4765610.500000)
c = fgb_lyr.GetFeatureCount()
assert c == 0
c = fgb_lyr.SetSpatialFilterRect(479586.0,4764618.6,479808.2,4764797.8)
c = fgb_lyr.GetFeatureCount()
if ogrtest.have_geos():
assert c == 4
else:
assert c == 5
# check that ResetReading does not affect subsequent enumeration or filtering
num = len(list([x for x in fgb_lyr]))
if ogrtest.have_geos():
assert num == 4
else:
assert num == 5
fgb_lyr.ResetReading()
c = fgb_lyr.GetFeatureCount()
if ogrtest.have_geos():
assert c == 4
else:
assert c == 5
fgb_lyr.ResetReading()
num = len(list([x for x in fgb_lyr]))
if ogrtest.have_geos():
assert num == 4
else:
assert num == 5
def wktRoundtrip(expected):
ds = ogr.GetDriverByName('FlatGeobuf').CreateDataSource('/vsimem/test.fgb')
g = ogr.CreateGeometryFromWkt(expected)
lyr = ds.CreateLayer('test', None, g.GetGeometryType(), [])
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(g)
lyr.CreateFeature(f)
ds = None
fgb_ds = ogr.Open('/vsimem/test.fgb')
fgb_lyr = fgb_ds.GetLayer(0)
f = fgb_lyr.GetNextFeature()
g = f.GetGeometryRef()
actual = g.ExportToWkt()
fgb_ds = None
ogr.GetDriverByName('FlatGeobuf').DeleteDataSource('/vsimem/test.fgb')
assert not gdal.VSIStatL('/vsimem/test.fgb')
assert actual == expected
def test_ogr_flatgeobuf_3():
if gdaltest.flatgeobuf_drv is None:
pytest.skip()
wktRoundtrip('POINT (1 1)')
wktRoundtrip('POINT (1.1234 1.4321)')
wktRoundtrip('POINT (1.12345678901234 1.4321)') # max precision 15 decimals
#wktRoundtrip('POINT (1.123456789012341 1.4321)') # 16 decimals, will not pass
wktRoundtrip('POINT (1.2 -2.1)')
wktRoundtrip('MULTIPOINT (10 40,40 30,20 20,30 10)')
wktRoundtrip('LINESTRING (1.2 -2.1,2.4 -4.8)')
wktRoundtrip('MULTILINESTRING ((10 10,20 20,10 40),(40 40,30 30,40 20,30 10),(50 50,60 60,50 90))')
wktRoundtrip('MULTILINESTRING ((1.2 -2.1,2.4 -4.8))')
wktRoundtrip('POLYGON ((30 10,40 40,20 40,10 20,30 10))')
wktRoundtrip('POLYGON ((35 10,45 45,15 40,10 20,35 10),(20 30,35 35,30 20,20 30))')
wktRoundtrip('MULTIPOLYGON (((30 20,45 40,10 40,30 20)),((15 5,40 10,10 20,5 10,15 5)))')
wktRoundtrip('MULTIPOLYGON (((40 40,20 45,45 30,40 40)),((20 35,10 30,10 10,30 5,45 20,20 35),(30 20,20 15,20 25,30 20)))')
wktRoundtrip('MULTIPOLYGON (((30 20,45 40,10 40,30 20)))')
wktRoundtrip('MULTIPOLYGON (((35 10,45 45,15 40,10 20,35 10),(20 30,35 35,30 20,20 30)))')
#wktRoundtrip('POINT ZM (1 2 3 4)')
# Run test_ogrsf
def test_ogr_flatgeobuf_8():
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
pytest.skip()
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/testfgb/poly.fgb')
assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
def test_ogr_flatgeobuf_9():
if gdaltest.flatgeobuf_drv is None:
pytest.skip()
gdaltest.tests = [
['gjpoint', [1], ['Point 1'], ogr.wkbPoint],
['gjline', [1], ['Line 1'], ogr.wkbLineString],
['gjpoly', [1], ['Polygon 1'], ogr.wkbPolygon],
['gjmultipoint', [1], ['MultiPoint 1'], ogr.wkbMultiPoint],
['gjmultiline', [2], ['MultiLine 1'], ogr.wkbMultiLineString],
['gjmultipoly', [2], ['MultiPoly 1'], ogr.wkbMultiPolygon]
]
for i in range(len(gdaltest.tests)):
test = gdaltest.tests[i]
rc = copy_shape_to_flatgeobuf(test[0], test[3])
assert rc, ('Failed making copy of ' + test[0] + '.shp')
rc = verify_flatgeobuf_copy(test[0], test[1], test[2])
assert rc, ('Verification of copy of ' + test[0] + '.shp failed')
for i in range(len(gdaltest.tests)):
test = gdaltest.tests[i]
rc = copy_shape_to_flatgeobuf(test[0], test[3], None, ['SPATIAL_INDEX=NO'])
assert rc, ('Failed making copy of ' + test[0] + '.shp')
rc = verify_flatgeobuf_copy(test[0], test[1], test[2])
assert rc, ('Verification of copy of ' + test[0] + '.shp failed')
# Test support for multiple layers in a directory
def test_ogr_flatgeobuf_directory():
if gdaltest.flatgeobuf_drv is None:
pytest.skip()
ds = ogr.GetDriverByName('FlatGeobuf').CreateDataSource('/vsimem/multi_layer')
with gdaltest.error_handler(): # name will be laundered
ds.CreateLayer('foo<', geom_type = ogr.wkbPoint)
ds.CreateLayer('bar', geom_type = ogr.wkbPoint)
ds = None
ds = gdal.OpenEx('/vsimem/multi_layer')
assert set(ds.GetFileList()) == set(['/vsimem/multi_layer/bar.fgb', '/vsimem/multi_layer/foo_.fgb'])
assert ds.GetLayer('foo<')
assert ds.GetLayer('bar')
ds = None
ogr.GetDriverByName('FlatGeobuf').DeleteDataSource('/vsimem/multi_layer')
assert not gdal.VSIStatL('/vsimem/multi_layer')
|
import re
class DisambiguatorPrefixRule7(object):
"""Disambiguate Prefix Rule 7
Rule 7 : terCerV -> ter-CerV where C != 'r'
"""
def disambiguate(self, word):
"""Disambiguate Prefix Rule 7
Rule 7 : terCerV -> ter-CerV where C != 'r'
"""
matches = re.match(r'^ter([bcdfghjklmnpqrstvwxyz])er([aiueo].*)$', word)
if matches:
if matches.group(1) == 'r':
return
return matches.group(1) + 'er' + matches.group(2)
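# --- Usage sketch (not part of the original module) ---
# A minimal example of calling the rule directly; the sample words are
# illustrative only.
if __name__ == '__main__':
    rule = DisambiguatorPrefixRule7()
    print(rule.disambiguate('terperinci'))  # -> 'perinci' (the 'ter-' prefix is stripped)
    print(rule.disambiguate('makan'))       # no match -> None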
|
import librosa
import soundfile
import os, glob, pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
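# Extract MFCC, chroma, and mel-spectrogram features from a sound file and stack them into a single feature vector.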
def extract_feature(file_name, mfcc, chroma, mel):
with soundfile.SoundFile(file_name) as sound_file:
X = sound_file.read(dtype="float32")
sample_rate=sound_file.samplerate
if chroma:
stft=np.abs(librosa.stft(X))
result=np.array([])
if mfcc:
mfccs=np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result=np.hstack((result, mfccs))
if chroma:
chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
result=np.hstack((result, chroma))
if mel:
mel=np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T,axis=0)
result=np.hstack((result, mel))
return result
emotions={
'01':'neutral',
'02':'calm',
'03':'happy',
'04':'sad',
'05':'angry',
'06':'fearful',
'07':'disgust',
'08':'surprised'
}
#DataFlair - Emotions to observe
observed_emotions=['calm', 'happy', 'fearful', 'disgust']
def load_data(ts):
tr=abs(1-ts)
x,y=[],[]
for file in glob.glob("D:\\python\\dl programs\\SP\\DATA\\Actor_*\\*.wav"):
file_name=os.path.basename(file)
emotion=emotions[file_name.split("-")[2]]
print(emotion)
if emotion not in observed_emotions:
continue
feature=extract_feature(file, mfcc=True, chroma=True, mel=True)
x.append(feature)
y.append(emotion)
return train_test_split(np.array(x), y, test_size=ts, train_size=tr ,random_state=9)
ts=0.25
x_train,x_test,y_train,y_test=load_data(ts)
print((x_train.shape[0], x_test.shape[0]))
print(f'Features extracted: {x_train.shape[1]}')
#DataFlair - Initialize the Multi Layer Perceptron Classifier
model=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)
model.fit(x_train,y_train)
y_pred=model.predict(x_test)
accuracy=accuracy_score(y_true=y_test, y_pred=y_pred)
#DataFlair - Print the accuracy
print("Accuracy: {:.2f}%".format(accuracy*100))
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import logging
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
FlaubertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
)
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
logger = logging.getLogger(__name__)
TOKENIZER_MAPPING = OrderedDict(
[
(T5Config, (T5Tokenizer, None)),
(DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
(AlbertConfig, (AlbertTokenizer, None)),
(CamembertConfig, (CamembertTokenizer, None)),
(XLMRobertaConfig, (XLMRobertaTokenizer, None)),
(RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(BertConfig, (BertTokenizer, BertTokenizerFast)),
(OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
(GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
(TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
(XLNetConfig, (XLNetTokenizer, None)),
(FlaubertConfig, (FlaubertTokenizer, None)),
(XLMConfig, (XLMTokenizer, None)),
(CTRLConfig, (CTRLTokenizer, None)),
]
)
class AutoTokenizer:
r""":class:`~transformers.AutoTokenizer` is a generic tokenizer class
that will be instantiated as one of the tokenizer classes of the library
when created with the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct tokenizer class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The tokenizer class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: T5Tokenizer (T5 model)
- contains `distilbert`: DistilBertTokenizer (DistilBert model)
- contains `albert`: AlbertTokenizer (ALBERT model)
- contains `camembert`: CamembertTokenizer (CamemBERT model)
- contains `xlm-roberta`: XLMRobertaTokenizer (XLM-RoBERTa model)
- contains `roberta`: RobertaTokenizer (RoBERTa model)
- contains `bert`: BertTokenizer (Bert model)
- contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
- contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
- contains `xlnet`: XLNetTokenizer (XLNet model)
- contains `xlm`: XLMTokenizer (XLM model)
- contains `ctrl`: CTRLTokenizer (Salesforce CTRL model)
This class cannot be instantiated using `__init__()` (it throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r""" Instantiate one of the tokenizer classes of the library
from a pre-trained model vocabulary.
The tokenizer class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: T5Tokenizer (T5 model)
- contains `distilbert`: DistilBertTokenizer (DistilBert model)
- contains `albert`: AlbertTokenizer (ALBERT model)
- contains `camembert`: CamembertTokenizer (CamemBERT model)
- contains `xlm-roberta`: XLMRobertaTokenizer (XLM-RoBERTa model)
- contains `roberta`: RobertaTokenizer (RoBERTa model)
- contains `bert-base-japanese`: BertJapaneseTokenizer (Bert model)
- contains `bert`: BertTokenizer (Bert model)
- contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
- contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
- contains `xlnet`: XLNetTokenizer (XLNet model)
- contains `xlm`: XLMTokenizer (XLM model)
- contains `ctrl`: CTRLTokenizer (Salesforce CTRL model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force a (re-)download of the vocabulary files, overriding the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
use_fast: (`optional`) boolean, default True:
Indicate if transformers should try to load the fast version of the tokenizer (True) or use the Python one (False).
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
Examples::
# Download vocabulary from S3 and cache.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
if "bert-base-japanese" in pretrained_model_name_or_path:
return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
use_fast = kwargs.pop("use_fast", True)
for config_class, (tokenizer_class_py, tokenizer_class_fast) in TOKENIZER_MAPPING.items():
if isinstance(config, config_class):
if tokenizer_class_fast and use_fast:
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
raise ValueError(
"Unrecognized configuration class {} to build an AutoTokenizer.\n"
"Model type should be one of {}.".format(
config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
)
)
|
"""
WSGI config for ifollow project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "ifollow.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ifollow.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
"""
====================================================
Compute LCMV inverse solution in volume source space
====================================================
Compute LCMV beamformers on an auditory evoked dataset in a volume source
space, and show activation on ``fsaverage``.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
###############################################################################
# Data preprocessing:
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
fetch_fsaverage(subjects_dir) # ensure fsaverage src exists
fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
# Get epochs
event_id, tmin, tmax = [1, 2], -0.2, 0.5
# Read forward model
forward = mne.read_forward_solution(fname_fwd)
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.find_events(raw)
# Pick the channels of interest
raw.pick(['meg', 'eog'])
# Read epochs
proj = False # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
# Visualize sensor space data
evoked.plot_joint()
###############################################################################
# Compute covariance matrices
# ---------------------------
#
# These matrices need to be inverted at some point, but since they are rank
# deficient, some regularization needs to be done for them to be invertible.
# Regularization can be added either by the :func:`mne.compute_covariance`
# function or later by the :func:`mne.beamformer.make_lcmv` function. In this
# example, we'll go with the latter option, so we specify ``method='empirical'``
# here.
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
method='empirical')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='empirical')
###############################################################################
# Compute beamformer filters
# --------------------------
#
# Compute weights of free orientation (vector) beamformer with weight
# normalization (neural activity index, NAI). Providing a noise covariance
# matrix enables whitening of the data and forward solution. Source orientation
# is optimized by setting pick_ori to 'max-power'.
# weight_norm can also be set to 'unit-noise-gain'. Source orientation can also
# be 'normal' (but only when using a surface-based source space) or None,
# which computes a vector beamformer. Note, however, that not all combinations
# of orientation selection and weight normalization are implemented yet.
filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='nai', rank=None)
print(filters)
# You can save these with:
# filters.save('filters-lcmv.h5')
# Apply this spatial filter to the evoked data.
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
###############################################################################
# Plot source space activity
# --------------------------
# You can save result in stc files with:
# stc.save('lcmv-vol')
lims = [0.3, 0.6, 0.9]
stc.plot(
src=forward['src'], subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), mode='stat_map',
initial_time=0.1, verbose=True)
###############################################################################
# Now let's plot this on a glass brain, which will automatically transform the
# data to MNI Talairach space:
# sphinx_gallery_thumbnail_number = 4
stc.plot(
src=forward['src'], subject='sample', subjects_dir=subjects_dir,
mode='glass_brain', clim=dict(kind='value', lims=lims),
initial_time=0.1, verbose=True)
###############################################################################
# Finally let's get another view, this time plotting again a ``'stat_map'``
# style but using volumetric morphing to get data to fsaverage space,
# which we can get by passing a :class:`mne.SourceMorph` as the ``src``
# argument to `mne.VolSourceEstimate.plot`. To save a bit of speed when
# applying the morph, we will crop the STC:
src_fs = mne.read_source_spaces(fname_fs_src)
morph = mne.compute_source_morph(
forward['src'], subject_from='sample', src_to=src_fs,
subjects_dir=subjects_dir,
niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5], # just for speed
verbose=True)
stc_fs = morph.apply(stc.copy().crop(0.05, 0.18))
stc_fs.plot(
src=src_fs, mode='stat_map', initial_time=0.1, subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), verbose=True)
|
## -*- coding: utf-8 -*-
from .vendor.Qt import QtCore, QtGui, QtWidgets
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMaya as OpenMaya
import json
import os
def maya_version():
return int(cmds.about(v=True)[:4])
def maya_api_version():
return int(cmds.about(api=True))
if 2017 <= maya_version():
import shiboken2 as shiboken
else:
import shiboken
def get_anim_curve_editor():
return cmds.animCurveEditor('graphEditor1GraphEd', q=True, control=True)
def get_play_back_slider():
return mel.eval("$_=$gPlayBackSlider")
def get_timeline_wiget():
_pbs = get_play_back_slider()
_c = OpenMayaUI.MQtUtil.findControl(_pbs)
w = shiboken.wrapInstance(long(_c), QtWidgets.QWidget)
return w
def get_anim_curve_editor_wiget():
_pbs = get_anim_curve_editor()
_c = OpenMayaUI.MQtUtil.findControl(_pbs)
if _c is None:
return None
w = shiboken.wrapInstance(long(_c), QtWidgets.QWidget)
return w.children()[1]
def get_timeline_highlight_range():
_pbs = get_play_back_slider()
_r = cmds.timeControl(_pbs, q=True, ra=True)
return _r[0], _r[1]
def get_timeline_renge():
r = cmds.timeControl(get_play_back_slider(), query=True, ra=True)
return [int(r[0]), int(r[1]) - 1]
def draw_data_to_multi_line_data(draw_data):
lines = []
for d in draw_data:
_dfr = d['fr']
_append = False
for line in lines:
_overlap = False
for l in line:
_lfr = l['fr']
# The frames being added overlap this existing entry's frame range
if _lfr[0] <= _dfr[0] <= _lfr[1] or _lfr[0] <= _dfr[1] <= _lfr[1]:
_overlap = True
break
# The frames being added completely contain this existing entry's frame range
if _dfr[0] <= _lfr[0] <= _dfr[1] and _dfr[0] <= _lfr[1] <= _dfr[1]:
_overlap = True
break
if not _overlap:
line.append(d)
_append = True
break
# Add a new row
if not _append:
lines.append([d])
return lines
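# --- Usage sketch (not part of the original module) ---
# A small illustration of how overlapping frame ranges are packed onto separate
# rows. The data below is hypothetical; this particular function does not need
# a running Maya session.
#
# draw_data_to_multi_line_data([{'fr': (0, 10)}, {'fr': (5, 15)}, {'fr': (20, 30)}])
# -> [[{'fr': (0, 10)}, {'fr': (20, 30)}], [{'fr': (5, 15)}]]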
#-----------------------------------------------------------------------------
# EOF
#-----------------------------------------------------------------------------
|
# coding=utf-8
# Copyright 2017-2019 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
_ENGINE = None
def enable_distributed_training():
global _ENGINE
try:
import horovod.tensorflow as hvd
_ENGINE = hvd
hvd.init()
except ImportError:
sys.stderr.write("Error: You must install horovod first in order to"
" enable distributed training.\n")
exit()
def is_distributed_training_mode():
return _ENGINE is not None
def rank():
return _ENGINE.rank()
def local_rank():
return _ENGINE.local_rank()
def size():
return _ENGINE.size()
def all_reduce(tensor):
return _ENGINE.allreduce(tensor, compression=_ENGINE.Compression.fp16)
def get_broadcast_hook():
return _ENGINE.BroadcastGlobalVariablesHook(0)
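# --- Usage sketch (not part of the original module) ---
# A rough illustration of the intended call order, assuming Horovod is
# installed; the gradient and hook variables are hypothetical names from a
# surrounding TF1-style training loop.
#
# enable_distributed_training()
# if is_distributed_training_mode():
#     gradients = all_reduce(gradients)      # average fp16-compressed gradients across workers
#     hooks.append(get_broadcast_hook())     # broadcast initial variables from rank 0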
|
from django.urls import reverse
from oscar.test.testcases import WebTestCase
from oscar.apps.partner import models
class TestPartnerDashboard(WebTestCase):
is_staff = True
def test_allows_a_partner_user_to_be_created(self):
partner = models.Partner.objects.create(
name="Acme Ltd")
url = reverse('dashboard:partner-list')
list_page = self.get(url)
detail_page = list_page.click("Manage partner and users")
user_page = detail_page.click("Link a new user")
form = user_page.form
form['first_name'] = "Maik"
form['last_name'] = "Hoepfel"
form['email'] = "maik@gmail.com"
form['password1'] = "helloworld"
form['password2'] = "helloworld"
form.submit()
self.assertEqual(1, partner.users.all().count())
|
default_app_config = 'repeating_tasks.apps.RepeatingTasksConfig'
|
import external_movie_data
import fresh_tomatoes
import json
import media
def validate_movie_info(movie_info):
# TODO: Arithmetic error handling
# Convert and round rating range
rating = round(movie_info['rating'] * 5 / 10, 1)
# TODO: Supply the list to the view and loop through it
# Check if genres list is empty
genres_list = movie_info['genres'] if movie_info['genres'] else ['Uncategorized']
# Check if overview is empty
overview = movie_info['overview'] if movie_info['overview'] else 'No overview available'
# Check if tagline is empty
tagline = movie_info['tagline'] if movie_info['tagline'] else '--'
return rating, genres_list, overview, tagline
def construct_movie(movie_info, movies_list):
# Obtain validated movie's info
rating, genres_list, overview, tagline = validate_movie_info(movie_info)
# Create a custom media 'Movie' object and populate a list of movies
movie = media.Movie(movie_info['title'].encode('utf-8'), overview.encode('utf-8'), movie_info['poster_image'],
movie_info['video_trailer'], tagline, genres_list, movie_info['release_date'],
movie_info['runtime'], rating)
movies_list.append(movie)
# List of favourite movies, fill in with yours
movies_searchlist = ['the third man', 'modern times', 'cinema paradiso', 'the asphalt jungle', '12 angry men',
"schindler's list", 'Les quatre cents coups ', 'el angel exterminador', 'midnight cowboy',
'the lady from shanghai', 'double indemnity', 'shadow of a doubt', 'M' , 'citizen kane', 'dr strangelove']
# Empty list to be populated with media 'Movie' objects
movies = []
print str(len(movies_searchlist)) + " movies found"
print "Connecting 'themoviedb' to get some movie data.."
# Loop through movies to grab some cool info
for index, movie in enumerate(movies_searchlist):
index += 1
movie_id = external_movie_data.get_movie_id(movie)
if movie_id != -1:
movie_json = external_movie_data.get_movie_info(movie_id)
print 'Movie #' + str(index) + ' fetched!'
# Create own 'Movie' object and append to list
construct_movie(json.loads(movie_json), movies)
# Call the helper function to dynamically build a static HTML page
fresh_tomatoes.open_movies_page(movies)
|
import numpy as np
from cottonwood.core.activation import Tanh
from cottonwood.core.initializers import LSUV
from cottonwood.core.layers.generic_layer import GenericLayer
from cottonwood.core.optimizers import SGD
import cottonwood.core.toolbox as tb
class Dense(GenericLayer):
def __init__(
self,
n_outputs,
m_inputs=None,
activation_function=None,
dropout_rate=0,
initializer=None,
previous_layer=None,
optimizer=None,
):
self.previous_layer = previous_layer
if m_inputs is not None:
self.m_inputs = m_inputs
else:
self.m_inputs = self.previous_layer.y.size
self.n_outputs = int(n_outputs)
self.activation_function = activation_function
self.dropout_rate = dropout_rate
if activation_function is None:
self.activation_function = Tanh()
else:
self.activation_function = activation_function
if initializer is None:
self.initializer = LSUV()
else:
self.initializer = initializer
if optimizer is None:
self.optimizer = SGD()
else:
self.optimizer = optimizer
# Choose random weights.
# Inputs match to rows. Outputs match to columns.
# Add one to m_inputs to account for the bias term.
self.weights = self.initializer.initialize(
self.m_inputs + 1, self.n_outputs)
self.reset()
self.regularizers = []
def __str__(self):
"""
Make a descriptive, human-readable string for this layer.
"""
str_parts = [
"fully connected",
f"number of inputs: {self.m_inputs}",
f"number of outputs: {self.n_outputs}",
"activation function:" + tb.indent(
self.activation_function.__str__()),
"initialization:" + tb.indent(self.initializer.__str__()),
"optimizer:" + tb.indent(self.optimizer.__str__()),
]
for regularizer in self.regularizers:
str_parts.append(
"regularizer:" + tb.indent(regularizer.__str__()))
return "\n".join(str_parts)
def add_regularizer(self, new_regularizer):
self.regularizers.append(new_regularizer)
def reset(self):
self.x = np.zeros((1, self.m_inputs))
self.y = np.zeros((1, self.n_outputs))
self.de_dx = np.zeros((1, self.m_inputs))
self.de_dy = np.zeros((1, self.n_outputs))
def forward_pass(self, evaluating=False, **kwargs):
"""
Propagate the inputs forward through the network.
evaluating: boolean
Is this part of a training run or an evaluation run?
"""
if self.previous_layer is not None:
self.x += self.previous_layer.y
# Apply dropout only during training runs.
if evaluating:
dropout_rate = 0
else:
dropout_rate = self.dropout_rate
if dropout_rate > 0:
self.i_dropout = np.zeros(self.x.size, dtype=bool)
self.i_dropout[np.where(
np.random.uniform(size=self.x.size) < dropout_rate)] = True
self.x[:, self.i_dropout] = 0
self.x[:, np.logical_not(self.i_dropout)] *= 1 / (1 - dropout_rate)
else:
self.i_dropout = None
bias = np.ones((1, 1))
x_w_bias = np.concatenate((self.x, bias), axis=1)
v = x_w_bias @ self.weights
self.y = self.activation_function.calc(v)
def backward_pass(self):
"""
Propagate the outputs back through the layer.
"""
bias = np.ones((1, 1))
x_w_bias = np.concatenate((self.x, bias), axis=1)
dy_dv = self.activation_function.calc_d(self.y)
# v = x_w_bias @ self.weights
dv_dw = x_w_bias.transpose()
dv_dx = self.weights.transpose()
dy_dw = dv_dw @ dy_dv
self.de_dw = self.de_dy * dy_dw
for regularizer in self.regularizers:
regularizer.pre_optim_update(self)
self.optimizer.update(self)
for regularizer in self.regularizers:
regularizer.post_optim_update(self)
self.de_dx = (self.de_dy * dy_dv) @ dv_dx
# Remove the bias node from the gradient vector.
de_dx_no_bias = self.de_dx[:, :-1]
# Zero the gradient of any inputs that were dropped out on this run.
if self.i_dropout is not None:
    de_dx_no_bias[:, self.i_dropout] = 0
# Pass the gradient back to the previous layer.
self.previous_layer.de_dy += de_dx_no_bias
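# --- Usage sketch (not part of the original module) ---
# A rough illustration of a single forward pass through a standalone layer,
# assuming LSUV() and SGD() can be constructed with their defaults; the shapes
# are illustrative only.
#
# layer = Dense(n_outputs=4, m_inputs=3)
# layer.x = np.random.uniform(-1, 1, size=(1, 3))
# layer.forward_pass(evaluating=True)
# print(layer.y.shape)  # (1, 4)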
|
import math
import numpy as np
import tensorflow as tf
from ..learning.nn.injectors import SkipGramInjector
def sensor2vec(num_sensors, sensor_event_list, embedding_size=20,
batch_size=128, num_skips=8, skip_window=5,
num_neg_samples=64, learning_rate=1.0):
"""Sensor to Vector
"""
if num_neg_samples > num_sensors:
num_neg_samples = num_sensors
# Initialize a SkipGram Injector
injector = SkipGramInjector(sensor_event_list, batch_size, num_skips, skip_window)
# Build Training Model
graph = tf.Graph()
with graph.as_default():
# Input Place Holder
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# As we normally do not have too many sensors - it is OK to use all of them
valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)
# Only CPU supports NCE loss
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([num_sensors, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([num_sensors]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_neg_samples,
num_classes=num_sensors))
# Construct the Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in range(num_steps):
batch_inputs, batch_labels = injector.next_batch()
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
final_embeddings = normalized_embeddings.eval()
final_similarity = 1 - similarity.eval()
distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None]
return final_embeddings, distance_matrix
def sensor2vec_data(sensor_list, event_list, embedding_size=20,
batch_size=128, num_skips=8, skip_window=5,
num_neg_samples=64, learning_rate=1.0, ignore_off=True):
"""Transform sensor to high dimensional space
Similar to word embeddings used in natural language processing systems, we want
to represent sensors in a synthesized vector space as well, instead of using
arbitrary labels for each sensor that carry no useful information.
The methods used to find word embeddings can be classified into two categories:
count-based methods (Latent Semantic Analysis) and predictive models.
In this implementation, which maps sensors into a high-dimensional vector space,
we use a skip-gram model with negative sampling.
Args:
sensor_list (:obj:`list` of :obj:`dict`): List of dictionary containing
sensor information.
event_list (:obj:`list` of :obj:`dict`): List of events.
embedding_size (:obj:`int`): The size of embedding vector.
batch_size (:obj:`int`): The number of batch used in training
num_skips (:obj:`int`): How many times to re-use an input to generate a label
in skip-gram model.
skip_window (:obj:`int`): How many items to consider left or right in skip-gram
model.
num_neg_samples (:obj:`int`): Number of negative samples to draw from the vocabulary.
ignore_off (:obj:`bool`): Ignore motion sensor events with ``Off`` state in the event list.
Please refer to :func:`sensor_distance` for an example of ``sensor_list``.
Please refer to :func:`sensor_mi_distance` for an example of ``event_list``.
"""
# Put sensor in hash table for fast fetch of index
num_sensors = len(sensor_list)
# Negative samples cannot exceed sensor numbers
if num_neg_samples > num_sensors:
num_neg_samples = num_sensors
# Store sensor ID in hash table for faster access
sensor_dict = {}
for i in range(num_sensors):
sensor_dict[sensor_list[i]['name']] = i
# Generate event sensor list
event_sensor_list = []
for event_entry in event_list:
if ignore_off and event_entry['sensor_status'].upper() == "OFF":
continue
event_sensor_list.append(sensor_dict[event_entry['sensor_id']])
# Initialize a SkipGram Injector
injector = SkipGramInjector(event_sensor_list, batch_size, num_skips, skip_window)
# Build Training Model
graph = tf.Graph()
with graph.as_default():
# Input Place Holder
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# As we normally do not have too many sensors - it is OK to use all of them
valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)
# Only CPU supports NCE loss
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([num_sensors, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([num_sensors]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_neg_samples,
num_classes=num_sensors))
# Construct the Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in range(num_steps):
batch_inputs, batch_labels = injector.next_batch()
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(num_sensors):
valid_sensor = sensor_list[i]['name']
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_sensor
for k in range(top_k):
close_sensor = sensor_list[nearest[k]]['name']
log_str = "%s %s," % (log_str, close_sensor)
print(log_str)
final_embeddings = normalized_embeddings.eval()
final_similarity = 1 - similarity.eval()
distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:,None]
# try:
# from sklearn.manifold import TSNE
# import matplotlib.pyplot as plt
#
# tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
# low_dim_embs = tsne.fit_transform(final_embeddings)
# labels = [sensor_list[i]['name'] for i in range(num_sensors)]
#
# assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
# plt.figure(figsize=(18, 18)) # in inches
# for i, label in enumerate(labels):
# x, y = low_dim_embs[i, :]
# plt.scatter(x, y)
# plt.annotate(label,
# xy=(x, y),
# xytext=(5, 2),
# textcoords='offset points',
# ha='right',
# va='bottom')
# plt.show()
# except ImportError:
# print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
return final_embeddings, distance_matrix
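# --- Usage sketch (not part of the original module) ---
# A rough illustration of the expected input structures (a TF1-style session
# and a long training loop are required); the sensor names and events below
# are hypothetical.
#
# sensors = [{'name': 'M001'}, {'name': 'M002'}, {'name': 'D001'}]
# events = [{'sensor_id': 'M001', 'sensor_status': 'ON'},
#           {'sensor_id': 'M002', 'sensor_status': 'ON'},
#           {'sensor_id': 'D001', 'sensor_status': 'OPEN'}]
# embeddings, distances = sensor2vec_data(sensors, events, embedding_size=10)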
|
# Uses python3
import sys
""" def get_majority_element(a, left, right):
if left == right:
return -1
if left + 1 == right:
return a[left]
#write your code here
return -1 """
def get_majority_element_hash_approach(a, n):
new = {}
for e in a:
if e not in new:
new[e] = 1
else:
new[e] += 1
for keys, val in new.items():
if val > n / 2:
return 1
return 0
if __name__ == '__main__':
n = int(input())
a = list(map(int, input().split()))
# if get_majority_element(a, 0, n) != -1:
if get_majority_element_hash_approach(a, n):
print(1)
else:
print(0)
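# Example (hypothetical input): n = 5 and a = [2, 3, 9, 2, 2] prints 1,
# because 2 occurs 3 times, which is more than n / 2 = 2.5.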
|
from kivy.uix.button import Button
from kivy.properties import StringProperty, BooleanProperty, NumericProperty, ObjectProperty
from kivy.graphics import Color, Rectangle, RoundedRectangle, Ellipse
from kivy.lang import Builder
Builder.load_string('''
<FlatButton>:
background_normal: ''
background_color: [0,0,0,0]
text_size: self.size
valign: 'middle'
halign: 'center'
markup: True
''')
class FlatButton(Button):
    """A normal :class:`kivy.uix.button.Button` with all
    the visual representations removed. This button
    basically just looks like a label but, of course, unlike
    a label, it is clickable.
    Since this inherits from a normal Button, it
    supports all of its properties.
    Usage
    ---------
    from ukivy.button import FlatButton
    ...
    btn = FlatButton(text='myButton')
    some_widget.add_widget(btn)
    ...
    """
    pass
class RoundedButton(FlatButton):
    radius = NumericProperty(10)
    def update_back(self):
        """(Re)draw the rounded background so it matches the button's
        current position, size and background colour."""
        self.canvas.before.clear()
        with self.canvas.before:
            self.color = Color(rgba=self.background_color)
            self.rect = RoundedRectangle(
                pos=self.pos,
                size=self.size,
                # RoundedRectangle expects a list of corner radii.
                radius=[self.radius])
    def on_radius(self, _, value):
        """When the radius is set/changed, this function
        is called to update the radius of the button on the
        canvas
        Parameters
        ----------
        _ : widget
            This is usually the instance calling the function,
            we don't care about this
        value : number
            The value of the radius property
        Returns
        -------
        None
        """
        rect = getattr(self, 'rect', None)
        if rect is not None:
            rect.radius = [value]
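# A minimal, hypothetical usage sketch (assumes a running Kivy App); the
# pos/size bindings keep the rounded background in sync with the button.
if __name__ == '__main__':
    from kivy.app import App
    from kivy.uix.boxlayout import BoxLayout
    class _DemoApp(App):
        def build(self):
            root = BoxLayout(padding=20)
            btn = RoundedButton(text='myButton')
            btn.background_color = [0.2, 0.5, 0.9, 1]
            btn.bind(pos=lambda *args: btn.update_back(),
                     size=lambda *args: btn.update_back())
            root.add_widget(btn)
            return root
    _DemoApp().run()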
|
"""
ASGI config for reportsmanagement project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportsmanagement.settings')
application = get_asgi_application()
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-nao&q&bu0i4@-&!nep#b%6x=-_f@-4hu)tb!09w8nujq5nwma*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
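# A hedged sketch (not generated by startproject) of how the SECURITY WARNINGS
# above are commonly addressed: read overrides from environment variables and
# fall back to the development defaults defined in this file.
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
DEBUG = os.environ.get('DJANGO_DEBUG', '1') == '1'
ALLOWED_HOSTS = [h for h in os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',') if h]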
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_1.models.node_driveconfig_node_alert import NodeDriveconfigNodeAlert # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_allow import NodeDriveconfigNodeAllow # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_automatic_replacement_recognition import NodeDriveconfigNodeAutomaticReplacementRecognition # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_instant_secure_erase import NodeDriveconfigNodeInstantSecureErase # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_log import NodeDriveconfigNodeLog # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_reboot import NodeDriveconfigNodeReboot # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_spin_wait import NodeDriveconfigNodeSpinWait # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_stall import NodeDriveconfigNodeStall # noqa: F401,E501
class ClusterNodeDriveDConfig(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alert': 'NodeDriveconfigNodeAlert',
'allow': 'NodeDriveconfigNodeAllow',
'automatic_replacement_recognition': 'NodeDriveconfigNodeAutomaticReplacementRecognition',
'instant_secure_erase': 'NodeDriveconfigNodeInstantSecureErase',
'log': 'NodeDriveconfigNodeLog',
'reboot': 'NodeDriveconfigNodeReboot',
'spin_wait': 'NodeDriveconfigNodeSpinWait',
'stall': 'NodeDriveconfigNodeStall'
}
attribute_map = {
'alert': 'alert',
'allow': 'allow',
'automatic_replacement_recognition': 'automatic_replacement_recognition',
'instant_secure_erase': 'instant_secure_erase',
'log': 'log',
'reboot': 'reboot',
'spin_wait': 'spin_wait',
'stall': 'stall'
}
def __init__(self, alert=None, allow=None, automatic_replacement_recognition=None, instant_secure_erase=None, log=None, reboot=None, spin_wait=None, stall=None): # noqa: E501
"""ClusterNodeDriveDConfig - a model defined in Swagger""" # noqa: E501
self._alert = None
self._allow = None
self._automatic_replacement_recognition = None
self._instant_secure_erase = None
self._log = None
self._reboot = None
self._spin_wait = None
self._stall = None
self.discriminator = None
if alert is not None:
self.alert = alert
if allow is not None:
self.allow = allow
if automatic_replacement_recognition is not None:
self.automatic_replacement_recognition = automatic_replacement_recognition
if instant_secure_erase is not None:
self.instant_secure_erase = instant_secure_erase
if log is not None:
self.log = log
if reboot is not None:
self.reboot = reboot
if spin_wait is not None:
self.spin_wait = spin_wait
if stall is not None:
self.stall = stall
@property
def alert(self):
"""Gets the alert of this ClusterNodeDriveDConfig. # noqa: E501
Configuration setting for drive alerts. # noqa: E501
:return: The alert of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeAlert
"""
return self._alert
@alert.setter
def alert(self, alert):
"""Sets the alert of this ClusterNodeDriveDConfig.
Configuration setting for drive alerts. # noqa: E501
:param alert: The alert of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeAlert
"""
self._alert = alert
@property
def allow(self):
"""Gets the allow of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings for drive formatting. # noqa: E501
:return: The allow of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeAllow
"""
return self._allow
@allow.setter
def allow(self, allow):
"""Sets the allow of this ClusterNodeDriveDConfig.
Configuration settings for drive formatting. # noqa: E501
:param allow: The allow of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeAllow
"""
self._allow = allow
@property
def automatic_replacement_recognition(self):
"""Gets the automatic_replacement_recognition of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings for Automatic Replacement Recognition (ARR). # noqa: E501
:return: The automatic_replacement_recognition of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeAutomaticReplacementRecognition
"""
return self._automatic_replacement_recognition
@automatic_replacement_recognition.setter
def automatic_replacement_recognition(self, automatic_replacement_recognition):
"""Sets the automatic_replacement_recognition of this ClusterNodeDriveDConfig.
Configuration settings for Automatic Replacement Recognition (ARR). # noqa: E501
:param automatic_replacement_recognition: The automatic_replacement_recognition of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeAutomaticReplacementRecognition
"""
self._automatic_replacement_recognition = automatic_replacement_recognition
@property
def instant_secure_erase(self):
"""Gets the instant_secure_erase of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings for instant secure erase (ISE). # noqa: E501
:return: The instant_secure_erase of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeInstantSecureErase
"""
return self._instant_secure_erase
@instant_secure_erase.setter
def instant_secure_erase(self, instant_secure_erase):
"""Sets the instant_secure_erase of this ClusterNodeDriveDConfig.
Configuration settings for instant secure erase (ISE). # noqa: E501
:param instant_secure_erase: The instant_secure_erase of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeInstantSecureErase
"""
self._instant_secure_erase = instant_secure_erase
@property
def log(self):
"""Gets the log of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings for drive statistics logs. # noqa: E501
:return: The log of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeLog
"""
return self._log
@log.setter
def log(self, log):
"""Sets the log of this ClusterNodeDriveDConfig.
Configuration settings for drive statistics logs. # noqa: E501
:param log: The log of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeLog
"""
self._log = log
@property
def reboot(self):
"""Gets the reboot of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings for a node reboot due to a drive error. # noqa: E501
:return: The reboot of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeReboot
"""
return self._reboot
@reboot.setter
def reboot(self, reboot):
"""Sets the reboot of this ClusterNodeDriveDConfig.
Configuration settings for a node reboot due to a drive error. # noqa: E501
:param reboot: The reboot of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeReboot
"""
self._reboot = reboot
@property
def spin_wait(self):
"""Gets the spin_wait of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings for sleeping the drive daemon before node is rescanned. # noqa: E501
:return: The spin_wait of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeSpinWait
"""
return self._spin_wait
@spin_wait.setter
def spin_wait(self, spin_wait):
"""Sets the spin_wait of this ClusterNodeDriveDConfig.
Configuration settings for sleeping the drive daemon before node is rescanned. # noqa: E501
:param spin_wait: The spin_wait of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeSpinWait
"""
self._spin_wait = spin_wait
@property
def stall(self):
"""Gets the stall of this ClusterNodeDriveDConfig. # noqa: E501
Configuration settings to evaluate a drive stall. # noqa: E501
:return: The stall of this ClusterNodeDriveDConfig. # noqa: E501
:rtype: NodeDriveconfigNodeStall
"""
return self._stall
@stall.setter
def stall(self, stall):
"""Sets the stall of this ClusterNodeDriveDConfig.
Configuration settings to evaluate a drive stall. # noqa: E501
:param stall: The stall of this ClusterNodeDriveDConfig. # noqa: E501
:type: NodeDriveconfigNodeStall
"""
self._stall = stall
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClusterNodeDriveDConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# ******************* BLOG MODULE ****************************** #
# ** Created by Yossep
# ** github: https://github.com/j2B237/
# ** Project : Joblogueur
# ** Description:
#
# Within this module we have many functions designed to help display posts
# Methods such as :
# display all posts
# display posts per category
# display individual post
# register email user for the newsletter
# ************************************************************************ #
# Third party import
from flask import Blueprint, render_template, flash, request, redirect, url_for
from flask_mail import Message
# Local import
from FlaskApp.models import Post, Category, Moderator, Comment
from FlaskApp.forms import CommentForm
from . import db, ext, mail
bp = Blueprint('blog', __name__)
# Fake data to seed the website view
fake_Category = [
{
'id': 1,
'category_name': "10 bonnes raisons",
'color': 'primary'
},
{
'id': 2,
'category_name': "Comment réussir ?",
'color': 'success',
},
{
'id': 3,
'category_name': "Offres et formations",
'color': 'warning'
}
]
fake_moderators = [
{ 'id': 1,
'username': 'admin',
'email': 'admin@exemple.com',
'password': 'admin237',
'address1': 'address1',
'address2': 'address2',
'city': 'city',
'state': 'state',
'country': 'country',
'zipcode': 'zipcode',
'is_admin': True,
'image_file': 'default.jpg',
'created_on': '21/02/2021',
'posts': []
}
]
fake_posts = [
{
'id': 1,
'title': 'Comment réussir à gagner de l\'argent sur internet',
        'introduction': 'Qu’ils soient aujourd’hui milliardaires ou non, reconnus à l’international ou en France.',
        'p_intro': 'Ils ont tous commencé simplement. Pour toi, modeste citoyen qui voudrait gagner de l\'argent pour arrondir tes fins du mois, nous avons sélectionné une liste de sites et de bonnes astuces à essayer',
'h1': "",
'p_h1': "",
'h2': "",
'p_h2': "",
'h3': "",
'p_h3': "",
'h4': "",
'p_h4': "",
'h5': "",
'p_h5': "",
'conclusion': "",
'p_conclusion': "",
'date_posted': '10/02/2021',
'display_or_not': True,
'moderator_id': 1,
'category_id': 1,
'comments': [],
}
]
fake_comments = [
{
'id': 1,
'author_name': 'admin',
'email_author': 'admin@exemple.com',
'content': 'C\'est bon tout ca.',
'date_posted': '12/02/2021',
'approved_or_not': True,
'post_id': 1
}
]
# Create a sitemap
@ext.register_generator
def index():
yield 'index', {}
# Home blog view
@bp.route('/')
def index():
global fake_moderators, fake_comments, fake_posts, fake_Category
categories = Category.query.all()
moderators = Moderator.query.all()
posts_to_display = Post.query.all()
post_banner = Post.query.join(Category).filter(Category.category_name == "BUSINESS").\
order_by(Post.date_posted.desc()).first()
last_post = Post.query.join(Category).filter(Category.category_name == "TUTORIELS").order_by(
Post.date_posted.desc()).first()
posts_for_cards = Post.query.filter_by(display_or_not=True).order_by(Post.date_posted.desc())[:4]
post_business = Post.query.join(Category).filter(Category.category_name == "BUSINESS").\
order_by(Post.date_posted.desc()).first()
post_formation = Post.query.join(Category).filter(Category.category_name == "FORMATIONS"). \
order_by(Post.date_posted.desc()).first()
post_tutoriel = Post.query.join(Category).filter(Category.category_name == "TUTORIELS"). \
order_by(Post.date_posted.desc()).first()
post_ressource = Post.query.join(Category).filter(Category.category_name == "RESSOURCES"). \
order_by(Post.date_posted.desc()).first()
image_posts = []
for post in posts_for_cards:
image = post.img_title
image_posts.append(image)
return render_template('blog/blog.html', title="Accueil - Joblogueur",
categories=categories, last_post=last_post,moderators=moderators,
images=image_posts, posts_to_display=posts_to_display,
post_banner=post_banner, post_business=post_business,
post_formation=post_formation, post_tutoriel=post_tutoriel, post_ressource=post_ressource)
# Display individual post
@bp.route('/publication/<post_title>', methods=['POST', 'GET'])
def post(post_title):
form = CommentForm()
titre = post_title.replace('-', ' ')
    # Look up the post by its title
post = Post.query.filter_by(title=titre).first()
moderators = Moderator.query.all()
    # Fetch all comments linked to this post
comments_to_display = Comment.query.join(Post).filter(Comment.post_id == post.id).\
order_by(Comment.date_posted.desc()).all()
    # List all categories
categories = Category.query.all()
nbr_comments = 0
    # Count the approved comments on this post
for comment in post.comments:
if comment.approved_or_not:
nbr_comments += 1
if form.validate_on_submit():
search_comments = Comment.query.filter_by(email_author=form.author_email.data).all()
ids = []
for comment in search_comments:
ids.append(comment.post_id)
if post.id in ids:
flash("Vous avez deja commenté cet article", "info")
        # Create the comment
else:
new_comment = Comment(name_author=form.author.data, email_author=form.author_email.data,
content=form.content.data, post_id=post.id, approved_or_not=False)
db.session.add(new_comment)
db.session.commit()
form.author.data = ""
form.author_email.data = ""
form.content.data = ""
flash("Votre commentaire est en cours de validation", "success")
return render_template('blog/blog_post.html', title=titre + " | Joblogueur", post=post, form=form,
nbr_comments=int(nbr_comments), categories=categories, comments=comments_to_display,
titre=post_title)
form.author.data = ""
form.author_email.data = ""
form.content.data = ""
image_file = url_for('static', filename='upload/'+str(post.img_title))
return render_template("blog/blog_post.html", title=titre + " | Joblogueur", post=post, form=form,
nbr_comments=int(nbr_comments), categories=categories,
comments=comments_to_display, image=image_file, moderators=moderators,
titre=post_title)
# Display post per category
@bp.route('/publications/<category_name>')
def post_per_category(category_name):
page = request.args.get('page', 1, type=int)
search_category = category_name.replace('-', ' ')
categories = Category.query.all()
posts = Post.query.join(Category).filter(Category.category_name == search_category).\
order_by(Post.date_posted.desc()).paginate(per_page=7, page=page)
image_posts = []
for post in posts.items:
image = post.img_title
image_posts.append(image)
return render_template("blog/posts_per_category.html", title=search_category + " | Joblogueur", posts=posts,
categories=categories, search_category=search_category, images=image_posts)
# Register user for daily news
@bp.route('/newsletter-invitation', methods=['POST','GET'])
def newsletter_invitation():
categories = Category.query.all()
posts_per_category = []
for category in categories:
last_post = Post.query.join(Category).filter(Post.category_id == category.id).first()
posts_per_category.append(last_post)
if request.method == 'POST':
usermail = request.form['usermail']
content = """
Salut très cher(e),
Comment vas-tu ?
Il y'a du nouveau sur ton blog préféré www.digitalschools.sn/blog
Ci-dessous une liste des publications que tu as surement manqués:
1- https://3df5e7df0cdb.ngrok.io/blog/publication/10-raisons-pourquoi-toute-entreprise-doit-cr%C3%A9er-ou-avoir-un-site-Web
2- https://3df5e7df0cdb.ngrok.io/blog/publication/10-bonnes-raisons-d%27apprendre-%C3%A0-son-enfant-%C3%A0-coder
3- https://3df5e7df0cdb.ngrok.io/blog/publication/FLASK-1.0.0
Merci pour ton temps et ta perséverance dans la lecture quotidienne.
Youssouf BINYOUM (digitalschools.sn)
"""
msg = Message("Nouvelle publication sur digitalschools.sn/blog", recipients=[usermail],
sender='contact@digitalschools.sn')
msg.body = content
mail.send(msg)
print(request.args)
return redirect(url_for('blog.index'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
from sites import foolSlide
from sites import readcomicOnlineto
from sites import comicNaver
from sites import mangaHere
from sites import rawSenManga
from sites import mangaFox
from sites import omgBeauPeep
from sites import mangaReader
from sites import mangaEden
from sites import acQQ
from sites import stripUtopia
from sites import readComicBooksOnline
from sites import readComicsWebsite
from sites import batoto
from sites import hqbr
from sites import comicextra
from sites import readComicsIO
from sites import japscan
from sites import manganelo
import globalFunctions
class Honcho(object):
def comic_language_resolver(self, language_code):
# Will return the Language Name corresponding to the language code.
language_dict = {
'0': 'English',
'1': 'Italian',
'2': 'Spanish',
'3': 'French',
'4': 'German',
'5': 'Portuguese',
'6': 'Turkish',
'7': 'Indonesian',
'8': 'Greek',
'9': 'Filipino',
'10': 'Polish',
'11': 'Thai',
'12': 'Malay',
            '13': 'Hungarian',
            '14': 'Romanian',
            '15': 'Arabic',
'16': 'Hebrew',
'17': 'Russian',
'18': 'Vietnamese',
'19': 'Dutch',
'20': 'Bengali',
'21': 'Persian',
'22': 'Czech',
'23': 'Brazilian',
'24': 'Bulgarian',
'25': 'Danish',
'26': 'Esperanto',
'27': 'Swedish',
'28': 'Lithuanian',
'29': 'Other'
}
return language_dict[language_code]
def checker(self, comic_url, download_directory, chapter_range, **kwargs):
user_name = kwargs.get("username")
password = kwargs.get("password")
current_directory = kwargs.get("current_directory")
log_flag = kwargs.get("logger")
sorting = kwargs.get("sorting_order")
comic_language = kwargs.get("comic_language")
print_index = kwargs.get("print_index")
if log_flag is True:
logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
logging.debug("Comic Url : %s" % comic_url)
domain = urlparse(comic_url).netloc
logging.debug("Selected Domain : %s" % domain)
# Remove the "/" from ending to make checking URL for Full Series or Single Chapter easier.
if comic_url[-1] == "/":
comic_url = comic_url[:-1]
if domain in ["yomanga.co", "gomanga.co"]:
foolSlide.FoolSlide(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"))
return 0
elif domain in ["www.readcomiconline.to", "readcomiconline.to"]:
readcomicOnlineto.ReadComicOnlineTo(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
image_quality=kwargs.get("image_quality"),
print_index=print_index)
return 0
elif domain in ["www.comic.naver.com", "comic.naver.com"]:
comicNaver.ComicNaver(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangahere.co", "mangahere.co", "www.mangahere.cc", "mangahere.cc"]:
mangaHere.MangaHere(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.raw.senmanga.com", "raw.senmanga.com"]:
rawSenManga.RawSenaManga(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangafox.me", "mangafox.me", "www.mangafox.la", "mangafox.la", "www.fanfox.net",
"fanfox.net"]:
mangaFox.MangaFox(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.omgbeaupeep.com", "omgbeaupeep.com", "www.otakusmash.com", "otakusmash.com"]:
omgBeauPeep.OmgBeauPeep(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO --print-index -i http://ac.qq.com/Comic/comicInfo/id/547059?trace_id=907_27.156.162.231_1539265645 broken?
elif domain in ["www.ac.qq.com", "ac.qq.com"]:
acQQ.AcQq(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range,
print_index=print_index)
return 0
elif domain in ["www.striputopija.blogspot.in", "striputopija.blogspot.in", "www.striputopija.blogspot.com",
"striputopija.blogspot.com"]:
stripUtopia.StripUtopia(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range,
print_index=print_index)
return 0
elif domain in ["www.mangareader.net", "mangareader.net"]:
mangaReader.MangaReader(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.readcomicbooksonline.net", "readcomicbooksonline.net", "www.readcomicbooksonline.org",
"readcomicbooksonline.org"]:
readComicBooksOnline.ReadComicBooksOnline(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO seems broken
elif domain in ["www.readcomics.website", "readcomics.website"]:
readComicsWebsite.ReadComicsWebsite(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.japscan.to"]:
japscan.Japscan(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.hqbr.com.br", "hqbr.com.br"]:
hqbr.Hqbr(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.comicextra.com", "comicextra.com"]:
comicextra.ComicExtra(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO seems broken
elif domain in ["www.readcomics.io", "readcomics.io"]:
readComicsIO.ReadComicsIO(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.kissmanga.com", "kissmanga.com"]:
# kissManga.KissManga(manga_url = comic_url, logger = logging,
# current_directory = current_directory, sorting_order = sorting)
print("Under Development!")
return 0
elif domain in ["www.bato.to", "bato.to"]:
batoto.Batoto(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"), username=user_name, password=password,
comic_language=self.comic_language_resolver(comic_language),
print_index=print_index)
return 0
elif domain in ["manganelo.com", "mangakakalot.com"]:
manganelo.Manganelo(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangaeden.com"]:
if print_index:
print("please use -find and -cid instead!")
return -1
mangaEden.MangaEden(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"))
return 0
else:
print("%s is not supported at the moment. You can request it on the Github repository." % domain)
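# A small usage sketch (not in the original file) of the language resolver
# defined above; the codes map straight through the dictionary.
if __name__ == "__main__":
    honcho = Honcho()
    print(honcho.comic_language_resolver('0'))   # -> English
    print(honcho.comic_language_resolver('17'))  # -> Russian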
|
from django.urls import path
from . import views
app_name = 'subscribers'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('manage/', views.manage, name='manage'),
path('goodbye/<uuid:mailing_list_uuid>/', views.goodbye, name='goodbye'),
path('subscribe/<uuid:mailing_list_uuid>/', views.subscribe, name='subscribe'),
path('subscribe/<uuid:mailing_list_uuid>/confirm/', views.confirm_subscription, name='confirm_subscription'),
path('subscribe/<uuid:mailing_list_uuid>/confirm/<str:token>/', views.confirm_double_optin_token, name='confirm_double_optin_token'), # noqa
path('unsubscribe/<uuid:mailing_list_uuid>/', views.unsubscribe_manual, name='unsubscribe_manual'),
path('unsubscribe/<uuid:mailing_list_uuid>/<uuid:subscriber_uuid>/<uuid:campaign_uuid>/', views.unsubscribe, name='unsubscribe'), # noqa
path('track/open/<uuid:email_uuid>/<uuid:subscriber_uuid>/', views.track_open, name='open'),
path('track/click/<uuid:link_uuid>/<uuid:subscriber_uuid>/', views.track_click, name='click'),
]
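# Illustrative only (hypothetical helper and object): reversing these routes by
# name, assuming this URLconf is included under the 'subscribers' namespace
# declared above and that `mailing_list` exposes a `uuid` field.
def _example_reverse(mailing_list):
    from django.urls import reverse
    return reverse('subscribers:subscribe',
                   kwargs={'mailing_list_uuid': mailing_list.uuid})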
|
#!/usr/bin/env python
import random
import argparse
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import gym
import gym.spaces
import numpy as np
log = gym.logger
log.set_level(gym.logger.INFO)
LATENT_VECTOR_SIZE = 100
DISCR_FILTERS = 64
GENER_FILTERS = 64
BATCH_SIZE = 16
# dimension to which the input image will be rescaled
IMAGE_SIZE = 64
LEARNING_RATE = 0.0001
REPORT_EVERY_ITER = 25
SAVE_IMAGE_EVERY_ITER = 1000
class InputWrapper(gym.ObservationWrapper):
"""
Preprocessing of input numpy array:
1. resize image into predefined size
    2. move the colour channel axis to the first position
"""
def __init__(self, *args):
super(InputWrapper, self).__init__(*args)
assert isinstance(self.observation_space, gym.spaces.Box)
old_space = self.observation_space
self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),
dtype=np.float32)
def observation(self, observation):
# resize image
new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))
        # move the colour channel to the front: (H, W, C) -> (C, H, W), i.e. (64, 64, 3) -> (3, 64, 64) after the resize above
new_obs = np.moveaxis(new_obs, 2, 0)
return new_obs.astype(np.float32)
class Discriminator(nn.Module):
def __init__(self, input_shape):
super(Discriminator, self).__init__()
        # this pipe converts the image into a single scalar (real/fake probability)
self.conv_pipe = nn.Sequential(
nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
kernel_size=4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS*2),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS * 4),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS * 8),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
kernel_size=4, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self, x):
conv_out = self.conv_pipe(x)
return conv_out.view(-1, 1).squeeze(dim=1)
class Generator(nn.Module):
def __init__(self, output_shape):
super(Generator, self).__init__()
# pipe deconvolves input vector into (3, 64, 64) image
self.pipe = nn.Sequential(
nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
kernel_size=4, stride=1, padding=0),
nn.BatchNorm2d(GENER_FILTERS * 8),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS * 4),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS * 2),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
kernel_size=4, stride=2, padding=1),
nn.Tanh()
)
def forward(self, x):
return self.pipe(x)
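# Illustrative shape check (hypothetical helper, never called): a latent batch
# of shape (BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1) is upsampled by the five
# ConvTranspose2d layers 1 -> 4 -> 8 -> 16 -> 32 -> 64 pixels per side.
def _check_generator_shape():
    gen = Generator(output_shape=(3, IMAGE_SIZE, IMAGE_SIZE))
    z = torch.randn(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1)
    assert gen(z).shape == (BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE)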
def iterate_batches(envs, batch_size=BATCH_SIZE):
batch = [e.reset() for e in envs]
env_gen = iter(lambda: random.choice(envs), None)
while True:
e = next(env_gen)
obs, reward, is_done, _ = e.step(e.action_space.sample())
if np.mean(obs) > 0.01:
batch.append(obs)
if len(batch) == batch_size:
                # Normalise input to the range [-1, 1]
batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0
yield torch.tensor(batch_np)
batch.clear()
if is_done:
e.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation")
parser.add_argument("--cuda", default=True, action='store_true', help="Enable cuda computation")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
input_shape = envs[0].observation_space.shape
net_discr = Discriminator(input_shape=input_shape).to(device)
net_gener = Generator(output_shape=input_shape).to(device)
objective = nn.BCELoss()
gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
writer = SummaryWriter()
gen_losses = []
dis_losses = []
iter_no = 0
true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
for batch_v in iterate_batches(envs):
# generate extra fake samples, input is 4D: batch, filters, x, y
gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
batch_v = batch_v.to(device)
gen_output_v = net_gener(gen_input_v)
# train discriminator
dis_optimizer.zero_grad()
dis_output_true_v = net_discr(batch_v)
dis_output_fake_v = net_discr(gen_output_v.detach())
dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
dis_loss.backward()
dis_optimizer.step()
dis_losses.append(dis_loss.item())
# train generator
gen_optimizer.zero_grad()
dis_output_v = net_discr(gen_output_v)
gen_loss_v = objective(dis_output_v, true_labels_v)
gen_loss_v.backward()
gen_optimizer.step()
gen_losses.append(gen_loss_v.item())
iter_no += 1
if iter_no % REPORT_EVERY_ITER == 0:
log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
gen_losses = []
dis_losses = []
if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
writer.add_image("fake", vutils.make_grid(gen_output_v.data[:64], normalize=True), iter_no)
writer.add_image("real", vutils.make_grid(batch_v.data[:64], normalize=True), iter_no)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import MySQLdb
import datetime
import random
brand_data = {}
today = datetime.date.today()
report_title = "milk/milk_{}.html".format(today.strftime("%Y_%m"))
first = today.replace(day=1)
last_year = first - datetime.timedelta(days=365)
rang_low = today.strftime("%Y-01-01")
rang_high = today.strftime("%Y-12-31")
this_year = today.year
conn= MySQLdb.connect(
host='localhost',
port = 3306,
user='br',
passwd='123456',
db ='brandrank',
)
cur = conn.cursor()
query = ["select",
"name,",
"id",
"from rankx_brand"
]
sql_query = " ".join(query)
print sql_query
results = cur.execute(sql_query)
info = cur.fetchmany(results)
sql_template = "insert into rankx_milk (rank, pv,taobao_sales,jd_sales,tmall_sales,vip_sales,amazon_sales,weibo_fans,weibo_forward, weixin_fans,pub_date, brand_name_id, brand_record) values"
pub_dates = []
for d in ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']:
for y in ['2013', '2014', '2015']:
pub_dates.append('{}-{}-02'.format(y, d))
for name, bid in info:
for d in pub_dates:
pv = 10000 + random.randint(1, 10000)
taobao_sales = 20000 + random.randint(1, 10000)
jd_sales = 20000 + random.randint(1, 10000)
tmall_sales = 20000 + random.randint(1, 10000)
vip_sales = 20000 + random.randint(1, 10000)
amazon_sales = 20000 + random.randint(1, 10000)
weibo_fans = 20000 + random.randint(1, 10000)
weibo_forward = 20000 + random.randint(1, 10000)
weixin_fans = 20000 + random.randint(1, 10000)
brand_record = name.replace(' ', '_').replace('-', '_') + '_' + d.replace('-', '_')
sql = sql_template + "(0, {}, {}, {}, {}, {}, {}, {}, {}, {}, '{}', {}, '{}');".format(
pv, taobao_sales, jd_sales, tmall_sales, vip_sales, amazon_sales, weibo_fans,
weibo_forward, weixin_fans, d, bid, brand_record
)
print sql
cur.execute(sql)
cur.close()
conn.commit()
conn.close()
|
_base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
work_dir = 'work_dirs/coco/gfl/gfl_r50_fpn_2x_coco'
|
import timeit
class timer(object):
    def __init__(self, repeats=3, loops=1, gc=False):
        self.repeats = repeats
        self.loops = loops
        self.gc = gc
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Never suppress exceptions raised inside the with-block.
        return False
    def results(self):
        # Re-run the most recently timed call and return its result.
        return self.func()
    def time(self, func, *args, **kargs):
        if self.gc is True:
            self.gbcol = "gc.enable()"
        else:
            self.gbcol = "gc.disable()"
        self.funcname = func.__name__
        pfunc = lambda: func(*args, **kargs)
        # Remember the wrapped call so results() can re-run it.
        self.func = pfunc
        self.elapsed = timeit.repeat(pfunc, self.gbcol, repeat=self.repeats, number=self.loops)
        self.runtime = min(self.elapsed)
        return [self.runtime, self.funcname]
    def printTime(self):
        result = "%s finished in %.5fs (%s loops, repeated %s times): %.5fs per loop (with %s)" % (self.funcname, self.runtime, self.loops, self.repeats, self.runtime / self.loops, self.gbcol)
        print(result)
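# A minimal usage sketch (the `build_squares` helper below is hypothetical,
# defined only so there is something to benchmark):
if __name__ == '__main__':
    def build_squares(n):
        return [i * i for i in range(n)]
    with timer(repeats=5, loops=10) as t:
        t.time(build_squares, 100000)
        t.printTime()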
|
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
CAMEL_RE = re.compile(r'([A-Z][a-z]+|[A-Z]+(?=[A-Z\s]|$))')
def de_camel_case(text):
"""Convert CamelCase names to human-readable format."""
return ' '.join(w.strip() for w in CAMEL_RE.split(text) if w.strip())
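# Illustrative only (hypothetical helper, never called): expected behaviour of
# de_camel_case on a couple of sample names.
def _de_camel_case_examples():
    assert de_camel_case('KeystoneServiceAPI') == 'Keystone Service API'
    assert de_camel_case('NovaCompute') == 'Nova Compute'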
def list_to_dict(object_list, key_attribute='id'):
"""Converts an object list to a dict
:param object_list: list of objects to be put into a dict
:type object_list: list
:param key_attribute: object attribute used as index by dict
:type key_attribute: str
:return: dict containing the objects in the list
:rtype: dict
"""
return dict((getattr(o, key_attribute), o) for o in object_list)
def length(iterator):
"""A length function for iterators
Returns the number of items in the specified iterator. Note that this
function consumes the iterator in the process.
"""
return sum(1 for _item in iterator)
def check_image_type(image, image_type):
"""Check if image 'type' property matches passed-in image_type.
If image has no 'type' property' return True, as we cannot
be sure what type of image it is.
"""
return (image.properties.get('type', image_type) == image_type)
def filter_items(items, **kwargs):
"""Filters the list of items and returns the filtered list.
Example usage:
>>> class Item(object):
... def __init__(self, index):
... self.index = index
... def __repr__(self):
... return '<Item index=%d>' % self.index
>>> items = [Item(i) for i in range(7)]
>>> list(filter_items(items, index=1))
[<Item index=1>]
>>> list(filter_items(items, index__in=(1, 2, 3)))
[<Item index=1>, <Item index=2>, <Item index=3>]
>>> list(filter_items(items, index__not_in=(1, 2, 3)))
[<Item index=0>, <Item index=4>, <Item index=5>, <Item index=6>]
"""
for item in items:
for name, value in kwargs.items():
if name.endswith('__in'):
if getattr(item, name[:-len('__in')]) not in value:
break
elif name.endswith('__not_in'):
if getattr(item, name[:-len('__not_in')]) in value:
break
else:
if getattr(item, name) != value:
break
else:
yield item
def safe_int_cast(value):
try:
return int(value)
except (TypeError, ValueError):
return 0
|
"""
Harness for visualising a neural network.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
import functools
import graphviz as gv
import os
import networkx as nx
import numpy as np
# Parameters for plotting
_SAVE_FORMAT = 'eps'
# _SAVE_FORMAT = 'png'
_LAYER_SHAPE = 'rectangle'
_IPOP_SHAPE = 'circle'
_LAYER_FONT = 'DejaVuSans'
_IPOP_FONT = 'Helvetica'
_LAYER_FONTSIZE = '16'
_FILLCOLOR = 'transparent'
_IPOP_FONTSIZE = '12'
_IPOP_FILLCOLOR = '#ffc0cb'
_DECISION_FILLCOLOR = '#98fb98'
_GRAPH_STYLES = {
'graph': {
'fontsize': _LAYER_FONTSIZE,
'rankdir': 'TB',
'label': None,
},
'nodes': {
},
'edges': {
'arrowhead': 'open',
'fontsize': '12',
}
}
GV_GRAPH = functools.partial(gv.Graph, format=_SAVE_FORMAT)
GV_DIGRAPH = functools.partial(gv.Digraph, format=_SAVE_FORMAT)
# Utilities for adding nodes, edges and styles -------------------------------------------
def add_nodes(graph, nodes):
""" Adds nodes to the graph. """
for n in nodes:
if isinstance(n, tuple):
graph.node(n[0], **n[1])
else:
graph.node(n)
return graph
def add_edges(graph, edges):
""" Adds edges to the graph. """
# pylint: disable=star-args
for e in edges:
if isinstance(e[0], tuple):
graph.edge(*e[0], **e[1])
else:
graph.edge(*e)
return graph
def apply_styles(graph, styles):
""" Applies styles to the graph. """
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
# Wrappers for tedious routines ----------------------------------------------------------
def _get_ip_layer(layer_idx):
""" Returns a tuple representing the input layer. """
return (str(layer_idx), {'label': 'i/p', 'shape': 'circle', 'style': 'filled',
'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
'fontname': _IPOP_FONT})
def _get_op_layer(layer_idx):
""" Returns a tuple representing the output layer. """
return (str(layer_idx), {'label': 'o/p', 'shape': 'circle', 'style': 'filled',
'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
'fontname': _IPOP_FONT})
def _get_layer(layer_idx, nn, for_pres):
""" Returns a tuple representing the layer label. """
if nn.layer_labels[layer_idx] in ['ip', 'op']:
fill_colour = _IPOP_FILLCOLOR
elif nn.layer_labels[layer_idx] in ['softmax', 'linear']:
fill_colour = _DECISION_FILLCOLOR
else:
fill_colour = _FILLCOLOR
label = nn.get_layer_descr(layer_idx, for_pres)
return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour,
          'style': 'filled', 'fontname': _LAYER_FONT}), (layer_idx, nn.layer_labels[layer_idx], nn.num_units_in_each_layer[layer_idx])
def _get_edge(layer_idx_start, layer_idx_end):
""" Returns a tuple which is an edge. """
return (str(layer_idx_start), str(layer_idx_end))
def _get_edges(conn_mat):
""" Returns all edges. """
starts, ends = conn_mat.nonzero()
return [_get_edge(starts[i], ends[i]) for i in range(len(starts))]
# Main API ------------------------------------------------------------------------------
def visualise_nn(nn, save_file_prefix, fig_label=None, for_pres=True):
""" The main API which will be used to visualise the network. """
# First create nodes in the order
nodes = [_get_layer(i, nn, for_pres)[0] for i in range(nn.num_layers)]
nodes_my = [_get_layer(i, nn, for_pres)[1] for i in range(nn.num_layers)]
#print("nodes_my=",nodes_my)
edges = _get_edges(nn.conn_mat)
edges_my = [(int(s),int(t)) for s,t in edges]
#print("edges_my=",edges_my)
nn_graph = GV_DIGRAPH()
add_nodes(nn_graph, nodes)
add_edges(nn_graph, edges)
graph_styles = _GRAPH_STYLES
graph_styles['graph']['label'] = fig_label
apply_styles(nn_graph, graph_styles)
nn_graph.render(save_file_prefix)
if os.path.exists(save_file_prefix):
# graphviz also creates another file in the name of the prefix. delete it.
os.remove(save_file_prefix)
return tonxgraph(nodes_my,edges_my)
NODE_TYPES = ['ip', 'op', 'linear']
hidden_list = [8,16,32,64,128,256,512,1024]
for i in hidden_list:
NODE_TYPES.append("relu-%s"%i)
NODE_TYPES.append("crelu-%s"%i)
NODE_TYPES.append("leaky-relu-%s"%i)
NODE_TYPES.append("softplus-%s"%i)
NODE_TYPES.append("elu-%s"%i)
NODE_TYPES.append("logistic-%s"%i)
NODE_TYPES.append("tanh-%s"%i)
def tonxgraph(nodes_my,edges_my):
g = {"x":[],"edge_index":[],"edge_attr":[]}
for n_idx, type, num_hidden in nodes_my:
n_idx = int(n_idx)
if type=='ip' or type=='op' or type=='linear':
g["x"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(type)])
else:
num_hidden = np.random.choice(hidden_list)
g["x"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index("%s-%s"%(type,num_hidden))])
row = []
col = []
for s, t in edges_my:
row.append(s)
col.append(t)
g["edge_attr"].append(np.ones(1))
g["edge_index"].append(row)
g["edge_index"].append(col)
g["x"]=np.array(g["x"])
g["edge_attr"]=np.array(g["edge_attr"])
print("+",g["x"].shape)
assert g["x"].shape[0] <= 20
return g
#g_nx = nx.nx_agraph.from_agraph(nn_graph)
#A = nx.nx_agraph.to_agraph(g_nx) # convert to a graphviz graph
#A.layout() # neato layout
#A.draw("a.ps")
def visualise_list_of_nns(list_of_nns, save_dir, fig_labels=None, fig_file_names=None,
for_pres=False):
""" Visualises a list of neural networks. """
g_list = []
if fig_labels is None:
fig_labels = [None] * len(list_of_nns)
if fig_file_names is None:
fig_file_names = [str(idx) for idx in range(len(list_of_nns))]
for idx, nn in enumerate(list_of_nns):
save_file_prefix = os.path.join(save_dir, fig_file_names[idx])
g = visualise_nn(nn, save_file_prefix, fig_labels[idx], for_pres)
g_list.append(g)
return g_list
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ['url', 'name']
class ItemListSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ItemList
fields = ['id', 'status', 'type', 'name', 'city']
class ExampleModelLessSerializer(serializers.Serializer):
project_name = serializers.CharField()
total_head_count = serializers.IntegerField()
start_date = serializers.DateTimeField()
location = serializers.CharField()
|
import functools
import os
from contextlib import contextmanager
from .platforms.basepath import BasePath, Root, InstallRoot, DestDir # noqa
from .platforms.host import platform_info
Path = platform_info().Path
def abspath(path, type=Path, **kwargs):
return type.abspath(path, **kwargs)
def commonprefix(paths):
if not paths or any(i.root != paths[0].root for i in paths):
return None
cls = type(paths[0])
split = [i.split() for i in paths]
lo, hi = min(split), max(split)
for i, bit in enumerate(lo):
if bit != hi[i]:
return cls(cls.sep.join(lo[:i]), paths[0].root, directory=True)
return cls(cls.sep.join(lo), paths[0].root, directory=(lo != hi))
def uniquetrees(paths):
def ischild(a, b):
for i, j in zip(a, b):
if i != j:
return False
return True
if not paths:
return []
paths = [(i, [i.root.value] + i.split()) for i in paths]
paths.sort(key=lambda i: i[1])
piter = iter(paths)
p, last = next(piter)
uniques = [p]
for p, bits in piter:
if not ischild(last, bits):
last = bits
uniques.append(p)
return uniques
def _wrap_ospath(fn):
@functools.wraps(fn)
def wrapper(path, variables=None):
return fn(path.string(variables))
return wrapper
exists = _wrap_ospath(os.path.exists)
isdir = _wrap_ospath(os.path.isdir)
isfile = _wrap_ospath(os.path.isfile)
islink = _wrap_ospath(os.path.islink)
def samefile(path1, path2, variables=None):
return os.path.samefile(path1.string(variables),
path2.string(variables))
def listdir(path, variables=None):
dirs, nondirs = [], []
try:
names = os.listdir(path.string(variables))
for name in names:
curpath = path.append(name)
if isdir(curpath, variables):
dirs.append(curpath.as_directory())
else:
nondirs.append(curpath)
except OSError:
pass
return dirs, nondirs
def walk(top, variables=None):
if not exists(top, variables):
return
dirs, nondirs = listdir(top, variables)
yield top, dirs, nondirs
for d in dirs:
if not islink(d, variables):
for i in walk(d, variables):
yield i
@contextmanager
def pushd(dirname, makedirs=False, mode=0o777, exist_ok=False):
old = os.getcwd()
if makedirs:
os.makedirs(dirname, mode, exist_ok)
os.chdir(dirname)
try:
yield
finally:
os.chdir(old)
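# Illustrative only (hypothetical directory name): pushd restores the previous
# working directory even if the body raises.
def _pushd_example():
    with pushd('scratch-dir', makedirs=True, exist_ok=True):
        return os.getcwd()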
|
import pytest
from tests.helpers.run_command import run_command
from tests.helpers.runif import RunIf
"""
A couple of sanity checks to make sure the model doesn't crash with different running options.
"""
def test_fast_dev_run():
"""Test running for 1 train, val and test batch."""
command = ["train.py", "++trainer.fast_dev_run=true"]
run_command(command)
@pytest.mark.slow
def test_cpu():
"""Test running 1 epoch on CPU."""
command = ["train.py", "++trainer.max_epochs=1", "++trainer.gpus=0"]
run_command(command)
# use RunIf to skip execution of some tests, e.g. when no gpus are available
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_gpu():
"""Test running 1 epoch on GPU."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
]
run_command(command)
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_mixed_precision():
"""Test running 1 epoch with pytorch native automatic mixed precision (AMP)."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
"++trainer.precision=16",
]
run_command(command)
@pytest.mark.slow
def test_double_validation_loop():
"""Test running 1 epoch with validation loop twice per epoch."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.val_check_interval=0.5",
]
run_command(command)
|
import argparse
import binascii
import sys
import time
from inkfish.proof_of_time import (create_proof_of_time_wesolowski,
create_proof_of_time_nwesolowski,
create_proof_of_time_pietrzak,
check_proof_of_time_wesolowski,
check_proof_of_time_nwesolowski,
check_proof_of_time_pietrzak)
from .classgroup import ClassGroup
from .create_discriminant import create_discriminant
def create_pot_parser():
parser = argparse.ArgumentParser(
description='Generate or verify a proof of time using the Chia ' +
                    'Verifiable Delay Function (VDF)',
)
parser.add_argument("-t", "--type", default="wesolowski",
choices=["wesolowski", "n-wesolowski", "pietrzak"],
help="the type of proof, wesolowski, n-wesolowski, or pietrzak")
parser.add_argument("-l", "--length", type=int, default=2048,
help="the number of bits of the discriminant")
parser.add_argument("-d", "--depth", type=int, default=2,
help="depth of n-wesolowski (n) default is 2")
parser.add_argument("-v", "--verbose", action="store_true",
help="print a bunch of extra stuff about the proof")
parser.add_argument("discriminant_challenge", type=binascii.unhexlify,
help="a hex-encoded challenge used to derive the discriminant")
parser.add_argument("iterations", type=int,
help="number of iterations")
parser.add_argument("proof", type=binascii.unhexlify,
help="the hex-encoded proof", nargs="?")
return parser
def pot(args=sys.argv):
parser = create_pot_parser()
args = parser.parse_args(args=args[1:])
discriminant = create_discriminant(args.discriminant_challenge, args.length)
if args.verbose:
print("proof type: %s" % args.type)
print("discriminant: %s" % discriminant)
print("discriminant size: %s" % args.length)
# Generator element is created as a=2, b=1.
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
if args.verbose:
print("x: %s" % str(x))
if args.proof:
if args.type == "wesolowski":
ok = check_proof_of_time_wesolowski(
discriminant, x, args.proof, args.iterations, args.length)
elif args.type == "n-wesolowski":
ok = check_proof_of_time_nwesolowski(
discriminant, x, args.proof, args.iterations, args.length)
elif args.type == "pietrzak":
ok = check_proof_of_time_pietrzak(
discriminant, x, args.proof, args.iterations, args.length)
if ok:
print("Proof is valid")
else:
print("** INVALID PROOF")
return -1
else:
start_t = time.time() * 1000
if args.type == "wesolowski":
result, proof = create_proof_of_time_wesolowski(
discriminant, x, args.iterations, args.length)
elif args.type == "n-wesolowski":
result, proof = create_proof_of_time_nwesolowski(
discriminant, x, args.iterations, args.length, args.depth, 0)
elif args.type == "pietrzak":
result, proof = create_proof_of_time_pietrzak(
discriminant, x, args.iterations, args.length)
if args.verbose:
print("Finished in ", round(((time.time() * 1000) - start_t), 2), "ms")
hex_result = binascii.hexlify(result).decode("utf8")
hex_proof = binascii.hexlify(proof).decode("utf8")
print(hex_result + hex_proof)
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
import pathlib
from ruamel import yaml
from qhub.schema import verify
from qhub.provider.cicd.linter import comment_on_pr
def create_validate_subcommand(subparser):
subparser = subparser.add_parser("validate")
subparser.add_argument(
"configdeprecated",
help="qhub configuration yaml file (deprecated - please pass in as -c/--config flag)",
nargs="?",
)
subparser.add_argument(
"-c", "--config", help="qhub configuration yaml file", required=False
)
subparser.add_argument(
"--enable-commenting", help="Turn on PR commenting", action="store_true"
)
subparser.set_defaults(func=handle_validate)
def handle_validate(args):
if args.configdeprecated and args.config:
raise ValueError(
"Please pass in -c/--config flag specifying your qhub-config.yaml file, and do NOT pass it as a standalone argument"
)
config_filename = args.config or args.configdeprecated
if not config_filename:
raise ValueError(
"Please pass in a qhub-config.yaml filename using the -c/--config argument"
)
config_filename = pathlib.Path(args.config or args.configdeprecated)
if not config_filename.is_file():
raise ValueError(
f"passed in configuration filename={config_filename} must exist"
)
with config_filename.open() as f:
config = yaml.safe_load(f.read())
if args.enable_commenting:
# for PR's only
comment_on_pr(config)
else:
verify(config)
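# --- Usage sketch (editor addition, not part of qhub itself) ---
# A minimal, hypothetical example of attaching the subcommand above to a
# top-level argparse parser and dispatching it. The program name, the example
# config path and the dispatch call are assumptions for illustration; running
# it requires a real qhub-config.yaml on disk.
def _example_validate_cli(argv=None):  # hypothetical helper, illustration only
    import argparse

    parser = argparse.ArgumentParser(prog="qhub")
    subparsers = parser.add_subparsers(help="qhub subcommands")
    create_validate_subcommand(subparsers)
    args = parser.parse_args(argv or ["validate", "-c", "qhub-config.yaml"])
    args.func(args)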
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import decimal
import io
import uuid
from flask import current_app
from flask import json as _json
from flask import request
from sqlalchemy import types
import arrow
text_type = str
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
"""Custom JSON encoder that will serialize more complex datatypes.
This class adds support for the following datatypes:
- ``phonenumbers.phonenumber.PhoneNumber``: This will be serialized to
a E.164 phonenumber. This will only be run if ``phonenumbers`` is
installed.
- ``decimal.Decimal``: This will serialize to a pretty decimal number with
no trailing zeros and no unnecessary values. For example:
- 2.01 -> 2.01
- 2.0 -> 2
- 2.010 -> 2.01
- 2.000 -> 2
- ``arrow.Arrow``: This will be serialized to an ISO8601 datetime string
with the offset included.
- ``datetime.datetime``: This will be serialized to an ISO8601 datetime
string with the offset included.
- ``datetime.date``: This will be serialized to an ISO8601 date string.
Extended from http://flask.pocoo.org/snippets/119.
"""
def __init__(self, *args, **kwargs):
super(JSONEncoder, self).__init__(*args, **kwargs)
self.use_decimal = False
def default(self, obj):
"""
Encode individual objects into their JSON representation.
This method is used by :class:`flask.json.JSONEncoder` to encode
individual items in the JSON object.
Args:
obj (object): Any Python object we wish to convert to JSON.
Returns:
str: The stringified, valid JSON representation of our provided
object.
"""
if isinstance(obj, decimal.Decimal):
obj = format(obj, 'f')
str_digit = str(obj)
return (str_digit.rstrip('0').rstrip('.')
if '.' in str_digit
else str_digit)
elif isinstance(obj, types.TypeEngine):
return str(obj)
elif isinstance(obj, arrow.Arrow):
return str(obj)
if isinstance(obj, datetime.datetime):
if obj.tzinfo:
# eg: '2015-09-25T23:14:42.588601+00:00'
return obj.isoformat('T')
else:
# No timezone present - assume UTC.
# eg: '2015-09-25T23:14:42.588601Z'
return obj.isoformat('T') + 'Z'
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, uuid.UUID):
return str(obj)
try:
return list(iter(obj))
except TypeError:
pass
return super(JSONEncoder, self).default(obj)
def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder)
def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
This function can return ``unicode`` strings or ascii-only bytestrings by
default which coerce into unicode strings automatically. That behavior by
default is controlled by the ``JSON_AS_ASCII`` configuration variable
and can be overridden by the simplejson ``ensure_ascii`` parameter.
"""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
rv = _json.dumps(obj, **kwargs)
if encoding is not None and isinstance(rv, text_type):
rv = rv.encode(encoding)
return rv
def dump(obj, fp, **kwargs):
"""Like :func:`dumps` but writes into a file object."""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
if encoding is not None:
fp = _wrap_writer_for_text(fp, encoding)
_json.dump(obj, fp, **kwargs)
def jsonify(*args, **kwargs):
"""
    copied from the flask jsonify function with modifications added
"""
indent = None
separators = (',', ':')
if current_app.config['JSONIFY_PRETTYPRINT_REGULAR']\
and not request.is_xhr:
indent = 2
separators = (', ', ': ')
if args and kwargs:
raise TypeError(
'jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
return current_app.response_class(
(dumps(data, indent=indent, separators=separators), '\n'),
mimetype=current_app.config['JSONIFY_MIMETYPE']
)
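# --- Usage sketch (editor addition): the custom encoder outside a request ---
# Outside of an application context, _dump_arg_defaults falls back to this
# module's JSONEncoder (assuming the current_app proxy evaluates falsy without
# a context), so dumps() can be exercised directly. The payload values are
# illustrative; Decimal and UUID values are rendered as strings, and a naive
# datetime gets a trailing 'Z'.
def _example_dumps():  # hypothetical helper, for illustration only
    payload = {
        "price": decimal.Decimal("2.010"),  # trailing zeros stripped -> "2.01"
        "id": uuid.uuid4(),                 # rendered via str()
        "when": datetime.datetime(2015, 9, 25, 23, 14, 42),  # naive -> "...Z"
    }
    return dumps(payload, sort_keys=True)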
|
"""An implementation of Matching Layer."""
import typing
import tensorflow as tf
from tensorflow.keras import layers
class MatchingLayer(layers.Layer):
"""
Layer that computes a matching matrix between samples in two tensors.
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
:param matching_type: the similarity function for matching
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.layers.MatchingLayer(matching_type='dot',
... normalize=True)
>>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
>>> layer.build([[num_batch, left_len, num_dim],
... [num_batch, right_len, num_dim]])
"""
def __init__(self, normalize: bool = False,
matching_type: str = 'dot', **kwargs):
""":class:`MatchingLayer` constructor."""
super().__init__(**kwargs)
self._normalize = normalize
self._validate_matching_type(matching_type)
self._matching_type = matching_type
self._shape1 = None
self._shape2 = None
@classmethod
def _validate_matching_type(cls, matching_type: str = 'dot'):
valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']
if matching_type not in valid_matching_type:
raise ValueError(f"{matching_type} is not a valid matching type, "
f"{valid_matching_type} expected.")
def build(self, input_shape: list):
"""
Build the layer.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
self._shape1 = input_shape[0]
self._shape2 = input_shape[1]
for idx in 0, 2:
if self._shape1[idx] != self._shape2[idx]:
raise ValueError(
'Incompatible dimensions: '
f'{self._shape1[idx]} != {self._shape2[idx]}.'
f'Layer shapes: {self._shape1}, {self._shape2}.'
)
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of MatchingLayer.
:param inputs: two input tensors.
"""
x1 = inputs[0]
x2 = inputs[1]
if self._matching_type == 'dot':
if self._normalize:
x1 = tf.math.l2_normalize(x1, axis=2)
x2 = tf.math.l2_normalize(x2, axis=2)
return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)
else:
if self._matching_type == 'mul':
def func(x, y):
return x * y
elif self._matching_type == 'plus':
def func(x, y):
return x + y
elif self._matching_type == 'minus':
def func(x, y):
return x - y
elif self._matching_type == 'concat':
def func(x, y):
return tf.concat([x, y], axis=3)
else:
raise ValueError(f"Invalid matching type."
f"{self._matching_type} received."
f"Mut be in `dot`, `mul`, `plus`, "
f"`minus` and `concat`.")
x1_exp = tf.stack([x1] * self._shape2[1], 2)
x2_exp = tf.stack([x2] * self._shape1[1], 1)
return func(x1_exp, x2_exp)
def compute_output_shape(self, input_shape: list) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if len(shape1) != 3 or len(shape2) != 3:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with 3 dimensions.')
if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with same 0,2 dimensions.')
if self._matching_type in ['mul', 'plus', 'minus']:
return shape1[0], shape1[1], shape2[1], shape1[2]
elif self._matching_type == 'dot':
return shape1[0], shape1[1], shape2[1], 1
elif self._matching_type == 'concat':
return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]
else:
raise ValueError(f"Invalid `matching_type`."
f"{self._matching_type} received."
f"Must be in `mul`, `plus`, `minus` "
f"`dot` and `concat`.")
def get_config(self) -> dict:
"""Get the config dict of MatchingLayer."""
config = {
'normalize': self._normalize,
'matching_type': self._matching_type,
}
base_config = super(MatchingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
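# --- Usage sketch (editor addition): calling the layer eagerly ---
# A minimal example exercising the 'dot' matching type on random tensors; the
# batch/length/dimension sizes are arbitrary illustrative choices. With
# normalize=True the result is a cosine-similarity matrix with a trailing
# channel axis of size 1.
def _example_matching_layer():  # hypothetical helper, for illustration only
    layer = MatchingLayer(matching_type='dot', normalize=True)
    x1 = tf.random.normal([5, 3, 10])   # (batch, left_len, dim)
    x2 = tf.random.normal([5, 2, 10])   # (batch, right_len, dim)
    return layer([x1, x2])              # shape: (5, 3, 2, 1)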
|
# python 2.7, pytorch 0.3.1
import os, sys
sys.path.insert(1, '../')
import torch
import cv2
import shutil
import torchvision
import numpy as np
import itertools
import subprocess
import random
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from PIL import Image
from pietorch import data_convertors
from pietorch.DuRN_P import cleaner as cleaner
from pietorch.DuRN_P_no_norm import cleaner as cleaner_no_norm
from pietorch.pytorch_ssim import ssim as ssim
from skimage.measure import compare_psnr as psnr
from skimage.measure import compare_ssim as ski_ssim
#------ Options -------
tag = 'DuRN_P_no_norm' # 'DuRN_P' or 'DuRN_P_no_norm' for Gaussian or real-world noise removal
data_name = 'RealNoiseHKPoly' # 'BSD_gray' or 'RealNoiseHKPoly'
# Gaussian noise level. Comment it if you set data_name = 'RealNoiseHKPoly'.
#noise_level = 70 # choose one from [30, 50, 70]
#----------------------
if data_name == 'BSD_gray':
testroot = "../data/"+data_name+"/test/"
test_list_pth = '../lists/'+data_name+'/testlist.txt'
else:
testroot = "../data/"+data_name+"/test1/"
test_list_pth = '../lists/'+data_name+'/test1_list.txt'
Pretrained = '../trainedmodels/'+data_name+'/'+tag+'_model.pt'
show_dst = '../cleaned_images/'+data_name+'/'+tag+'/'
subprocess.check_output(['mkdir', '-p', show_dst])
# Make the transformer and the network
if data_name == 'BSD_gray':
transform = [transforms.ToTensor(), noise_level]
cleaner = cleaner().cuda()
else:
transform = transforms.ToTensor()
cleaner = cleaner_no_norm().cuda()
cleaner.load_state_dict(torch.load(Pretrained))
cleaner.eval()
# Make the dataloader
convertor = data_convertors.ConvertImageSet(testroot, test_list_pth, data_name,
transform=transform)
dataloader = DataLoader(convertor, batch_size=1, shuffle=False, num_workers=1)
ave_psnr = 0
ave_ssim = 0
ct_num = 0
for i, data in enumerate(dataloader):
ct_num+= 1.0
im_input, label, im_name = data
im_input = Variable(im_input, requires_grad=False).cuda()
res = cleaner(im_input)
res = res.data.cpu().numpy()
res[res>1] = 1
res[res<0] = 0
res*= 255
if data_name == 'BSD_gray':
res = res.astype(np.uint8)[0,0]
label = label.numpy()[0,0]
label*= 255
label = label.astype(np.uint8)
cv2.imwrite(show_dst+im_name[0].split('.')[0]+'_'+str(noise_level)+'.png', res)
ave_psnr+= psnr(res, label, data_range=255)
ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=False)
elif data_name == 'RealNoiseHKPoly':
res = res.astype(np.uint8)[0]
res = res.transpose((1,2,0))
label = label.numpy()[0].transpose((1,2,0))
label*= 255
label = label.astype(np.uint8)
Image.fromarray(res).save(show_dst+im_name[0].split('real')[0]+'.png')
ave_psnr+= psnr(res, label, data_range=255)
ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=True)
else:
print('Unknown dataset name.')
print('psnr: '+str(ave_psnr/ct_num))
print('ssim: '+str(ave_ssim/ct_num))
print('Test done.')
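# --- Editor note / sketch (not part of the original script) ---
# The loop above clips the network output to [0, 1], rescales to 8-bit and
# only then computes PSNR/SSIM against the 8-bit label. A stand-alone numpy
# illustration of that conversion, with made-up values:
def _example_postprocess():  # hypothetical helper, for illustration only
    arr = np.array([[-0.1, 0.5], [0.98, 1.2]])
    arr = np.clip(arr, 0.0, 1.0) * 255
    return arr.astype(np.uint8)  # -> [[  0, 127], [249, 255]]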
|
# Simple image recommender
#
# required:
# data/images: a folder containing your images dataset
# data/users: can be empty, but the folder needs to exist (for now ?)
#
# optional:
# data/tags.csv: a comma-separated list containing the names of your
# images and the corresponding semicolon-separated tags
# (eg. "37.png,sky;blue;cliff")
# Libraries import
from PIL import Image
from sklearn.cluster import MiniBatchKMeans
from operator import itemgetter
import pandas
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
import json
import math
import os
import csv
# User data gathering
def user_data_gathering():
name = input("Please enter your username: ")
user_favs = []
user_dislikes = []
try:
with open("data/users/" + name + ".txt", "r") as userfile:
user_favs = userfile.readline().rstrip().split(",")
user_dislikes = userfile.readline().rstrip().split(",")
except FileNotFoundError:
print("This user doesn't exist. Creating it...")
if not user_favs:
print("No favourite images defined!")
if not user_dislikes:
print("No disliked images defined!")
do_fav = input("Would you like to define your favourite images? ([y]es/[n]o/[a]dd): ")
if do_fav == "y":
user_favs = input("Please enter your favourite images, separated by a comma: ").split(",")
elif do_fav == "a":
user_favs += input("Please enter the images you want to add, separated by a comma: ").split(",")
elif do_fav == "n":
pass
else:
print("Incorrect choice. Exiting")
exit()
do_dislike = input("Would you like to define your disliked images? ([y]es/[n]o/[a]dd): ")
if do_dislike == "y":
user_dislikes = input("Please enter your disliked images, separated by a comma: ").split(",")
elif do_dislike == "a":
user_dislikes += input("Please enter the images you want to add, separated by a comma: ").split(",")
elif do_dislike == "n":
pass
else:
print("Incorrect choice. Exiting")
exit()
userfile = open("data/users/" + name + ".txt", "w+")
userfile.write(",".join(user_favs) + "\n")
userfile.write(",".join(user_dislikes) + "\n")
userfile.close()
return user_favs,user_dislikes
# Get all images filenames in data/images/
def get_image_list():
imagelist = []
for file in os.listdir("data/images"):
if file.endswith(".png") or file.endswith(".jpg") or file.endswith(".gif") or file.endswith(".tif") or file.endswith(".bmp"):
imagelist.append(file)
return imagelist
# Get color clusters per image
def get_clusters(filename, n_clusters):
imgfile = Image.open("data/images/" + filename).convert('RGBA')
numarray = np.array(imgfile.getdata(), np.uint8)
clusters = MiniBatchKMeans(n_clusters=n_clusters)
clusters.fit(numarray)
npbins = np.arange(0, n_clusters + 1)
histogram = np.histogram(clusters.labels_, bins=npbins)
# Sort histogram
pairs = sorted(zip(histogram[0], histogram[1]), key=itemgetter(0))
histogram = (np.array([v for v, i in pairs]),
np.array([i for v, i in pairs]))
colors = []
for i in range(n_clusters):
j = histogram[1][i]
colors.append(
(
math.ceil(clusters.cluster_centers_[j][0]),
math.ceil(clusters.cluster_centers_[j][1]),
math.ceil(clusters.cluster_centers_[j][2])
)
)
return colors
# Returns a pandas dataframe with the tags info
def get_tags(filename):
    try:
        tags_df = pd.read_csv(filename)
    except FileNotFoundError:
        print("No tags have been defined. Ignoring tags.")
        return None
    tags_df["tags"] = tags_df.tags.str.split(";")
    return tags_df
# Clean the clusters data
def clean_data(clusters):
for image in clusters:
tmp = []
for color in image["colors"]:
tmp.append(((color[0])<<16)|((color[1])<<8)|(color[2]))
image["colors"] = tmp
tmp = []
return clusters
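# --- Editor note / sketch: the colour packing used by clean_data ---
# Each (R, G, B) cluster centre is packed into a single 24-bit integer,
# (R << 16) | (G << 8) | B, so the classifier sees one numeric feature per
# colour cluster. For example, (18, 52, 86) packs to 0x123456 == 1193046.
def _example_pack_color(r=18, g=52, b=86):  # hypothetical helper, illustration only
    return (r << 16) | (g << 8) | b          # -> 1193046 for the defaults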
# The actual prediction algorithm
def predict(clusters, user_fav, user_dislikes):
images = sorted(clusters, key=lambda x: x['name'])
color_clusters = [image["colors"] for image in images]
# Build training data
training_data = color_clusters
result_data = [(image['name'] in user_fav) for image in images]
# Build dataframes
training_df = pandas.DataFrame(training_data, columns=['color1', 'color2', 'color3'])
result_df = pandas.DataFrame(result_data, columns=['favorite'])
# Train decision tree
classifier = RandomForestClassifier(n_estimators=10, max_depth=10)
classifier = classifier.fit(training_df, result_df.values.ravel())
predicted = classifier.predict(list(map(lambda x: x['colors'], images)))
print("# Predicted as favorites")
for index, favorite in enumerate(predicted):
name = images[index]['name']
# Only print new images
if favorite and name not in user_fav and name not in user_dislikes:
print(name)
# Main function
def main():
print("Loading...")
print(" -- Looking up images...")
imagelist = get_image_list()
print(" -- Calculating color clusters (this can take some time if it has never been done before)...")
n_clusters = 3
    try:
        with open("data/clusters.json", "r") as clusters_file:
            clusters = json.load(clusters_file)
    except (FileNotFoundError, json.JSONDecodeError):
        clusters = [{"name": filename, "colors": get_clusters(filename, n_clusters)} for filename in imagelist]
        with open("data/clusters.json", "w") as clusters_file:
            clusters_file.write(json.dumps(clusters))
print(" -- Extracting tags...")
tags = get_tags("data/tags.csv")
print("Loading done!")
# Gathering user data
print("Gathering user data...")
(user_favs, user_dislikes) = user_data_gathering()
# Recommendation system
print("Computing recommendation...")
cleanedclusters = clean_data(clusters)
predict(cleanedclusters, user_favs, user_dislikes)
if __name__ == "__main__":
main()
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A Python wrapper that loads _pywrap_tensorflow_internal.so."""
import ctypes
import sys
import traceback
from tensorflow.python.platform import self_check
# TODO(mdan): Cleanup antipattern: import for side effects.
# Perform pre-load sanity checks in order to produce a more actionable error.
self_check.preload_check()
# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
try:
# This import is expected to fail if there is an explicit shared object
# dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.
from tensorflow.python import pywrap_dlopen_global_flags
_use_dlopen_global_flags = True
except ImportError:
_use_dlopen_global_flags = False
# On UNIX-based platforms, pywrap_tensorflow is a python library that
# dynamically loads _pywrap_tensorflow.so.
_can_set_rtld_local = (
hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))
if _can_set_rtld_local:
_default_dlopen_flags = sys.getdlopenflags()
try:
if _use_dlopen_global_flags:
pywrap_dlopen_global_flags.set_dlopen_flags()
elif _can_set_rtld_local:
# Ensure RTLD_LOCAL behavior for platforms where it isn't the default
# (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not
# override an RTLD_GLOBAL in _default_dlopen_flags).
sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)
# Python2.7 does not have a ModuleNotFoundError.
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import,g-import-not-at-top,line-too-long,undefined-variable
try:
from tensorflow.python._pywrap_tensorflow_internal import *
# This try catch logic is because there is no bazel equivalent for py_extension.
# Externally in opensource we must enable exceptions to load the shared object
# by exposing the PyInit symbols with pybind. This error will only be
# caught internally or if someone changes the name of the target _pywrap_tensorflow_internal.
# This logic is used in other internal projects using py_extension.
except ModuleNotFoundError:
pass
if _use_dlopen_global_flags:
pywrap_dlopen_global_flags.reset_dlopen_flags()
elif _can_set_rtld_local:
sys.setdlopenflags(_default_dlopen_flags)
except ImportError:
raise ImportError(
f'{traceback.format_exc()}'
f'\n\nFailed to load the native TensorFlow runtime.\n'
f'See https://www.tensorflow.org/install/errors '
f'for some common causes and solutions.\n'
f'If you need help, create an issue '
f'at https://github.com/tensorflow/tensorflow/issues '
f'and include the entire stack trace above this error message.')
# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @author Neil Vaytet
from .. import config
from ..core import concatenate, values, dtype, units, nanmin, nanmax, histogram, \
full_like
from ..core import Variable, DataArray
from ..core import abs as abs_
import numpy as np
from copy import copy
import io
def get_line_param(name=None, index=None):
"""
Get the default line parameter from the config.
If an index is supplied, return the i-th item in the list.
"""
param = getattr(config.plot, name)
return param[index % len(param)]
def to_bin_centers(x, dim):
"""
Convert array edges to centers
"""
return 0.5 * (x[dim, 1:] + x[dim, :-1])
def to_bin_edges(x, dim):
"""
Convert array centers to edges
"""
idim = x.dims.index(dim)
if x.shape[idim] < 2:
one = 1.0 * x.unit
return concatenate(x[dim, 0:1] - one, x[dim, 0:1] + one, dim)
else:
center = to_bin_centers(x, dim)
# Note: use range of 0:1 to keep dimension dim in the slice to avoid
# switching round dimension order in concatenate step.
left = center[dim, 0:1] - (x[dim, 1] - x[dim, 0])
right = center[dim, -1] + (x[dim, -1] - x[dim, -2])
return concatenate(concatenate(left, center, dim), right, dim)
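# --- Editor note / sketch (not part of the original module) ---
# A hypothetical plain-numpy analogue of the two helpers above (the real
# functions operate on scipp variables and preserve units), shown only to make
# the midpoint/extrapolation arithmetic explicit.
def _example_centers_and_edges():  # hypothetical helper, illustration only
    edges = np.array([0.0, 1.0, 2.0, 4.0])
    centers = 0.5 * (edges[1:] + edges[:-1])          # -> [0.5, 1.5, 3.0]
    left = centers[:1] - (edges[1] - edges[0])        # -> [-0.5]
    right = centers[-1:] + (edges[-1] - edges[-2])    # -> [5.0]
    return np.concatenate([left, centers, right])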
def parse_params(params=None, defaults=None, globs=None, array=None):
"""
Construct the colorbar settings using default and input values
"""
from matplotlib.colors import Normalize, LogNorm, LinearSegmentedColormap
from matplotlib import cm
parsed = dict(config.plot.params)
if defaults is not None:
for key, val in defaults.items():
parsed[key] = val
if globs is not None:
for key, val in globs.items():
# Global parameters need special treatment because by default they
# are set to None, and we don't want to overwrite the defaults.
if val is not None:
parsed[key] = val
if params is not None:
if isinstance(params, bool):
params = {"show": params}
for key, val in params.items():
parsed[key] = val
if parsed["norm"] == "log":
norm = LogNorm
elif parsed["norm"] == "linear":
norm = Normalize
else:
raise RuntimeError("Unknown norm. Expected 'linear' or 'log', "
"got {}.".format(parsed["norm"]))
vmin = parsed["vmin"]
vmax = parsed["vmax"]
parsed["norm"] = norm(vmin=vmin.value if vmin is not None else None,
vmax=vmax.value if vmax is not None else None)
# Convert color into custom colormap
if parsed["color"] is not None:
parsed["cmap"] = LinearSegmentedColormap.from_list(
"tmp", [parsed["color"], parsed["color"]])
else:
parsed["cmap"] = copy(cm.get_cmap(parsed["cmap"]))
if parsed["under_color"] is None:
parsed["cmap"].set_under(parsed["cmap"](0.0))
else:
parsed["cmap"].set_under(parsed["under_color"])
if parsed["over_color"] is None:
parsed["cmap"].set_over(parsed["cmap"](1.0))
else:
parsed["cmap"].set_over(parsed["over_color"])
return parsed
def vars_to_err(v):
"""
Convert variances to errors.
"""
with np.errstate(invalid="ignore"):
v = np.sqrt(v)
np.nan_to_num(v, copy=False)
return v
def find_log_limits(x):
"""
    To find log scale limits, we histogram the data between 1.0e-30
and 1.0e+30 and include only bins that are non-zero.
"""
from .. import flatten, ones
volume = np.product(x.shape)
pixel = flatten(values(x.astype(dtype.float64)), to='pixel')
weights = ones(dims=['pixel'], shape=[volume], unit='counts')
hist = histogram(DataArray(data=weights, coords={'order': pixel}),
bins=Variable(dims=['order'],
values=np.geomspace(1e-30, 1e30, num=61),
unit=x.unit))
# Find the first and the last non-zero bins
inds = np.nonzero((hist.data > 0.0 * units.counts).values)
ar = np.arange(hist.data.shape[0])[inds]
# Safety check in case there are no values in range 1.0e-30:1.0e+30:
# fall back to the linear method and replace with arbitrary values if the
# limits are negative.
if len(ar) == 0:
[vmin, vmax] = find_linear_limits(x)
if vmin.value <= 0.0:
if vmax.value <= 0.0:
vmin = full_like(vmin, 0.1)
vmax = full_like(vmax, 1.0)
else:
vmin = 1.0e-3 * vmax
else:
vmin = hist.coords['order']['order', ar.min()]
vmax = hist.coords['order']['order', ar.max() + 1]
return [vmin, vmax]
def find_linear_limits(x):
"""
Find variable min and max.
"""
return [
values(nanmin(x).astype(dtype.float64)),
values(nanmax(x).astype(dtype.float64))
]
def find_limits(x, scale=None, flip=False):
"""
Find sensible limits, depending on linear or log scale.
"""
if scale is not None:
if scale == "log":
lims = {"log": find_log_limits(x)}
else:
lims = {"linear": find_linear_limits(x)}
else:
lims = {"log": find_log_limits(x), "linear": find_linear_limits(x)}
if flip:
for key in lims:
lims[key] = np.flip(lims[key]).copy()
return lims
def fix_empty_range(lims, replacement=None):
"""
Range correction in case xmin == xmax
"""
dx = 0.0 * lims[0].unit
if lims[0].value == lims[1].value:
if replacement is not None:
dx = 0.5 * replacement
elif lims[0].value == 0.0:
dx = 0.5 * lims[0].unit
else:
dx = 0.5 * abs_(lims[0])
return [lims[0] - dx, lims[1] + dx]
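# --- Editor note / sketch (not part of the original module) ---
# A unit-less mirror of the correction above, for illustration only: equal,
# non-zero limits are widened by half their absolute value on each side, so
# [5, 5] becomes [2.5, 7.5] (the real function works on scipp values with
# units and also supports an explicit replacement width).
def _example_fix_empty_range():  # hypothetical helper, illustration only
    lo = hi = 5.0
    dx = 0.5 * abs(lo) if lo != 0.0 else 0.5
    return [lo - dx, hi + dx]                 # -> [2.5, 7.5]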
def fig_to_pngbytes(fig):
"""
Convert figure to png image bytes.
We also close the figure to prevent it from showing up again in
cells further down the notebook.
"""
import matplotlib.pyplot as plt
buf = io.BytesIO()
fig.savefig(buf, format='png')
plt.close(fig)
buf.seek(0)
return buf.getvalue()
def to_dict(meta):
"""
Convert a coords, meta, attrs or masks object to a python dict.
"""
return {name: var for name, var in meta.items()}
|
# -*- Python -*-
# Return the options to use for a C++ library or binary build.
# Uses the ":optmode" config_setting to pick the options.
load(
"//tensorflow/core/platform:default/build_config_root.bzl",
"if_dynamic_kernels",
"if_static",
"tf_additional_grpc_deps_py",
"tf_additional_xla_deps_py",
"tf_cuda_tests_tags",
"tf_exec_compatible_with",
"tf_gpu_tests_tags",
"tf_sycl_tests_tags",
)
load(
"@local_config_tensorrt//:build_defs.bzl",
"if_tensorrt",
)
load(
"//tensorflow/core/platform:default/cuda_build_defs.bzl",
"if_cuda_is_configured",
)
load(
"@local_config_cuda//cuda:build_defs.bzl",
"cuda_default_copts",
"if_cuda",
)
load(
"@local_config_rocm//rocm:build_defs.bzl",
"if_rocm",
"if_rocm_is_configured",
"rocm_copts",
"rocm_default_copts",
)
load(
"//third_party/mkl:build_defs.bzl",
"if_enable_mkl",
"if_mkl",
"if_mkl_lnx_x64",
"if_mkl_ml",
"mkl_deps",
)
load(
"//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkldnn_threadpool",
)
load(
"//third_party/ngraph:build_defs.bzl",
"if_ngraph",
)
def register_extension_info(**kwargs):
pass
# Version for the shared libraries; it cannot
# contain rc or alpha, only numbers.
# Also update tensorflow/core/public/version.h
# and tensorflow/tools/pip_package/setup.py
VERSION = "1.15.5"
VERSION_MAJOR = VERSION.split(".")[0]
def if_v2(a):
return select({
clean_dep("//tensorflow:api_version_2"): a,
"//conditions:default": [],
})
def if_not_v2(a):
return select({
clean_dep("//tensorflow:api_version_2"): [],
"//conditions:default": a,
})
def if_cuda_is_configured_compat(x):
return if_cuda_is_configured(x)
# Given a source file, generate a test name.
# i.e. "common_runtime/direct_session_test.cc" becomes
# "common_runtime_direct_session_test"
def src_to_test_name(src):
return src.replace("/", "_").replace(":", "_").split(".")[0]
def full_path(relative_paths):
return [native.package_name() + "/" + relative for relative in relative_paths]
def _add_tfcore_prefix(src):
if src.startswith("//"):
return src
return "//tensorflow/core:" + src
# List of proto files for android builds
def tf_android_core_proto_sources(core_proto_sources_relative):
return [
_add_tfcore_prefix(p)
for p in core_proto_sources_relative
]
# Returns the list of pb.h and proto.h headers that are generated for
# tf_android_core_proto_sources().
def tf_android_core_proto_headers(core_proto_sources_relative):
return ([
_add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".pb.h")
for p in core_proto_sources_relative
] + [
_add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".proto.h")
for p in core_proto_sources_relative
])
# Wrapper for portable protos which currently just creates an empty rule.
def tf_portable_proto_library(name, proto_deps, **kwargs):
_ignore = [kwargs]
native.cc_library(name = name, deps = proto_deps)
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
def if_android_x86(a):
return select({
clean_dep("//tensorflow:android_x86"): a,
clean_dep("//tensorflow:android_x86_64"): a,
"//conditions:default": [],
})
def if_android_arm(a):
return select({
clean_dep("//tensorflow:android_arm"): a,
"//conditions:default": [],
})
def if_android_arm64(a):
return select({
clean_dep("//tensorflow:android_arm64"): a,
"//conditions:default": [],
})
def if_android_mips(a):
return select({
clean_dep("//tensorflow:android_mips"): a,
"//conditions:default": [],
})
def if_not_android(a):
return select({
clean_dep("//tensorflow:android"): [],
"//conditions:default": a,
})
def if_not_android_mips_and_mips64(a):
return select({
clean_dep("//tensorflow:android_mips"): [],
clean_dep("//tensorflow:android_mips64"): [],
"//conditions:default": a,
})
def if_android(a):
return select({
clean_dep("//tensorflow:android"): a,
"//conditions:default": [],
})
def if_emscripten(a):
return select({
clean_dep("//tensorflow:emscripten"): a,
"//conditions:default": [],
})
def if_macos(a, otherwise = []):
return select({
clean_dep("//tensorflow:macos"): a,
"//conditions:default": otherwise,
})
def if_ios(a):
return select({
clean_dep("//tensorflow:ios"): a,
"//conditions:default": [],
})
def if_ios_x86_64(a):
return select({
clean_dep("//tensorflow:ios_x86_64"): a,
"//conditions:default": [],
})
def if_mobile(a):
return select({
clean_dep("//tensorflow:android"): a,
clean_dep("//tensorflow:ios"): a,
"//conditions:default": [],
})
def if_not_mobile(a):
return select({
clean_dep("//tensorflow:android"): [],
clean_dep("//tensorflow:ios"): [],
"//conditions:default": a,
})
# Config setting selector used when building for products
# which require restricted licenses to be avoided.
def if_not_lgpl_restricted(a):
_ = (a,)
return select({
"//conditions:default": [],
})
def if_not_windows(a):
return select({
clean_dep("//tensorflow:windows"): [],
"//conditions:default": a,
})
def if_windows(a, otherwise = []):
return select({
clean_dep("//tensorflow:windows"): a,
"//conditions:default": otherwise,
})
def if_windows_cuda(a, otherwise = []):
return select({
clean_dep("//tensorflow:with_cuda_support_windows_override"): a,
"//conditions:default": otherwise,
})
def if_linux_x86_64(a):
return select({
clean_dep("//tensorflow:linux_x86_64"): a,
"//conditions:default": [],
})
def if_override_eigen_strong_inline(a):
return select({
clean_dep("//tensorflow:override_eigen_strong_inline"): a,
"//conditions:default": [],
})
def if_nccl(if_true, if_false = []):
return select({
"//tensorflow:no_nccl_support": if_false,
"//tensorflow:windows": if_false,
"//conditions:default": if_true,
})
def get_win_copts(is_external = False):
WINDOWS_COPTS = [
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
"/DTENSORFLOW_USE_EIGEN_THREADPOOL",
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018", # -Wno-sign-compare
        # Bazel's CROSSTOOL currently passes /EHsc to enable exceptions by
        # default. We can't pass /EHs-c- to disable exceptions, otherwise
        # we will get a waterfall of flag conflict warnings. Wait for
        # Bazel to fix this.
# "/D_HAS_EXCEPTIONS=0",
# "/EHs-c-",
"/wd4577",
"/DNOGDI",
]
if is_external:
return WINDOWS_COPTS + ["/UTF_COMPILE_LIBRARY"]
else:
return WINDOWS_COPTS + ["/DTF_COMPILE_LIBRARY"]
# LINT.IfChange
def tf_copts(
android_optimization_level_override = "-O2",
is_external = False,
allow_exceptions = False):
# For compatibility reasons, android_optimization_level_override
# is currently only being set for Android.
# To clear this value, and allow the CROSSTOOL default
# to be used, pass android_optimization_level_override=None
android_copts = [
"-DTF_LEAN_BINARY",
"-Wno-narrowing",
"-fomit-frame-pointer",
]
if android_optimization_level_override:
android_copts.append(android_optimization_level_override)
return (
if_not_windows([
"-DEIGEN_AVOID_STL_ARRAY",
"-Iexternal/gemmlowp",
"-Wno-sign-compare",
"-ftemplate-depth=900",
]) +
(if_not_windows(["-fno-exceptions"]) if not allow_exceptions else []) +
if_cuda(["-DGOOGLE_CUDA=1"]) +
if_tensorrt(["-DGOOGLE_TENSORRT=1"]) +
if_nccl(["-DGOOGLE_NCCL=1"]) +
if_mkl(["-DINTEL_MKL=1", "-DENABLE_MKLDNN_V1", "-DENABLE_INTEL_MKL_BFLOAT16"]) +
if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"]) +
if_mkldnn_threadpool(["-DENABLE_MKLDNN_THREADPOOL"]) +
if_enable_mkl(["-DENABLE_MKL"]) +
if_ngraph(["-DINTEL_NGRAPH=1"]) +
if_android_arm(["-mfpu=neon"]) +
if_linux_x86_64(["-msse3"]) +
if_ios_x86_64(["-msse4.1"]) +
select({
clean_dep("//tensorflow:framework_shared_object"): [],
"//conditions:default": ["-DTENSORFLOW_MONOLITHIC_BUILD"],
}) +
select({
clean_dep("//tensorflow:android"): android_copts,
clean_dep("//tensorflow:macos"): [],
clean_dep("//tensorflow:windows"): get_win_copts(is_external),
clean_dep("//tensorflow:ios"): [],
clean_dep("//tensorflow:no_lgpl_deps"): ["-D__TENSORFLOW_NO_LGPL_DEPS__", "-pthread"],
"//conditions:default": ["-pthread"],
})
)
def tf_openmp_copts():
return (if_mkl_lnx_x64(["-fopenmp"]) + if_mkldnn_threadpool(["-fno-openmp"]))
def tfe_xla_copts():
return select({
"//tensorflow:with_xla_support": ["-DTENSORFLOW_EAGER_USE_XLA"],
"//conditions:default": [],
})
def tf_opts_nortti_if_android():
return if_android([
"-fno-rtti",
"-DGOOGLE_PROTOBUF_NO_RTTI",
"-DGOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
])
# LINT.ThenChange(//tensorflow/contrib/android/cmake/CMakeLists.txt)
def tf_opts_nortti_if_emscripten():
return if_emscripten([
"-fno-rtti",
"-DGOOGLE_PROTOBUF_NO_RTTI",
"-DGOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
])
def tf_features_nomodules_if_android():
return if_android(["-use_header_modules"])
def tf_features_nomodules_if_emscripten():
return if_emscripten(["-use_header_modules"])
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate a library for that file.
def tf_gen_op_libs(op_lib_names, deps = None, is_external = True):
# Make library out of each op so it can also be used to generate wrappers
# for various languages.
if not deps:
deps = []
for n in op_lib_names:
native.cc_library(
name = n + "_op_lib",
copts = tf_copts(is_external = is_external),
srcs = ["ops/" + n + ".cc"],
deps = deps + [clean_dep("//tensorflow/core:framework")],
visibility = ["//visibility:public"],
alwayslink = 1,
linkstatic = 1,
)
def _make_search_paths(prefix, levels_to_root):
return ",".join(
[
"-rpath,%s/%s" % (prefix, "/".join([".."] * search_level))
for search_level in range(levels_to_root + 1)
],
)
def _rpath_linkopts(name):
# Search parent directories up to the TensorFlow root directory for shared
# object dependencies, even if this op shared object is deeply nested
# (e.g. tensorflow/contrib/package:python/ops/_op_lib.so). tensorflow/ is then
# the root and tensorflow/libtensorflow_framework.so should exist when
# deployed. Other shared object dependencies (e.g. shared between contrib/
# ops) are picked up as long as they are in either the same or a parent
# directory in the tensorflow/ tree.
levels_to_root = native.package_name().count("/") + name.count("/")
return select({
clean_dep("//tensorflow:macos"): [
"-Wl,%s" % (_make_search_paths("@loader_path", levels_to_root),),
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,%s" % (_make_search_paths("$$ORIGIN", levels_to_root),),
],
})
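# Editor note / sketch (illustration only, not used by any rule): for a
# hypothetical target two directories below the repository root,
# _make_search_paths("$$ORIGIN", 2) expands to
#   "-rpath,$$ORIGIN/,-rpath,$$ORIGIN/..,-rpath,$$ORIGIN/../.."
# and _rpath_linkopts prefixes it with "-Wl," on Linux (or uses
# "@loader_path" on macOS), so the binary can locate
# libtensorflow_framework.so relative to its own location.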
# Bazel-generated shared objects which must be linked into TensorFlow binaries
# to define symbols from //tensorflow/core:framework and //tensorflow/core:lib.
def tf_binary_additional_srcs(fullversion = False):
if fullversion:
suffix = "." + VERSION
else:
suffix = "." + VERSION_MAJOR
return if_static(
extra_deps = [],
macos = [
clean_dep("//tensorflow:libtensorflow_framework%s.dylib" % suffix),
],
otherwise = [
clean_dep("//tensorflow:libtensorflow_framework.so%s" % suffix),
],
)
def tf_binary_additional_data_deps():
return if_static(
extra_deps = [],
macos = [
clean_dep("//tensorflow:libtensorflow_framework.dylib"),
clean_dep("//tensorflow:libtensorflow_framework.%s.dylib" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow_framework.%s.dylib" % VERSION),
],
otherwise = [
clean_dep("//tensorflow:libtensorflow_framework.so"),
clean_dep("//tensorflow:libtensorflow_framework.so.%s" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow_framework.so.%s" % VERSION),
],
)
def tf_binary_pybind_deps():
return select({
clean_dep("//tensorflow:macos"): [
clean_dep(
"//tensorflow/python:_pywrap_tensorflow_internal_macos",
),
],
clean_dep("//tensorflow:windows"): [
clean_dep(
"//tensorflow/python:_pywrap_tensorflow_internal_windows",
),
],
"//conditions:default": [
clean_dep(
"//tensorflow/python:_pywrap_tensorflow_internal_linux",
),
],
})
# Helper function for the per-OS tensorflow libraries and their version symlinks
def tf_shared_library_deps():
return select({
clean_dep("//tensorflow:macos_with_framework_shared_object"): [
clean_dep("//tensorflow:libtensorflow.dylib"),
clean_dep("//tensorflow:libtensorflow.%s.dylib" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow.%s.dylib" % VERSION),
],
clean_dep("//tensorflow:macos"): [],
clean_dep("//tensorflow:windows"): [
clean_dep("//tensorflow:tensorflow.dll"),
clean_dep("//tensorflow:tensorflow_dll_import_lib"),
],
clean_dep("//tensorflow:framework_shared_object"): [
clean_dep("//tensorflow:libtensorflow.so"),
clean_dep("//tensorflow:libtensorflow.so.%s" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow.so.%s" % VERSION),
],
"//conditions:default": [],
}) + tf_binary_additional_srcs()
# Helper functions to add kernel dependencies to tf binaries when using dynamic
# kernel linking.
def tf_binary_dynamic_kernel_dsos():
return if_dynamic_kernels(
extra_deps = [
"//tensorflow/core/kernels:libtfkernel_all_kernels.so",
],
otherwise = [],
)
# Helper functions to add kernel dependencies to tf binaries when using static
# kernel linking.
def tf_binary_dynamic_kernel_deps(kernels):
return if_dynamic_kernels(
extra_deps = [],
otherwise = kernels,
)
# Shared libraries have different name pattern on different platforms,
# but cc_binary cannot output correct artifact name yet,
# so we generate multiple cc_binary targets with all name patterns when necessary.
# TODO(pcloudy): Remove this workaround when https://github.com/bazelbuild/bazel/issues/4570
# is done and cc_shared_library is available.
SHARED_LIBRARY_NAME_PATTERNS = [
"lib%s.so%s", # On Linux, shared libraries are usually named as libfoo.so
"lib%s%s.dylib", # On macos, shared libraries are usually named as libfoo.dylib
"%s%s.dll", # On Windows, shared libraries are usually named as foo.dll
]
def tf_cc_shared_object(
name,
srcs = [],
deps = [],
data = [],
linkopts = [],
framework_so = tf_binary_additional_srcs(),
soversion = None,
kernels = [],
per_os_targets = False, # Generate targets with SHARED_LIBRARY_NAME_PATTERNS
visibility = None,
**kwargs):
"""Configure the shared object (.so) file for TensorFlow."""
if soversion != None:
suffix = "." + str(soversion).split(".")[0]
longsuffix = "." + str(soversion)
else:
suffix = ""
longsuffix = ""
if per_os_targets:
names = [
(
pattern % (name, ""),
pattern % (name, suffix),
pattern % (name, longsuffix),
)
for pattern in SHARED_LIBRARY_NAME_PATTERNS
]
else:
names = [(
name,
name + suffix,
name + longsuffix,
)]
for name_os, name_os_major, name_os_full in names:
        # Windows DLLs can't be versioned
if name_os.endswith(".dll"):
name_os_major = name_os
name_os_full = name_os
if name_os != name_os_major:
native.genrule(
name = name_os + "_sym",
outs = [name_os],
srcs = [name_os_major],
output_to_bindir = 1,
cmd = "ln -sf $$(basename $<) $@",
)
native.genrule(
name = name_os_major + "_sym",
outs = [name_os_major],
srcs = [name_os_full],
output_to_bindir = 1,
cmd = "ln -sf $$(basename $<) $@",
)
soname = name_os_major.split("/")[-1]
data_extra = []
if framework_so != []:
data_extra = tf_binary_additional_data_deps()
native.cc_binary(
name = name_os_full,
srcs = srcs + framework_so,
deps = deps,
linkshared = 1,
data = data + data_extra,
linkopts = linkopts + _rpath_linkopts(name_os_full) + select({
clean_dep("//tensorflow:macos"): [
"-Wl,-install_name,@rpath/" + soname,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,-soname," + soname,
],
}),
visibility = visibility,
**kwargs
)
flat_names = [item for sublist in names for item in sublist]
if name not in flat_names:
native.filegroup(
name = name,
srcs = select({
"//tensorflow:windows": [":%s.dll" % (name)],
"//tensorflow:macos": [":lib%s%s.dylib" % (name, longsuffix)],
"//conditions:default": [":lib%s.so%s" % (name, longsuffix)],
}),
visibility = visibility,
)
register_extension_info(
extension_name = "tf_cc_shared_object",
label_regex_for_dep = "{extension_name}",
)
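# Editor note / usage sketch (hypothetical, not part of this file): one way the
# macro above might be invoked from a BUILD file. With soversion set and
# per_os_targets = True, it emits libfoo.so, libfoo.so.1 and libfoo.so.1.15.5
# on Linux (and the analogous .dylib / .dll names elsewhere), with the shorter
# names created as symlink genrules.
#
# tf_cc_shared_object(
#     name = "foo",
#     srcs = ["foo.cc"],
#     soversion = VERSION,
#     per_os_targets = True,
#     deps = ["//tensorflow/core:framework"],
# )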
# Links in the framework shared object
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
def tf_cc_binary(
name,
srcs = [],
deps = [],
data = [],
linkopts = [],
copts = tf_copts(),
kernels = [],
per_os_targets = False, # Generate targets with SHARED_LIBRARY_NAME_PATTERNS
visibility = None,
**kwargs):
if kernels:
added_data_deps = tf_binary_dynamic_kernel_dsos()
else:
added_data_deps = []
if per_os_targets:
names = [pattern % (name, "") for pattern in SHARED_LIBRARY_NAME_PATTERNS]
else:
names = [name]
for name_os in names:
native.cc_binary(
name = name_os,
copts = copts,
srcs = srcs + tf_binary_additional_srcs(),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + if_mkl_ml(
[
clean_dep("//third_party/mkl:intel_binary_blob"),
],
),
data = depset(data + added_data_deps),
linkopts = linkopts + _rpath_linkopts(name_os),
visibility = visibility,
**kwargs
)
if name not in names:
native.filegroup(
name = name,
srcs = select({
"//tensorflow:windows": [":%s.dll" % name],
"//tensorflow:macos": [":lib%s.dylib" % name],
"//conditions:default": [":lib%s.so" % name],
}),
visibility = visibility,
)
register_extension_info(
extension_name = "tf_cc_binary",
label_regex_for_dep = "{extension_name}.*",
)
# A simple wrapper around the native.cc_binary rule.
# When using this rule, you should realize it doesn't link to any tensorflow
# dependencies by default.
def tf_native_cc_binary(
name,
copts = tf_copts(),
linkopts = [],
**kwargs):
native.cc_binary(
name = name,
copts = copts,
linkopts = select({
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:macos"): [
"-lm",
],
"//conditions:default": [
"-lpthread",
"-lm",
],
}) + linkopts + _rpath_linkopts(name),
**kwargs
)
register_extension_info(
extension_name = "tf_native_cc_binary",
label_regex_for_dep = "{extension_name}.*",
)
def tf_gen_op_wrapper_cc(
name,
out_ops_file,
pkg = "",
op_gen = clean_dep("//tensorflow/cc:cc_op_gen_main"),
deps = None,
include_internal_ops = 0,
# ApiDefs will be loaded in the order specified in this list.
api_def_srcs = []):
# Construct an op generator binary for these ops.
tool = out_ops_file + "_gen_cc"
if deps == None:
deps = [pkg + ":" + name + "_op_lib"]
tf_cc_binary(
name = tool,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]),
linkstatic = 1, # Faster to link this one-time-use binary dynamically
deps = [op_gen] + deps,
)
srcs = api_def_srcs[:]
if not api_def_srcs:
api_def_args_str = ","
else:
api_def_args = []
for api_def_src in api_def_srcs:
# Add directory of the first ApiDef source to args.
# We are assuming all ApiDefs in a single api_def_src are in the
# same directory.
api_def_args.append(
" $$(dirname $$(echo $(locations " + api_def_src +
") | cut -d\" \" -f1))",
)
api_def_args_str = ",".join(api_def_args)
native.genrule(
name = name + "_genrule",
outs = [
out_ops_file + ".h",
out_ops_file + ".cc",
out_ops_file + "_internal.h",
out_ops_file + "_internal.cc",
],
srcs = srcs,
tools = [":" + tool] + tf_binary_additional_srcs(),
cmd = ("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
"$(location :" + out_ops_file + ".cc) " +
str(include_internal_ops) + " " + api_def_args_str),
)
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate individual C++ .cc and .h
# files for each of the ops files mentioned, and then generate a
# single cc_library called "name" that combines all the
# generated C++ code.
#
# For example, for:
# tf_gen_op_wrappers_cc("tf_ops_lib", [ "array_ops", "math_ops" ])
#
#
# This will ultimately generate ops/* files and a library like:
#
# cc_library(name = "tf_ops_lib",
# srcs = [ "ops/array_ops.cc",
# "ops/math_ops.cc" ],
# hdrs = [ "ops/array_ops.h",
# "ops/math_ops.h" ],
# deps = [ ... ])
#
# Plus a private library for the "hidden" ops.
# cc_library(name = "tf_ops_lib_internal",
# srcs = [ "ops/array_ops_internal.cc",
# "ops/math_ops_internal.cc" ],
# hdrs = [ "ops/array_ops_internal.h",
# "ops/math_ops_internal.h" ],
# deps = [ ... ])
# TODO(joshl): Cleaner approach for hidden ops.
def tf_gen_op_wrappers_cc(
name,
op_lib_names = [],
other_srcs = [],
other_hdrs = [],
other_srcs_internal = [],
other_hdrs_internal = [],
pkg = "",
deps = [
clean_dep("//tensorflow/cc:ops"),
clean_dep("//tensorflow/cc:scope"),
clean_dep("//tensorflow/cc:const_op"),
],
deps_internal = [],
op_gen = clean_dep("//tensorflow/cc:cc_op_gen_main"),
include_internal_ops = 0,
visibility = None,
# ApiDefs will be loaded in the order specified in this list.
api_def_srcs = [],
# Any extra dependencies that the wrapper generator might need.
extra_gen_deps = []):
subsrcs = other_srcs[:]
subhdrs = other_hdrs[:]
internalsrcs = other_srcs_internal[:]
internalhdrs = other_hdrs_internal[:]
for n in op_lib_names:
tf_gen_op_wrapper_cc(
n,
"ops/" + n,
api_def_srcs = api_def_srcs,
include_internal_ops = include_internal_ops,
op_gen = op_gen,
pkg = pkg,
deps = [pkg + ":" + n + "_op_lib"] + extra_gen_deps,
)
subsrcs += ["ops/" + n + ".cc"]
subhdrs += ["ops/" + n + ".h"]
internalsrcs += ["ops/" + n + "_internal.cc"]
internalhdrs += ["ops/" + n + "_internal.h"]
native.cc_library(
name = name,
srcs = subsrcs,
hdrs = subhdrs,
deps = deps + if_not_android([
clean_dep("//tensorflow/core:core_cpu"),
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:lib"),
clean_dep("//tensorflow/core:ops"),
clean_dep("//tensorflow/core:protos_all_cc"),
]) + if_android([
clean_dep("//tensorflow/core:android_tensorflow_lib"),
]),
copts = tf_copts(),
alwayslink = 1,
visibility = visibility,
)
native.cc_library(
name = name + "_internal",
srcs = internalsrcs,
hdrs = internalhdrs,
deps = deps + deps_internal + if_not_android([
clean_dep("//tensorflow/core:core_cpu"),
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:lib"),
clean_dep("//tensorflow/core:ops"),
clean_dep("//tensorflow/core:protos_all_cc"),
]) + if_android([
clean_dep("//tensorflow/core:android_tensorflow_lib"),
]),
copts = tf_copts(),
alwayslink = 1,
visibility = [clean_dep("//tensorflow:internal")],
)
# Generates a Python library target wrapping the ops registered in "deps".
#
# Args:
# name: used as the name of the generated target and as a name component of
# the intermediate files.
# out: name of the python file created by this rule. If None, then
# "ops/gen_{name}.py" is used.
# hidden: Optional list of ops names to make private in the Python module.
# It is invalid to specify both "hidden" and "op_whitelist".
# visibility: passed to py_library.
# deps: list of dependencies for the intermediate tool used to generate the
# python target. NOTE these `deps` are not applied to the final python
# library target itself.
# require_shape_functions: leave this as False.
# hidden_file: optional file that contains a list of op names to make private
# in the generated Python module. Each op name should be on a line by
# itself. Lines that start with characters that are invalid op name
# starting characters are treated as comments and ignored.
# generated_target_name: name of the generated target (overrides the
# "name" arg)
# op_whitelist: if not empty, only op names in this list will be wrapped. It
# is invalid to specify both "hidden" and "op_whitelist".
# cc_linkopts: Optional linkopts to be added to tf_cc_binary that contains the
# specified ops.
def tf_gen_op_wrapper_py(
name,
out = None,
hidden = None,
visibility = None,
deps = [],
require_shape_functions = False,
hidden_file = None,
generated_target_name = None,
op_whitelist = [],
cc_linkopts = [],
api_def_srcs = []):
if (hidden or hidden_file) and op_whitelist:
fail("Cannot pass specify both hidden and op_whitelist.")
# Construct a cc_binary containing the specified ops.
tool_name = "gen_" + name + "_py_wrappers_cc"
if not deps:
deps = [str(Label("//tensorflow/core:" + name + "_op_lib"))]
tf_cc_binary(
name = tool_name,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]) + cc_linkopts,
linkstatic = 1, # Faster to link this one-time-use binary dynamically
visibility = [clean_dep("//tensorflow:internal")],
deps = ([
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/python:python_op_gen_main"),
] + deps),
)
# Invoke the previous cc_binary to generate a python file.
if not out:
out = "ops/gen_" + name + ".py"
if hidden:
op_list_arg = ",".join(hidden)
op_list_is_whitelist = False
elif op_whitelist:
op_list_arg = ",".join(op_whitelist)
op_list_is_whitelist = True
else:
op_list_arg = "''"
op_list_is_whitelist = False
# Prepare ApiDef directories to pass to the genrule.
if not api_def_srcs:
api_def_args_str = ","
else:
api_def_args = []
for api_def_src in api_def_srcs:
# Add directory of the first ApiDef source to args.
# We are assuming all ApiDefs in a single api_def_src are in the
# same directory.
api_def_args.append(
"$$(dirname $$(echo $(locations " + api_def_src +
") | cut -d\" \" -f1))",
)
api_def_args_str = ",".join(api_def_args)
if hidden_file:
# `hidden_file` is file containing a list of op names to be hidden in the
# generated module.
native.genrule(
name = name + "_pygenrule",
outs = [out],
srcs = api_def_srcs + [hidden_file],
tools = [tool_name] + tf_binary_additional_srcs(),
cmd = ("$(location " + tool_name + ") " + api_def_args_str +
" @$(location " + hidden_file + ") " +
("1" if require_shape_functions else "0") + " > $@"),
)
else:
native.genrule(
name = name + "_pygenrule",
outs = [out],
srcs = api_def_srcs,
tools = [tool_name] + tf_binary_additional_srcs(),
cmd = ("$(location " + tool_name + ") " + api_def_args_str + " " +
op_list_arg + " " +
("1" if require_shape_functions else "0") + " " +
("1" if op_list_is_whitelist else "0") + " > $@"),
)
# Make a py_library out of the generated python file.
if not generated_target_name:
generated_target_name = name
native.py_library(
name = generated_target_name,
srcs = [out],
srcs_version = "PY2AND3",
visibility = visibility,
deps = [
clean_dep("//tensorflow/python:framework_for_generated_wrappers_v2"),
],
# Instruct build_cleaner to try to avoid using this rule; typically ops
# creators will provide their own tf_custom_op_py_library based target
# that wraps this one.
tags = ["avoid_dep"],
)
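# Editor note / usage sketch (hypothetical, not part of this file): given an op
# library registered via tf_gen_op_libs(["array_ops"]), the macro above
# generates ops/gen_array_ops.py and wraps it in a py_library; here the
# generated target is renamed via generated_target_name.
#
# tf_gen_op_wrapper_py(
#     name = "array_ops",
#     generated_target_name = "array_ops_gen",
#     deps = ["//tensorflow/core:array_ops_op_lib"],
# )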
# Define a bazel macro that creates cc_test for tensorflow.
#
# Links in the framework shared object
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
#
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Need more investigations.
def tf_cc_test(
name,
srcs,
deps,
data = [],
linkstatic = 0,
extra_copts = [],
suffix = "",
linkopts = [],
kernels = [],
**kwargs):
native.cc_test(
name = "%s%s" % (name, suffix),
srcs = srcs + tf_binary_additional_srcs(),
copts = tf_copts() + extra_copts,
linkopts = select({
clean_dep("//tensorflow:android"): [
"-pie",
],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:macos"): [
"-lm",
],
"//conditions:default": [
"-lpthread",
"-lm",
],
}) + linkopts + _rpath_linkopts(name),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + if_mkl_ml(
[
clean_dep("//third_party/mkl:intel_binary_blob"),
],
),
data = data +
tf_binary_dynamic_kernel_dsos() +
tf_binary_additional_srcs(),
exec_compatible_with = tf_exec_compatible_with(kwargs),
# Nested select() statements seem not to be supported when passed to
# linkstatic, and we already have a cuda select() passed in to this
# function.
linkstatic = linkstatic or select({
# cc_tests with ".so"s in srcs incorrectly link on Darwin unless
# linkstatic=1 (https://github.com/bazelbuild/bazel/issues/3450).
# TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
clean_dep("//tensorflow:macos"): 1,
"//conditions:default": 0,
}),
**kwargs
)
register_extension_info(
extension_name = "tf_cc_test",
label_regex_for_dep = "{extension_name}.*",
)
# Part of the testing workflow requires a distinguishable name for the build
# rules that involve a GPU, even if otherwise identical to the base rule.
def tf_cc_test_gpu(
name,
srcs,
deps,
linkstatic = 0,
tags = [],
data = [],
size = "medium",
suffix = "",
args = None):
tf_cc_test(
name,
srcs,
deps,
size = size,
args = args,
data = data,
linkstatic = linkstatic,
suffix = suffix,
tags = tags,
)
register_extension_info(
extension_name = "tf_cc_test_gpu",
label_regex_for_dep = "{extension_name}",
)
def tf_gpu_cc_test(
name,
srcs = [],
deps = [],
tags = [],
data = [],
size = "medium",
extra_copts = [],
linkstatic = 0,
args = [],
kernels = [],
linkopts = []):
tf_cc_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
extra_copts = extra_copts + if_cuda(["-DNV_CUDNN_DISABLE_EXCEPTION"]),
kernels = kernels,
linkopts = linkopts,
linkstatic = linkstatic,
tags = tags + ["manual"],
deps = deps,
)
tf_cc_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
extra_copts = extra_copts + if_cuda(["-DNV_CUDNN_DISABLE_EXCEPTION"]),
kernels = kernels,
linkopts = linkopts,
linkstatic = select({
# TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
clean_dep("//tensorflow:macos"): 1,
"@local_config_cuda//cuda:using_nvcc": 1,
"@local_config_cuda//cuda:using_clang": 1,
"//conditions:default": 0,
}),
suffix = "_gpu",
tags = tags + tf_gpu_tests_tags(),
deps = deps + if_cuda_is_configured([
clean_dep("//tensorflow/core:gpu_runtime"),
]) + if_rocm_is_configured([
clean_dep("//tensorflow/core:gpu_runtime"),
]),
)
register_extension_info(
extension_name = "tf_gpu_cc_test",
label_regex_for_dep = "{extension_name}",
)
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_cc_test(*args, **kwargs):
tf_gpu_cc_test(*args, **kwargs)
register_extension_info(
extension_name = "tf_cuda_cc_test",
label_regex_for_dep = "{extension_name}",
)
def tf_gpu_only_cc_test(
name,
srcs = [],
deps = [],
tags = [],
data = [],
size = "medium",
linkstatic = 0,
args = [],
kernels = [],
linkopts = []):
tags = tags + tf_gpu_tests_tags()
native.cc_test(
name = "%s%s" % (name, "_gpu"),
srcs = srcs + tf_binary_additional_srcs(),
size = size,
args = args,
copts = _cuda_copts() + rocm_copts() + tf_copts(),
features = if_cuda(["-use_header_modules"]),
data = data + tf_binary_dynamic_kernel_dsos(),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + if_cuda_is_configured([
clean_dep("//tensorflow/core:cuda"),
clean_dep("//tensorflow/core:gpu_lib"),
]) + if_rocm_is_configured([
clean_dep("//tensorflow/core:gpu_lib"),
]),
linkopts = if_not_windows(["-lpthread", "-lm"]) + linkopts + _rpath_linkopts(name),
linkstatic = linkstatic or select({
# cc_tests with ".so"s in srcs incorrectly link on Darwin
# unless linkstatic=1.
# TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
clean_dep("//tensorflow:macos"): 1,
"//conditions:default": 0,
}),
tags = tags,
exec_compatible_with = tf_exec_compatible_with({"tags": tags}),
)
register_extension_info(
extension_name = "tf_gpu_only_cc_test",
label_regex_for_dep = "{extension_name}_gpu",
)
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_only_cc_test(*args, **kwargs):
tf_gpu_only_cc_test(*args, **kwargs)
register_extension_info(
extension_name = "tf_cuda_only_cc_test",
label_regex_for_dep = "{extension_name}_gpu",
)
# Create a cc_test for each of the tensorflow tests listed in "tests"
def tf_cc_tests(
srcs,
deps,
name = "",
linkstatic = 0,
tags = [],
size = "medium",
args = None,
linkopts = [],
kernels = []):
for src in srcs:
tf_cc_test(
name = src_to_test_name(src),
size = size,
srcs = [src],
args = args,
kernels = kernels,
linkopts = linkopts,
linkstatic = linkstatic,
tags = tags,
deps = deps,
)
def tf_cc_test_mkl(
srcs,
deps,
name = "",
data = [],
linkstatic = 0,
tags = [],
size = "medium",
kernels = [],
args = None):
# -fno-exceptions in nocopts breaks compilation if header modules are enabled.
disable_header_modules = ["-use_header_modules"]
for src in srcs:
native.cc_test(
name = src_to_test_name(src),
srcs = if_mkl([src]) + tf_binary_additional_srcs(),
copts = tf_copts(allow_exceptions = True) + tf_openmp_copts(),
linkopts = select({
clean_dep("//tensorflow:android"): [
"-pie",
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-lpthread",
"-lm",
],
}) + _rpath_linkopts(src_to_test_name(src)),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + if_mkl_ml(["//third_party/mkl:intel_binary_blob"]),
data = data + tf_binary_dynamic_kernel_dsos(),
exec_compatible_with = tf_exec_compatible_with({"tags": tags}),
linkstatic = linkstatic,
tags = tags,
size = size,
args = args,
features = disable_header_modules,
)
def tf_cc_tests_gpu(
srcs,
deps,
name = "",
linkstatic = 0,
tags = [],
size = "medium",
kernels = [],
args = None):
tf_cc_tests(srcs, deps, linkstatic = linkstatic, size = size, args = args, kernels = kernels, tags = tags)
def tf_gpu_cc_tests(
srcs,
deps,
name = "",
tags = [],
size = "medium",
linkstatic = 0,
args = None,
kernels = [],
linkopts = []):
for src in srcs:
tf_gpu_cc_test(
name = src_to_test_name(src),
size = size,
srcs = [src],
args = args,
kernels = kernels,
linkopts = linkopts,
linkstatic = linkstatic,
tags = tags,
deps = deps,
)
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_cc_tests(*args, **kwargs):
tf_gpu_cc_tests(*args, **kwargs)
def tf_java_test(
name,
srcs = [],
deps = [],
kernels = [],
*args,
**kwargs):
native.java_test(
name = name,
srcs = srcs,
deps = deps + tf_binary_additional_srcs(fullversion = True) + tf_binary_dynamic_kernel_dsos() + tf_binary_dynamic_kernel_deps(kernels),
*args,
**kwargs
)
register_extension_info(
extension_name = "tf_java_test",
label_regex_for_dep = "{extension_name}",
)
def _cuda_copts(opts = []):
"""Gets the appropriate set of copts for (maybe) CUDA compilation.
If we're doing CUDA compilation, returns copts for our particular CUDA
compiler. If we're not doing CUDA compilation, returns an empty list.
"""
return cuda_default_copts() + select({
"//conditions:default": [],
"@local_config_cuda//cuda:using_nvcc": ([
"-nvcc_options=relaxed-constexpr",
"-nvcc_options=ftz=true",
]),
"@local_config_cuda//cuda:using_clang": ([
"-fcuda-flush-denormals-to-zero",
]),
}) + if_cuda_is_configured_compat(opts)
# Build defs for TensorFlow kernels
# When this target is built using --config=cuda, a cc_library is built
# that passes -DGOOGLE_CUDA=1 and '-x cuda', linking in additional
# libraries needed by GPU kernels.
#
# When this target is built using --config=rocm, a cc_library is built
# that passes -DTENSORFLOW_USE_ROCM and '-x rocm', linking in additional
# libraries needed by GPU kernels.
def tf_gpu_kernel_library(
srcs,
copts = [],
cuda_copts = [],
deps = [],
hdrs = [],
**kwargs):
copts = copts + tf_copts() + _cuda_copts(opts = cuda_copts) + rocm_copts(opts = cuda_copts)
kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
native.cc_library(
srcs = srcs,
hdrs = hdrs,
copts = copts,
deps = deps + if_cuda_is_configured_compat([
clean_dep("//tensorflow/stream_executor/cuda:cudart_stub"),
clean_dep("//tensorflow/core:gpu_lib"),
]) + if_rocm_is_configured([
clean_dep("//tensorflow/core:gpu_lib"),
]),
alwayslink = 1,
**kwargs
)
register_extension_info(
extension_name = "tf_gpu_kernel_library",
label_regex_for_dep = "{extension_name}",
)
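# Sketch of a tf_gpu_kernel_library call (file and target names are
# hypothetical). The macro compiles the .cu.cc sources with the CUDA/ROCm
# copts computed above and links the result with alwayslink = 1:
#
#   tf_gpu_kernel_library(
#       name = "example_op_gpu",
#       srcs = ["example_op_gpu.cu.cc"],
#       hdrs = ["example_op.h"],
#       deps = ["//tensorflow/core:framework"],
#   )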
def tf_gpu_library(deps = None, cuda_deps = None, copts = tf_copts(), **kwargs):
"""Generate a cc_library with a conditional set of CUDA dependencies.
When the library is built with --config=cuda:
- Both deps and cuda_deps are used as dependencies.
- The cuda runtime is added as a dependency (if necessary).
- The library additionally passes -DGOOGLE_CUDA=1 to the list of copts.
- In addition, when the library is also built with TensorRT enabled, it
additionally passes -DGOOGLE_TENSORRT=1 to the list of copts. Likewise
for NCCL and -DGOOGLE_NCCL=1.
Args:
- cuda_deps: BUILD dependencies which will be linked if and only if:
'--config=cuda' is passed to the bazel command line.
- deps: dependencies which will always be linked.
- copts: copts always passed to the cc_library.
- kwargs: Any other argument to cc_library.
"""
if not deps:
deps = []
if not cuda_deps:
cuda_deps = []
kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
native.cc_library(
deps = deps + if_cuda_is_configured_compat(cuda_deps + [
clean_dep("//tensorflow/stream_executor/cuda:cudart_stub"),
"@local_config_cuda//cuda:cuda_headers",
]) + if_rocm_is_configured(cuda_deps + [
"@local_config_rocm//rocm:rocm_headers",
]),
copts = (copts + if_cuda(["-DGOOGLE_CUDA=1", "-DNV_CUDNN_DISABLE_EXCEPTION"]) + if_rocm(["-DTENSORFLOW_USE_ROCM=1"]) + if_mkl(["-DINTEL_MKL=1"]) + if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"]) + if_enable_mkl(["-DENABLE_MKL"]) + if_tensorrt(["-DGOOGLE_TENSORRT=1"]) + if_nccl(["-DGOOGLE_NCCL=1"])),
**kwargs
)
register_extension_info(
extension_name = "tf_gpu_library",
label_regex_for_dep = "{extension_name}",
)
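# Usage sketch for tf_gpu_library (labels are illustrative): deps are always
# linked, while cuda_deps are only linked when building with --config=cuda
# or --config=rocm:
#
#   tf_gpu_library(
#       name = "example_lib",
#       srcs = ["example_lib.cc"],
#       hdrs = ["example_lib.h"],
#       deps = ["//tensorflow/core:lib"],
#       cuda_deps = ["//tensorflow/core:gpu_lib"],
#   )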
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_library(*args, **kwargs):
tf_gpu_library(*args, **kwargs)
register_extension_info(
extension_name = "tf_cuda_library",
label_regex_for_dep = "{extension_name}",
)
def tf_kernel_library(
name,
prefix = None,
srcs = None,
gpu_srcs = None,
hdrs = None,
deps = None,
alwayslink = 1,
copts = None,
gpu_copts = None,
is_external = False,
**kwargs):
"""A rule to build a TensorFlow OpKernel.
May either specify srcs/hdrs or prefix. Similar to tf_gpu_library,
but with alwayslink=1 by default. If prefix is specified:
* prefix*.cc (except *.cu.cc) is added to srcs
* prefix*.h (except *.cu.h) is added to hdrs
* prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
With the exception that test files are excluded.
For example, with prefix = "cast_op",
* srcs = ["cast_op.cc"]
* hdrs = ["cast_op.h"]
* gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
* "cast_op_test.cc" is excluded
With prefix = "cwise_op"
* srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
* hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
* gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
"cwise_ops.h", "cwise_ops_common.h",
"cwise_ops_gpu_common.cu.h"]
* "cwise_ops_test.cc" is excluded
"""
if not srcs:
srcs = []
if not hdrs:
hdrs = []
if not deps:
deps = []
if not copts:
copts = []
if not gpu_copts:
gpu_copts = []
textual_hdrs = []
copts = copts + tf_copts(is_external = is_external) + if_cuda(["-DNV_CUDNN_DISABLE_EXCEPTION"])
# Override EIGEN_STRONG_INLINE to inline when
# --define=override_eigen_strong_inline=true to avoid long compiling time.
# See https://github.com/tensorflow/tensorflow/issues/10521
copts = copts + if_override_eigen_strong_inline(["/DEIGEN_STRONG_INLINE=inline"])
if prefix:
if native.glob([prefix + "*.cu.cc"], exclude = ["*test*"]):
if not gpu_srcs:
gpu_srcs = []
gpu_srcs = gpu_srcs + native.glob(
[prefix + "*.cu.cc", prefix + "*.h"],
exclude = [prefix + "*test*"],
)
srcs = srcs + native.glob(
[prefix + "*.cc"],
exclude = [prefix + "*test*", prefix + "*.cu.cc"],
)
hdrs = hdrs + native.glob(
[prefix + "*.h"],
exclude = [prefix + "*test*", prefix + "*.cu.h", prefix + "*impl.h"],
)
textual_hdrs = native.glob(
[prefix + "*impl.h"],
exclude = [prefix + "*test*", prefix + "*.cu.h"],
)
cuda_deps = [clean_dep("//tensorflow/core:gpu_lib")]
if gpu_srcs:
for gpu_src in gpu_srcs:
if gpu_src.endswith(".cc") and not gpu_src.endswith(".cu.cc"):
fail("{} not allowed in gpu_srcs. .cc sources must end with .cu.cc"
.format(gpu_src))
tf_gpu_kernel_library(
name = name + "_gpu",
srcs = gpu_srcs,
deps = deps,
copts = gpu_copts,
**kwargs
)
cuda_deps.extend([":" + name + "_gpu"])
kwargs["tags"] = kwargs.get("tags", []) + [
"req_dep=%s" % clean_dep("//tensorflow/core:gpu_lib"),
"req_dep=@local_config_cuda//cuda:cuda_headers",
]
tf_gpu_library(
name = name,
srcs = srcs,
hdrs = hdrs,
textual_hdrs = textual_hdrs,
copts = copts,
cuda_deps = cuda_deps,
linkstatic = 1, # Needed since alwayslink is broken in bazel b/27630669
alwayslink = alwayslink,
deps = deps,
**kwargs
)
# TODO(gunan): CUDA dependency not clear here. Fix it.
tf_cc_shared_object(
name = "libtfkernel_%s.so" % name,
srcs = srcs + hdrs,
copts = copts,
tags = ["manual", "notap"],
deps = deps,
)
register_extension_info(
extension_name = "tf_kernel_library",
label_regex_for_dep = "{extension_name}(_gpu)?",
)
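# Usage sketch mirroring the "cast_op" example in the docstring above (the
# dependency label is illustrative). With prefix = "cast_op", the globs pick
# up cast_op.cc, cast_op.h and cast_op_gpu.cu.cc while excluding the tests:
#
#   tf_kernel_library(
#       name = "cast_op",
#       prefix = "cast_op",
#       deps = ["//tensorflow/core:framework"],
#   )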
def tf_mkl_kernel_library(
name,
prefix = None,
srcs = None,
hdrs = None,
deps = None,
alwayslink = 1,
copts = tf_copts(allow_exceptions = True) + tf_openmp_copts()):
"""A rule to build MKL-based TensorFlow kernel libraries."""
if not bool(srcs):
srcs = []
if not bool(hdrs):
hdrs = []
if prefix:
srcs = srcs + native.glob(
[prefix + "*.cc"],
exclude = [prefix + "*test*"],
)
hdrs = hdrs + native.glob(
[prefix + "*.h"],
exclude = [prefix + "*test*"],
)
# -fno-exceptions in nocopts breaks compilation if header modules are enabled.
disable_header_modules = ["-use_header_modules"]
native.cc_library(
name = name,
srcs = if_mkl(srcs),
hdrs = hdrs,
deps = deps,
alwayslink = alwayslink,
copts = copts,
features = disable_header_modules,
)
register_extension_info(
extension_name = "tf_mkl_kernel_library",
label_regex_for_dep = "{extension_name}",
)
def _get_transitive_headers(hdrs, deps):
"""Obtain the header files for a target and its transitive dependencies.
Args:
hdrs: a list of header files
deps: a list of targets that are direct dependencies
Returns:
a collection of the transitive headers
"""
return depset(
hdrs,
transitive = [dep[CcInfo].compilation_context.headers for dep in deps],
)
# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx):
srcs = ctx.files.srcs
if len(srcs) != 1:
fail("Exactly one SWIG source file label must be specified.", "srcs")
module_name = ctx.attr.module_name
src = ctx.files.srcs[0]
inputs = _get_transitive_headers([src] + ctx.files.swig_includes, ctx.attr.deps)
inputs = depset(ctx.files._swiglib, transitive = [inputs])
inputs = depset(ctx.files.toolchain_deps, transitive = [inputs])
swig_include_dirs = depset(_get_repository_roots(ctx, inputs))
swig_include_dirs = depset(sorted([f.dirname for f in ctx.files._swiglib]), transitive = [swig_include_dirs])
args = [
"-c++",
"-python",
"-module",
module_name,
"-o",
ctx.outputs.cc_out.path,
"-outdir",
ctx.outputs.py_out.dirname,
]
args += ["-l" + f.path for f in ctx.files.swig_includes]
args += ["-I" + i for i in swig_include_dirs.to_list()]
args += [src.path]
outputs = [ctx.outputs.cc_out, ctx.outputs.py_out]
ctx.actions.run(
executable = ctx.executable._swig,
arguments = args,
inputs = inputs.to_list(),
outputs = outputs,
mnemonic = "PythonSwig",
progress_message = "SWIGing " + src.path,
)
return struct(files = depset(outputs))
_py_wrap_cc = rule(
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
),
"swig_includes": attr.label_list(
allow_files = True,
),
"deps": attr.label_list(
allow_files = True,
providers = [CcInfo],
),
"toolchain_deps": attr.label_list(
allow_files = True,
),
"module_name": attr.string(mandatory = True),
"py_module_name": attr.string(mandatory = True),
"_swig": attr.label(
default = Label("@swig//:swig"),
executable = True,
cfg = "host",
),
"_swiglib": attr.label(
default = Label("@swig//:templates"),
allow_files = True,
),
},
outputs = {
"cc_out": "%{module_name}.cc",
"py_out": "%{py_module_name}.py",
},
implementation = _py_wrap_cc_impl,
)
def _get_repository_roots(ctx, files):
"""Returns abnormal root directories under which files reside.
When running a ctx.action, source files within the main repository are all
relative to the current directory; however, files that are generated or exist
in remote repositories will have their root directory be a subdirectory,
e.g. bazel-out/local-fastbuild/genfiles/external/jpeg_archive. This function
returns the set of these devious directories, ranked and sorted by popularity
in order to hopefully minimize the number of I/O system calls within the
compiler, because includes have quadratic complexity.
"""
result = {}
for f in files.to_list():
root = f.root.path
if root:
if root not in result:
result[root] = 0
result[root] -= 1
work = f.owner.workspace_root
if work:
if root:
root += "/"
root += work
if root:
if root not in result:
result[root] = 0
result[root] -= 1
return [k for v, k in sorted([(v, k) for k, v in result.items()])]
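# Worked example (paths are illustrative): a header generated into
# root "bazel-out/local-fastbuild/genfiles" and owned by the external
# repository "external/jpeg_archive" contributes both
# "bazel-out/local-fastbuild/genfiles" and
# "bazel-out/local-fastbuild/genfiles/external/jpeg_archive" to the result,
# with roots shared by more files sorted first.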
# Bazel rule for collecting the header files that a target depends on.
def _transitive_hdrs_impl(ctx):
outputs = _get_transitive_headers([], ctx.attr.deps)
return struct(files = outputs)
_transitive_hdrs = rule(
attrs = {
"deps": attr.label_list(
allow_files = True,
providers = [CcInfo],
),
},
implementation = _transitive_hdrs_impl,
)
def transitive_hdrs(name, deps = [], **kwargs):
_transitive_hdrs(name = name + "_gather", deps = deps)
native.filegroup(name = name, srcs = [":" + name + "_gather"])
# Create a header only library that includes all the headers exported by
# the libraries in deps.
def cc_header_only_library(name, deps = [], includes = [], extra_deps = [], **kwargs):
_transitive_hdrs(name = name + "_gather", deps = deps)
native.cc_library(
name = name,
hdrs = [":" + name + "_gather"],
includes = includes,
deps = extra_deps,
**kwargs
)
def tf_custom_op_library_additional_deps():
return [
"@com_google_protobuf//:protobuf_headers",
clean_dep("//third_party/eigen3"),
clean_dep("//tensorflow/core:framework_headers_lib"),
] + if_windows(["//tensorflow/python:pywrap_tensorflow_import_lib"])
# A list of targets that contains the implementation of
# tf_custom_op_library_additional_deps. It's used to generate a DEF file for
# exporting symbols from _pywrap_tensorflow.dll on Windows.
def tf_custom_op_library_additional_deps_impl():
return [
"@com_google_protobuf//:protobuf",
"@nsync//:nsync_cpp",
# for //third_party/eigen3
clean_dep("//third_party/eigen3"),
# for //tensorflow/core:framework_headers_lib
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:reader_base"),
]
# Traverse the dependency graph along the "deps" attribute of the
# target and return a struct with one field called 'tf_collected_deps'.
# tf_collected_deps will be the union of the deps of the current target
# and the tf_collected_deps of the dependencies of this target.
def _collect_deps_aspect_impl(target, ctx):
alldeps = depset()
if hasattr(ctx.rule.attr, "deps"):
for dep in ctx.rule.attr.deps:
alldeps = depset([dep.label], transitive = [alldeps])
if hasattr(dep, "tf_collected_deps"):
alldeps = depset(transitive = [alldeps, dep.tf_collected_deps])
return struct(tf_collected_deps = alldeps)
collect_deps_aspect = aspect(
attr_aspects = ["deps"],
implementation = _collect_deps_aspect_impl,
)
def _dep_label(dep):
label = dep.label
return label.package + ":" + label.name
# This rule checks that the transitive dependencies of targets listed
# in the 'deps' attribute don't depend on the targets listed in
# the 'disallowed_deps' attribute.
def _check_deps_impl(ctx):
disallowed_deps = ctx.attr.disallowed_deps
for input_dep in ctx.attr.deps:
if not hasattr(input_dep, "tf_collected_deps"):
continue
for dep in input_dep.tf_collected_deps.to_list():
for disallowed_dep in disallowed_deps:
if dep == disallowed_dep.label:
fail(
_dep_label(input_dep) + " cannot depend on " + _dep_label(
disallowed_dep,
),
)
return struct()
check_deps = rule(
_check_deps_impl,
attrs = {
"deps": attr.label_list(
aspects = [collect_deps_aspect],
mandatory = True,
allow_files = True,
),
"disallowed_deps": attr.label_list(
mandatory = True,
allow_files = True,
),
},
)
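# Illustrative check_deps usage (labels are made up); the build fails if any
# transitive dependency of ":custom_op" pulls in a disallowed target. The
# tf_custom_op_library macro below wires up exactly this kind of check:
#
#   check_deps(
#       name = "custom_op_check_deps",
#       deps = [":custom_op"],
#       disallowed_deps = [clean_dep("//tensorflow/core:framework")],
#   )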
def tf_custom_op_library(name, srcs = [], gpu_srcs = [], deps = [], linkopts = [], copts = [], **kwargs):
"""Helper to build a dynamic library (.so) from the sources containing implementations of custom ops and kernels.
"""
cuda_deps = [
clean_dep("//tensorflow/core:stream_executor_headers_lib"),
"@local_config_cuda//cuda:cuda_headers",
"@local_config_cuda//cuda:cudart_static",
]
rocm_deps = [
clean_dep("//tensorflow/core:stream_executor_headers_lib"),
]
deps = deps + tf_custom_op_library_additional_deps()
# Override EIGEN_STRONG_INLINE to inline when
# --define=override_eigen_strong_inline=true to avoid long compiling time.
# See https://github.com/tensorflow/tensorflow/issues/10521
copts = copts + if_override_eigen_strong_inline(["/DEIGEN_STRONG_INLINE=inline"]) + if_cuda(["-DNV_CUDNN_DISABLE_EXCEPTION"])
if gpu_srcs:
basename = name.split(".")[0]
native.cc_library(
name = basename + "_gpu",
srcs = gpu_srcs,
copts = copts + _cuda_copts() + if_tensorrt(["-DGOOGLE_TENSORRT=1"]) + if_nccl(["-DGOOGLE_NCCL=1"]),
features = if_cuda(["-use_header_modules"]),
deps = deps + if_cuda_is_configured_compat(cuda_deps) + if_rocm_is_configured(rocm_deps),
**kwargs
)
cuda_deps.extend([":" + basename + "_gpu"])
rocm_deps.extend([":" + basename + "_gpu"])
check_deps(
name = name + "_check_deps",
disallowed_deps = [
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:lib"),
],
deps = deps + if_cuda_is_configured_compat(cuda_deps) + if_rocm_is_configured(rocm_deps),
)
tf_cc_shared_object(
name = name,
srcs = srcs,
deps = deps + if_cuda_is_configured_compat(cuda_deps) + if_rocm_is_configured(rocm_deps),
data = if_static([name + "_check_deps"]),
copts = copts + tf_copts(is_external = True),
features = ["windows_export_all_symbols"],
linkopts = linkopts + select({
"//conditions:default": [
"-lm",
],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:macos"): [],
}),
**kwargs
)
register_extension_info(
extension_name = "tf_custom_op_library",
label_regex_for_dep = "{extension_name}",
)
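# A typical-looking invocation (file names are hypothetical); the macro adds
# tf_custom_op_library_additional_deps() and the check_deps rule defined above:
#
#   tf_custom_op_library(
#       name = "zero_out.so",
#       srcs = ["zero_out.cc"],
#   )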
def tf_custom_op_py_library(
name,
srcs = [],
dso = [],
kernels = [],
srcs_version = "PY2AND3",
visibility = None,
deps = []):
_ignore = [kernels]
native.py_library(
name = name,
data = dso,
srcs = srcs,
srcs_version = srcs_version,
visibility = visibility,
deps = deps,
)
register_extension_info(
extension_name = "tf_custom_op_py_library",
label_regex_for_dep = "{extension_name}",
)
# In libraries generated by tf_py_wrap_cc, module init functions are not
# exported unless they contain one of the keywords in the version file;
# this prevents custom python modules from being loaded.
# The rule below appends init_<module_name> to the list of exported
# functions in the version script.
def _append_init_to_versionscript_impl(ctx):
mod_name = ctx.attr.module_name
if ctx.attr.is_version_script:
ctx.actions.expand_template(
template = ctx.file.template_file,
output = ctx.outputs.versionscript,
substitutions = {
"global:": "global:\n init_%s;\n _init_%s;\n PyInit_*;\n _PyInit_*;" % (mod_name, mod_name),
},
is_executable = False,
)
else:
ctx.actions.expand_template(
template = ctx.file.template_file,
output = ctx.outputs.versionscript,
substitutions = {
"*tensorflow*": "*tensorflow*\ninit_%s\n_init_%s\nPyInit_*\n_PyInit_*\n" % (mod_name, mod_name),
},
is_executable = False,
)
_append_init_to_versionscript = rule(
attrs = {
"module_name": attr.string(mandatory = True),
"template_file": attr.label(
allow_single_file = True,
mandatory = True,
),
"is_version_script": attr.bool(
default = True,
doc = "whether target is a ld version script or exported symbol list",
mandatory = False,
),
},
outputs = {"versionscript": "%{name}.lds"},
implementation = _append_init_to_versionscript_impl,
)
def tf_py_wrap_cc(
name,
srcs,
swig_includes = [],
deps = [],
copts = [],
version_script = None,
**kwargs):
"""Builds a Python extension module."""
module_name = name.split("/")[-1]
# Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
# and use that as the name for the rule producing the .so file.
cc_library_base = "/".join(name.split("/")[:-1] + ["_" + module_name])
# TODO(b/137885063): tf_cc_shared_object needs to be cleaned up; we really
# shouldn't be passing a name qualified with .so here.
cc_library_name = cc_library_base + ".so"
cc_library_pyd_name = "/".join(
name.split("/")[:-1] + ["_" + module_name + ".pyd"],
)
extra_deps = []
_py_wrap_cc(
name = name + "_py_wrap",
srcs = srcs,
module_name = module_name,
py_module_name = name,
swig_includes = swig_includes,
toolchain_deps = ["@bazel_tools//tools/cpp:current_cc_toolchain"],
deps = deps + extra_deps,
)
if not version_script:
version_script = select({
"@local_config_cuda//cuda:darwin": clean_dep("//tensorflow:tf_exported_symbols.lds"),
"//conditions:default": clean_dep("//tensorflow:tf_version_script.lds"),
})
vscriptname = name + "_versionscript"
_append_init_to_versionscript(
name = vscriptname,
is_version_script = select({
"@local_config_cuda//cuda:darwin": False,
"//conditions:default": True,
}),
module_name = module_name,
template_file = version_script,
)
extra_linkopts = select({
"@local_config_cuda//cuda:darwin": [
"-Wl,-exported_symbols_list,$(location %s.lds)" % vscriptname,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,--version-script",
"$(location %s.lds)" % vscriptname,
],
})
extra_deps += select({
"@local_config_cuda//cuda:darwin": [
"%s.lds" % vscriptname,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"%s.lds" % vscriptname,
],
})
tf_cc_shared_object(
name = cc_library_name,
srcs = [module_name + ".cc"],
copts = copts + if_not_windows([
"-Wno-self-assign",
"-Wno-sign-compare",
"-Wno-write-strings",
]),
linkopts = extra_linkopts,
linkstatic = 1,
deps = deps + extra_deps,
**kwargs
)
# When a non-versioned .so is added as a 'src' to a bazel target, it uses
# -l%(so_name) instead of -l:%(so_file) during linking. When -l%(so_name)
# is passed to ld, it will look for an associated file with the schema
# lib%(so_name).so. Since pywrap_tensorflow is not explicitly versioned
# and is not prefixed with lib_, we add a rule for the creation of an .so
# file with the canonical lib schema (e.g. libNAME.so), so that
# -l%(so_name) is resolved during linking.
#
# See: https://github.com/bazelbuild/bazel/blob/7a6808260a733d50983c1adf0cf5a7493472267f/src/main/java/com/google/devtools/build/lib/rules/cpp/LibrariesToLinkCollector.java#L319
for pattern in SHARED_LIBRARY_NAME_PATTERNS:
name_os = pattern % (cc_library_base, "")
native.genrule(
name = name_os + "_rule",
srcs = [":" + cc_library_name],
outs = [name_os],
cmd = "cp $< $@",
)
native.genrule(
name = "gen_" + cc_library_pyd_name,
srcs = [":" + cc_library_name],
outs = [cc_library_pyd_name],
cmd = "cp $< $@",
)
native.py_library(
name = name,
srcs = [":" + name + ".py"],
srcs_version = "PY2AND3",
data = select({
clean_dep("//tensorflow:windows"): [":" + cc_library_pyd_name],
"//conditions:default": [":" + cc_library_name],
}),
)
# This macro is for running python tests against the system-installed pip
# package on Windows.
#
# py_test is built as an executable python zip file on Windows, which contains all
# dependencies of the target. Because of the C++ extensions, it would be very
# inefficient if the py_test zipped all runfiles, and we don't need them when running
# tests against the system-installed pip package. So we'd like to get rid of the deps
# of py_test in this case.
#
# In order to trigger the tests without a bazel clean after getting rid of the deps,
# we introduce the following:
# 1. When --define=no_tensorflow_py_deps=true, the py_test depends on a marker
# file of the pip package, so the test gets rerun when the pip package changes.
# Note that this only works on Windows. See the definition of
# //third_party/tensorflow/tools/pip_package:win_pip_package_marker for specific reasons.
# 2. When --define=no_tensorflow_py_deps=false (the default), it's a normal py_test.
def py_test(deps = [], data = [], kernels = [], **kwargs):
# Python version placeholder
native.py_test(
# TODO(jlebar): Ideally we'd use tcmalloc here.,
deps = select({
"//conditions:default": deps,
clean_dep("//tensorflow:no_tensorflow_py_deps"): [],
}),
data = data + select({
"//conditions:default": [],
clean_dep("//tensorflow:no_tensorflow_py_deps"): ["//tensorflow/tools/pip_package:win_pip_package_marker"],
}) + tf_binary_dynamic_kernel_dsos(),
exec_compatible_with = tf_exec_compatible_with(kwargs),
**kwargs
)
register_extension_info(
extension_name = "py_test",
label_regex_for_dep = "{extension_name}",
)
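# Example invocation sketch for the Windows pip-package mode described above
# (the test label is illustrative):
#
#   bazel test --define=no_tensorflow_py_deps=true //tensorflow/python:example_py_test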
# Similar to py_test above, this macro is used to exclude dependencies for some py_binary
# targets in order to reduce the size of //tensorflow/tools/pip_package:simple_console_windows.
# See https://github.com/tensorflow/tensorflow/issues/22390
def py_binary(name, deps = [], **kwargs):
# Add an extra target for dependencies to avoid nested select statement.
native.py_library(
name = name + "_deps",
deps = deps,
)
# Python version placeholder
native.py_binary(
name = name,
deps = select({
"//conditions:default": [":" + name + "_deps"],
clean_dep("//tensorflow:no_tensorflow_py_deps"): [],
}),
**kwargs
)
register_extension_info(
extension_name = "py_binary",
label_regex_for_dep = "{extension_name}",
)
def tf_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
tags = [],
shard_count = 1,
additional_deps = [],
additional_visibility = [],
kernels = [],
flaky = 0,
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
**kwargs):
"""Create one or more python tests with extra tensorflow dependencies."""
xla_test_true_list = []
# xla_enable_strict_auto_jit is used to run Tensorflow unit tests with all XLA compilable
# kernels compiled with XLA.
if xla_enable_strict_auto_jit:
xla_enabled = True
xla_test_true_list += ["//tensorflow/python:is_xla_test_true"]
if xla_enabled:
additional_deps = additional_deps + tf_additional_xla_deps_py()
if grpc_enabled:
additional_deps = additional_deps + tf_additional_grpc_deps_py()
# Python version placeholder
py_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
kernels = kernels,
main = main,
shard_count = shard_count,
srcs_version = "PY2AND3",
tags = tags,
visibility = [clean_dep("//tensorflow:internal")] +
additional_visibility,
deps = depset([
clean_dep("//tensorflow/python:extra_py_tests_deps"),
clean_dep("//tensorflow/python:gradient_checker"),
] + additional_deps + xla_test_true_list),
**kwargs
)
register_extension_info(
extension_name = "tf_py_test",
label_regex_map = {"additional_deps": "deps:{extension_name}"},
)
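# BUILD usage sketch for tf_py_test (labels are illustrative); note that extra
# TensorFlow dependencies are passed through additional_deps rather than deps:
#
#   tf_py_test(
#       name = "example_test",
#       size = "small",
#       srcs = ["example_test.py"],
#       additional_deps = ["//tensorflow/python:client_testlib"],
#   )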
def gpu_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
shard_count = 1,
additional_deps = [],
kernels = [],
tags = [],
flaky = 0,
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False):
# TODO(b/122522101): Don't ignore xla_enable_strict_auto_jit and enable additional
# XLA tests once enough compute resources are available.
_ignored = [xla_enable_strict_auto_jit]
if main == None:
main = name + ".py"
for config in ["cpu", "gpu"]:
test_name = name
test_tags = tags
if config == "gpu":
test_name += "_gpu"
test_tags = test_tags + tf_gpu_tests_tags()
tf_py_test(
name = test_name,
size = size,
srcs = srcs,
additional_deps = additional_deps,
args = args,
data = data,
flaky = flaky,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = False,
)
register_extension_info(
extension_name = "gpu_py_test",
label_regex_map = {"additional_deps": "additional_deps:{extension_name}"},
)
# terminology changes: saving cuda_* definition for compatibility
def cuda_py_test(*args, **kwargs):
gpu_py_test(*args, **kwargs)
register_extension_info(
extension_name = "cuda_py_test",
label_regex_map = {"additional_deps": "additional_deps:{extension_name}"},
)
def sycl_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
shard_count = 1,
additional_deps = [],
kernels = [],
tags = [],
flaky = 0,
xla_enabled = False,
grpc_enabled = False):
test_tags = tags + tf_sycl_tests_tags()
tf_py_test(
name = name,
size = size,
srcs = srcs,
additional_deps = additional_deps,
args = args,
data = data,
flaky = flaky,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
)
register_extension_info(
extension_name = "sycl_py_test",
label_regex_map = {"additional_deps": "additional_deps:{extension_name}"},
)
def py_tests(
name,
srcs,
size = "medium",
additional_deps = [],
kernels = [],
data = [],
tags = [],
shard_count = 1,
prefix = "",
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False):
for src in srcs:
test_name = src.split("/")[-1].split(".")[0]
if prefix:
test_name = "%s_%s" % (prefix, test_name)
tf_py_test(
name = test_name,
size = size,
srcs = [src],
additional_deps = additional_deps,
data = data,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = src,
shard_count = shard_count,
tags = tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = xla_enable_strict_auto_jit,
)
def gpu_py_tests(
name,
srcs,
size = "medium",
additional_deps = [],
kernels = [],
data = [],
shard_count = 1,
tags = [],
prefix = "",
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False):
# TODO(b/122522101): Don't ignore xla_enable_strict_auto_jit and enable additional
# XLA tests once enough compute resources are available.
_ignored = [xla_enable_strict_auto_jit]
test_tags = tags + tf_gpu_tests_tags()
py_tests(
name = name,
size = size,
srcs = srcs,
additional_deps = additional_deps,
data = data,
grpc_enabled = grpc_enabled,
kernels = kernels,
prefix = prefix,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = False,
)
# terminology changes: saving cuda_* definition for compatibility
def cuda_py_tests(*args, **kwargs):
gpu_py_tests(*args, **kwargs)
# Creates a genrule named <name>_srcs for running tools/proto_text's generator
# to make the proto_text functions for the protos passed in <srcs>, plus a
# filegroup named <name>_hdrs for the generated headers and a cc_library named
# <name> that compiles the generated sources.
def tf_generate_proto_text_sources(name, srcs_relative_dir, srcs, protodeps = [], deps = [], visibility = None):
out_hdrs = (
[
p.replace(".proto", ".pb_text.h")
for p in srcs
] + [p.replace(".proto", ".pb_text-impl.h") for p in srcs]
)
out_srcs = [p.replace(".proto", ".pb_text.cc") for p in srcs]
native.genrule(
name = name + "_srcs",
srcs = srcs + protodeps + [clean_dep("//tensorflow/tools/proto_text:placeholder.txt")],
outs = out_hdrs + out_srcs,
visibility = visibility,
cmd =
"$(location //tensorflow/tools/proto_text:gen_proto_text_functions) " +
"$(@D) " + srcs_relative_dir + " $(SRCS)",
tools = [
clean_dep("//tensorflow/tools/proto_text:gen_proto_text_functions"),
],
)
native.filegroup(
name = name + "_hdrs",
srcs = out_hdrs,
visibility = visibility,
)
native.cc_library(
name = name,
srcs = out_srcs,
hdrs = out_hdrs,
visibility = visibility,
deps = deps,
)
def tf_genrule_cmd_append_to_srcs(to_append):
return ("cat $(SRCS) > $(@) && " + "echo >> $(@) && " + "echo " + to_append +
" >> $(@)")
def tf_version_info_genrule():
native.genrule(
name = "version_info_gen",
srcs = [
clean_dep("@local_config_git//:gen/spec.json"),
clean_dep("@local_config_git//:gen/head"),
clean_dep("@local_config_git//:gen/branch_ref"),
],
outs = ["util/version_info.cc"],
cmd =
"$(location //tensorflow/tools/git:gen_git_source) --generate $(SRCS) \"$@\" --git_tag_override=$${GIT_TAG_OVERRIDE:-}",
local = 1,
tools = [clean_dep("//tensorflow/tools/git:gen_git_source")],
)
def tf_py_build_info_genrule():
native.genrule(
name = "py_build_info_gen",
outs = ["platform/build_info.py"],
cmd =
"$(location //tensorflow/tools/build_info:gen_build_info) --raw_generate \"$@\" " +
" --is_config_cuda " + if_cuda("True", "False") +
" --is_config_rocm " + if_rocm("True", "False") +
" --key_value " +
if_cuda(" cuda_version_number=$${TF_CUDA_VERSION:-} cudnn_version_number=$${TF_CUDNN_VERSION:-} ", "") +
if_windows(" msvcp_dll_name=msvcp140.dll ", "") +
if_windows_cuda(" ".join([
"nvcuda_dll_name=nvcuda.dll",
"cudart_dll_name=cudart64_$$(echo $${TF_CUDA_VERSION:-} | sed \"s/\\.//\").dll",
"cudnn_dll_name=cudnn64_$${TF_CUDNN_VERSION:-}.dll",
]), ""),
local = 1,
tools = [clean_dep("//tensorflow/tools/build_info:gen_build_info")],
)
def cc_library_with_android_deps(
deps,
android_deps = [],
common_deps = [],
copts = tf_copts(),
**kwargs):
deps = if_not_android(deps) + if_android(android_deps) + common_deps
native.cc_library(deps = deps, copts = copts, **kwargs)
register_extension_info(
extension_name = "cc_library_with_android_deps",
label_regex_for_dep = "{extension_name}",
)
def tensorflow_opensource_extra_deps():
return []
# buildozer: disable=function-docstring-args
def pybind_extension(
name,
srcs,
module_name,
hdrs = [],
features = [],
srcs_version = "PY2AND3",
data = [],
copts = None,
linkopts = [],
deps = [],
visibility = None,
testonly = None,
licenses = None,
compatible_with = None,
restricted_to = None,
deprecation = None):
"""Builds a generic Python extension module."""
_ignore = [module_name]
p = name.rfind("/")
if p == -1:
sname = name
prefix = ""
else:
sname = name[p + 1:]
prefix = name[:p + 1]
so_file = "%s%s.so" % (prefix, sname)
pyd_file = "%s%s.pyd" % (prefix, sname)
symbol = "init%s" % sname
symbol2 = "init_%s" % sname
symbol3 = "PyInit_%s" % sname
exported_symbols_file = "%s-exported-symbols.lds" % name
version_script_file = "%s-version-script.lds" % name
native.genrule(
name = name + "_exported_symbols",
outs = [exported_symbols_file],
cmd = "echo '_%s\n_%s\n_%s' >$@" % (symbol, symbol2, symbol3),
output_licenses = ["unencumbered"],
visibility = ["//visibility:private"],
testonly = testonly,
)
native.genrule(
name = name + "_version_script",
outs = [version_script_file],
cmd = "echo '{global:\n %s;\n %s;\n %s;\n local: *;};' >$@" % (symbol, symbol2, symbol3),
output_licenses = ["unencumbered"],
visibility = ["//visibility:private"],
testonly = testonly,
)
native.cc_binary(
name = so_file,
srcs = srcs + hdrs,
data = data,
copts = copts,
linkopts = linkopts + _rpath_linkopts(name) + select({
"@local_config_cuda//cuda:darwin": [
"-Wl,-exported_symbols_list,$(location %s)" % exported_symbols_file,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,--version-script",
"$(location %s)" % version_script_file,
],
}),
deps = deps + [
exported_symbols_file,
version_script_file,
],
features = features,
linkshared = 1,
testonly = testonly,
licenses = licenses,
visibility = visibility,
deprecation = deprecation,
restricted_to = restricted_to,
compatible_with = compatible_with,
)
native.genrule(
name = name + "_pyd_copy",
srcs = [so_file],
outs = [pyd_file],
cmd = "cp $< $@",
output_to_bindir = True,
visibility = visibility,
deprecation = deprecation,
restricted_to = restricted_to,
compatible_with = compatible_with,
)
native.py_library(
name = name,
data = select({
"@org_tensorflow//tensorflow:windows": [pyd_file],
"//conditions:default": [so_file],
}),
srcs_version = srcs_version,
licenses = licenses,
testonly = testonly,
visibility = visibility,
deprecation = deprecation,
restricted_to = restricted_to,
compatible_with = compatible_with,
)
# buildozer: enable=function-docstring-args
def tf_python_pybind_extension(
name,
srcs,
module_name,
hdrs = [],
features = [],
copts = None,
deps = []):
"""A wrapper macro for pybind_extension that is used in tensorflow/python/BUILD.
It is used for targets under //third_party/tensorflow/python that link
against libtensorflow_framework.so and pywrap_tensorflow_internal.so.
"""
pybind_extension(
name,
srcs + tf_binary_additional_srcs(),
module_name,
hdrs = hdrs,
features = features,
copts = copts,
deps = deps + tf_binary_pybind_deps() + if_mkl_ml(["//third_party/mkl:intel_binary_blob"]),
)
def if_cuda_or_rocm(if_true, if_false = []):
"""Shorthand for select()'ing whether to build for either CUDA or ROCm.
Returns a select statement which evaluates to
if_true if we're building with either CUDA or ROCm enabled.
if_false, otherwise.
Sometimes a target has additional CUDA- or ROCm-specific dependencies.
The `if_cuda` / `if_rocm` functions are used to specify these additional
dependencies; for example, see the `//tensorflow/core/kernels:bias_op` target.
If the same additional dependency is needed for both CUDA and ROCm
(e.g. the `reduction_ops` dependency of the `bias_op` target above),
then specifying that dependency in both `if_cuda` and `if_rocm` will
result in both of those functions returning a select statement that contains
the same dependency, which then leads to a duplicate-dependency bazel error.
In order to work around this error, any additional dependency that is common
to both the CUDA and ROCm platforms, should be specified using this function.
Doing so will eliminate the cause of the bazel error (i.e. the same
dependency showing up in two different select statements)
"""
return select({
"@local_config_cuda//cuda:using_nvcc": if_true,
"@local_config_cuda//cuda:using_clang": if_true,
"@local_config_rocm//rocm:using_hipcc": if_true,
"//conditions:default": if_false,
})
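# Usage sketch (the kernel target below is illustrative): a dependency needed
# under both CUDA and ROCm, such as the `reduction_ops` case mentioned in the
# docstring, is listed once via if_cuda_or_rocm instead of being duplicated in
# if_cuda() and if_rocm():
#
#   deps = deps + if_cuda_or_rocm([":reduction_ops"])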
def tf_jit_compilation_passes_extra_deps():
return []
def if_mlir(if_true, if_false = []):
return select({
"//conditions:default": if_false,
"//tensorflow:with_mlir_support": if_true,
})
# TODO(b/138724071): Remove when build is stable.
def if_mlir_tflite(if_true, if_false = []):
return if_true # Internally we always build with MLIR.
def tfcompile_extra_flags():
return ""
def tf_grpc_dependency():
return "//tensorflow:grpc"
def tf_grpc_cc_dependency():
return "//tensorflow:grpc++"
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import json
from models import models
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from utils import BaseHandler
from google.appengine.ext import db
def store_score(student, assessment_type, score):
"""Stores a student's score on a particular assessment.
Args:
student: the student whose data is stored.
assessment_type: the type of the assessment.
score: the student's score on this assessment.
Returns:
the (possibly modified) assessment_type, which the caller can
use to render an appropriate response page.
"""
# FIXME: Course creators can edit this code to implement custom
# assessment scoring and storage behavior
# TODO(pgbovine): Note that the latest version of answers are always saved,
# but scores are only saved if they're higher than the previous attempt.
# This can lead to unexpected analytics behavior. Resolve this.
existing_score = utils.get_score(student, assessment_type)
# remember to cast to int for comparison
if (existing_score is None) or (score > int(existing_score)):
utils.set_score(student, assessment_type, score)
# special handling for computing final score:
if assessment_type == 'postcourse':
midcourse_score = utils.get_score(student, 'midcourse')
if midcourse_score is None:
midcourse_score = 0
else:
midcourse_score = int(midcourse_score)
if existing_score is None:
postcourse_score = score
else:
postcourse_score = int(existing_score)
if score > postcourse_score:
postcourse_score = score
# Calculate overall score based on a formula
overall_score = int((0.3 * midcourse_score) + (0.7 * postcourse_score))
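# Worked example (scores are hypothetical): midcourse_score = 60 and
# postcourse_score = 80 give int(0.3 * 60 + 0.7 * 80) = 74, which is >= 70,
# so assessment_type becomes 'postcourse_pass' below.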
# TODO(pgbovine): this changing of assessment_type is ugly ...
if overall_score >= 70:
assessment_type = 'postcourse_pass'
else:
assessment_type = 'postcourse_fail'
utils.set_score(student, 'overall_score', overall_score)
return assessment_type
class AnswerHandler(BaseHandler):
"""Handler for saving assessment answers."""
# Find student entity and save answers
@db.transactional(xg=True)
def update_assessment_transaction(
self, email, assessment_type, new_answers, score):
"""Stores answer and updates user scores."""
student = Student.get_by_email(email)
# It may be that old Student entities don't have user_id set; fix it.
if not student.user_id:
student.user_id = self.get_user().user_id()
answers = StudentAnswersEntity.get_by_key_name(student.user_id)
if not answers:
answers = StudentAnswersEntity(key_name=student.user_id)
answers.updated_on = datetime.datetime.now()
utils.set_answer(answers, assessment_type, new_answers)
assessment_type = store_score(student, assessment_type, score)
student.put()
answers.put()
# Also record the event, which is useful for tracking multiple
# submissions and history.
models.EventEntity.record(
'submit-assessment', self.get_user(), json.dumps({
'type': 'assessment-%s' % assessment_type,
'values': new_answers, 'location': 'AnswerHandler'}))
return (student, assessment_type)
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
return
assessment_type = self.request.get('assessment_type')
# Convert answers from JSON to dict.
answers = self.request.get('answers')
if answers:
answers = json.loads(answers)
else:
answers = []
# TODO(pgbovine): consider storing as float for better precision
score = int(round(float(self.request.get('score'))))
# Record score.
(student, assessment_type) = self.update_assessment_transaction(
student.key().name(), assessment_type, answers, score)
self.template_value['navbar'] = {'course': True}
self.template_value['assessment'] = assessment_type
self.template_value['student_score'] = utils.get_score(
student, 'overall_score')
self.render('test_confirmation.html')
|
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
#!/usr/bin/python
doc = """\
Node manager listens to process state change events and
other flag value change events to provide advanced service
management functionality.
Rules files look like the following:
====================
{ "Rules": [
{"process_name": "contrail-query-engine",
"process_state": "PROCESS_STATE_FATAL",
"action": "supervisorctl -s http://localhost:9002 """ + \
"""\stop contrail-analytics-api"},
{"process_name": "contrail-query-engine",
"process_state": "PROCESS_STATE_STOPPED",
"action": "supervisorctl -s http://localhost:9002 """ + \
"""\stop contrail-analytics-api"},
{"processname": "contrail-collector",
"process_state": "PROCESS_STATE_RUNNING",
"action": "/usr/bin/echo collector is starting >> /tmp/log"},
{"flag_name": "test", "flag_value":"true",
"action": "/usr/bin/echo flag test is set true >> /tmp/log.1"}
]
}
====================
"""
from gevent import monkey
monkey.patch_all()
import os
import os.path
import sys
import argparse
import socket
import gevent
import ConfigParser
import signal
import random
import hashlib
from nodemgr.analytics_nodemgr.analytics_event_manager import AnalyticsEventManager
from nodemgr.control_nodemgr.control_event_manager import ControlEventManager
from nodemgr.config_nodemgr.config_event_manager import ConfigEventManager
from nodemgr.vrouter_nodemgr.vrouter_event_manager import VrouterEventManager
from nodemgr.database_nodemgr.database_event_manager import DatabaseEventManager
from pysandesh.sandesh_base import Sandesh, SandeshSystem, SandeshConfig
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
def usage():
print doc
sys.exit(255)
def main(args_str=' '.join(sys.argv[1:])):
# Parse Arguments
node_parser = argparse.ArgumentParser(add_help=False)
node_parser.add_argument("--nodetype",
default='contrail-analytics',
help='Type of node which nodemgr is managing')
try:
args, remaining_argv = node_parser.parse_known_args(args_str.split())
except:
usage()
default = {'rules': '',
'collectors': [],
'hostip': '127.0.0.1',
'db_port': '9042',
'minimum_diskgb': 256,
'contrail_databases': 'config analytics',
'cassandra_repair_interval': 24,
'cassandra_repair_logdir': '/var/log/contrail/',
'log_local': False,
'log_level': SandeshLevel.SYS_DEBUG,
'log_category': '',
'log_file': Sandesh._DEFAULT_LOG_FILE,
'use_syslog': False,
'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY
}
default.update(SandeshConfig.get_default_options(['DEFAULTS']))
sandesh_opts = SandeshConfig.get_default_options()
node_type = args.nodetype
if (node_type == 'contrail-analytics'):
config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
elif (node_type == 'contrail-config'):
config_file = '/etc/contrail/contrail-config-nodemgr.conf'
elif (node_type == 'contrail-control'):
config_file = '/etc/contrail/contrail-control-nodemgr.conf'
elif (node_type == 'contrail-vrouter'):
config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
elif (node_type == 'contrail-database'):
config_file = '/etc/contrail/contrail-database-nodemgr.conf'
else:
sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
return
if (os.path.exists(config_file) == False):
sys.stderr.write("config file " + config_file + " is not present" + "\n")
return
config = ConfigParser.SafeConfigParser()
config.read([config_file])
if 'DEFAULTS' in config.sections():
default.update(dict(config.items('DEFAULTS')))
if 'COLLECTOR' in config.sections():
try:
collector = config.get('COLLECTOR', 'server_list')
default['collectors'] = collector.split()
except ConfigParser.NoOptionError as e:
pass
SandeshConfig.update_options(sandesh_opts, config)
parser = argparse.ArgumentParser(parents=[node_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
default.update(sandesh_opts)
parser.set_defaults(**default)
parser.add_argument("--rules",
help='Rules file to use for processing events')
parser.add_argument("--collectors",
nargs='+',
help='Collector addresses in format' +
'ip1:port1 ip2:port2')
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument("--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--use_syslog", action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
SandeshConfig.add_parser_arguments(parser, add_dscp=True)
if (node_type == 'contrail-database' or node_type == 'contrail-config'):
parser.add_argument("--minimum_diskGB",
type=int,
dest='minimum_diskgb',
help="Minimum disk space in GB's")
parser.add_argument("--contrail_databases",
nargs='+',
help='Contrail databases on this node' +
'in format: config analytics' )
parser.add_argument("--hostip",
help="IP address of host")
parser.add_argument("--db_port",
help="Cassandra DB cql port")
parser.add_argument("--cassandra_repair_interval", type=int,
help="Time in hours to periodically run "
"nodetool repair for cassandra maintenance")
parser.add_argument("--cassandra_repair_logdir",
help="Directory for storing repair logs")
try:
_args = parser.parse_args(remaining_argv)
except:
usage()
rule_file = _args.rules
# randomize collector list
_args.chksum = ""
if _args.collectors:
_args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
_args.random_collectors = random.sample(_args.collectors, len(_args.collectors))
_args.collectors = _args.random_collectors
# done parsing arguments
prog = None
if (node_type == 'contrail-analytics'):
if not rule_file:
rule_file = "/etc/contrail/supervisord_analytics_files/" + \
"contrail-analytics.rules"
unit_names = ['contrail-collector.service',
'contrail-analytics-api.service',
'contrail-snmp-collector.service',
'contrail-query-engine.service',
'contrail-alarm-gen.service',
'contrail-topology.service',
'contrail-analytics-nodemgr.service',
]
prog = AnalyticsEventManager(_args, rule_file, unit_names)
elif (node_type == 'contrail-config'):
if not rule_file:
rule_file = "/etc/contrail/supervisord_config_files/" + \
"contrail-config.rules"
unit_names = ['contrail-api.service',
'contrail-schema.service',
'contrail-svc-monitor.service',
'contrail-device-manager.service',
'contrail-config-nodemgr.service',
]
prog = ConfigEventManager(_args, rule_file, unit_names)
elif (node_type == 'contrail-control'):
if not rule_file:
rule_file = "/etc/contrail/supervisord_control_files/" + \
"contrail-control.rules"
unit_names = ['contrail-control.service',
'contrail-dns.service',
'contrail-named.service',
'contrail-control-nodemgr.service',
]
prog = ControlEventManager(_args, rule_file, unit_names)
elif (node_type == 'contrail-vrouter'):
if not rule_file:
rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
"contrail-vrouter.rules"
unit_names = ['contrail-vrouter-agent.service',
'contrail-vrouter-nodemgr.service',
]
prog = VrouterEventManager(_args, rule_file, unit_names)
elif (node_type == 'contrail-database'):
if not rule_file:
rule_file = "/etc/contrail/supervisord_database_files/" + \
"contrail-database.rules"
unit_names = ['contrail-database.service',
'kafka.service',
'contrail-database-nodemgr.service',
]
prog = DatabaseEventManager(_args, rule_file, unit_names)
else:
sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
return
prog.process()
prog.send_nodemgr_process_status()
prog.send_process_state_db(prog.group_names)
prog.config_file = config_file
prog.collector_chksum = _args.chksum
if _args.collectors:
prog.random_collectors = _args.random_collectors
""" @sighup
Reconfig of collector list
"""
gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
gevent.joinall([gevent.spawn(prog.runforever),
gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))])
if __name__ == '__main__':
main()
|
from django import forms
from django.forms.widgets import PasswordInput
from modules.common.id_choicefield import IdentificationField
class PartyForm(forms.Form):
error_messages = {
'password_mismatch': (
'The password confirmation did not match the password you entered.'
),
}
party_name = forms.CharField(label="Name of the contesting party")
cname = forms.CharField(label="Candidate's Name")
age = forms.IntegerField(min_value=0, label="Candidate's Age")
citype = IdentificationField(label="Identity Proof of the Candidate")
cidno = forms.CharField(label="Passport / ID Number")
party_manifesto = forms.CharField(
widget=forms.Textarea,
required=False
)
party_symbol = forms.ImageField(
required=False,
help_text="The maximum size permitted is 2.5 MB"
)
cpd1 = forms.CharField(
widget=PasswordInput,
label="Enter your password",
strip=False,
)
cpd2 = forms.CharField(
widget=PasswordInput,
label="Confirm Password",
strip=False,
help_text=("Enter the same password as before, for verification")
)
show_profile = forms.ChoiceField(
choices=[
(True, "Show Profile to public"),
(False, "Hide profile from public")
],
help_text="The Election Commission can override this setting."
)
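# Minimal usage sketch (hypothetical view code; all field values below are
# made up, and the accepted values for the IdentificationField are assumed):
#
# form = PartyForm(data={
#     'party_name': 'Example Party',
#     'cname': 'A. Candidate',
#     'age': 45,
#     'citype': 'passport',
#     'cidno': 'X1234567',
#     'cpd1': 'secret-pass',
#     'cpd2': 'secret-pass',
#     'show_profile': 'True',
# })
# if form.is_valid():
#     cleaned = form.cleaned_data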
class PartyEditForm(forms.Form):
party_name = forms.CharField()
cpass = forms.CharField(
widget=PasswordInput,
label="Enter your password",
strip=False,
)
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from django import forms
from django import http
from django import shortcuts
from django.views import generic
import six
from horizon import exceptions
from horizon.forms import views as hz_views
from horizon.forms.views import ADD_TO_FIELD_HEADER # noqa
from horizon import messages
class WorkflowView(hz_views.ModalBackdropMixin, generic.TemplateView):
"""A generic class-based view which handles the intricacies of workflow
processing with minimal user configuration.
.. attribute:: workflow_class
The :class:`~horizon.workflows.Workflow` class which this view handles.
Required.
.. attribute:: template_name
The template to use when rendering this view via standard HTTP
requests. Required.
.. attribute:: ajax_template_name
The template to use when rendering the workflow for AJAX requests.
In general the default common template should be used. Defaults to
``"horizon/common/_workflow.html"``.
.. attribute:: context_object_name
The key which should be used for the workflow object in the template
context. Defaults to ``"workflow"``.
"""
workflow_class = None
template_name = 'horizon/common/_workflow_base.html'
context_object_name = "workflow"
ajax_template_name = 'horizon/common/_workflow.html'
step_errors = {}
def __init__(self):
super(WorkflowView, self).__init__()
if not self.workflow_class:
raise AttributeError("You must set the workflow_class attribute "
"on %s." % self.__class__.__name__)
def get_initial(self):
"""Returns initial data for the workflow. Defaults to using the GET
parameters to allow pre-seeding of the workflow context values.
"""
return copy.copy(self.request.GET)
def get_workflow(self):
"""Returns the instantiated workflow class."""
extra_context = self.get_initial()
entry_point = self.request.GET.get("step", None)
workflow = self.workflow_class(self.request,
context_seed=extra_context,
entry_point=entry_point)
return workflow
def get_context_data(self, **kwargs):
"""Returns the template context, including the workflow class.
This method should be overridden in subclasses to provide additional
context data to the template.
"""
context = super(WorkflowView, self).get_context_data(**kwargs)
workflow = self.get_workflow()
context[self.context_object_name] = workflow
next = self.request.REQUEST.get(workflow.redirect_param_name, None)
context['REDIRECT_URL'] = next
context['layout'] = self.get_layout()
# For consistency with Workflow class
context['modal'] = 'modal' in context['layout']
if ADD_TO_FIELD_HEADER in self.request.META:
context['add_to_field'] = self.request.META[ADD_TO_FIELD_HEADER]
return context
def get_layout(self):
"""returns classes for the workflow element in template based on
the workflow characteristics
"""
if self.request.is_ajax():
layout = ['modal', ]
if self.workflow_class.fullscreen:
layout += ['fullscreen', ]
else:
layout = ['static_page', ]
if self.workflow_class.wizard:
layout += ['wizard', ]
return layout
def get_template_names(self):
"""Returns the template name to use for this request."""
if self.request.is_ajax():
template = self.ajax_template_name
else:
template = self.template_name
return template
def get_object_id(self, obj):
return getattr(obj, "id", None)
def get_object_display(self, obj):
return getattr(obj, "name", None)
def add_error_to_step(self, error_msg, step):
self.step_errors[step] = error_msg
def set_workflow_step_errors(self, context):
workflow = context['workflow']
for step in self.step_errors:
error_msg = self.step_errors[step]
workflow.add_error_to_step(error_msg, step)
def get(self, request, *args, **kwargs):
"""Handler for HTTP GET requests."""
context = self.get_context_data(**kwargs)
self.set_workflow_step_errors(context)
return self.render_to_response(context)
def validate_steps(self, request, workflow, start, end):
"""Validates the workflow steps from ``start`` to ``end``, inclusive.
Returns a dict describing the validation state of the workflow.
"""
errors = {}
for step in workflow.steps[start:end + 1]:
if not step.action.is_valid():
errors[step.slug] = dict(
(field, [unicode(error) for error in field_errors])
for (field, field_errors) in six.iteritems(step.action.errors))
return {
'has_errors': bool(errors),
'workflow_slug': workflow.slug,
'errors': errors,
}
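# Hedged sketch of the step-validation contract implemented above. The header
# names mirror the HTTP_X_HORIZON_VALIDATE_STEP_* keys read in post() below;
# the workflow, step and field names in the payload are illustrative only:
#
#   POST <workflow url>
#   X-Horizon-Validate-Step-Start: 0
#   X-Horizon-Validate-Step-End: 1
#
#   -> {"has_errors": true,
#       "workflow_slug": "my_workflow",
#       "errors": {"set_name": {"name": ["This field is required."]}}}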
def post(self, request, *args, **kwargs):
"""Handler for HTTP POST requests."""
context = self.get_context_data(**kwargs)
workflow = context[self.context_object_name]
try:
# Check for the VALIDATE_STEP* headers, if they are present
# and valid integers, return validation results as JSON,
# otherwise proceed normally.
validate_step_start = int(self.request.META.get(
'HTTP_X_HORIZON_VALIDATE_STEP_START', ''))
validate_step_end = int(self.request.META.get(
'HTTP_X_HORIZON_VALIDATE_STEP_END', ''))
except ValueError:
# No VALIDATE_STEP* headers, or invalid values. Just proceed
# with normal workflow handling for POSTs.
pass
else:
# There are valid VALIDATE_STEP* headers, so only do validation
# for the specified steps and return results.
data = self.validate_steps(request, workflow,
validate_step_start,
validate_step_end)
return http.HttpResponse(json.dumps(data),
content_type="application/json")
if not workflow.is_valid():
return self.render_to_response(context)
try:
success = workflow.finalize()
except forms.ValidationError:
return self.render_to_response(context)
except Exception:
success = False
exceptions.handle(request)
if success:
msg = workflow.format_status_message(workflow.success_message)
messages.success(request, msg)
else:
msg = workflow.format_status_message(workflow.failure_message)
messages.error(request, msg)
if "HTTP_X_HORIZON_ADD_TO_FIELD" in self.request.META:
field_id = self.request.META["HTTP_X_HORIZON_ADD_TO_FIELD"]
response = http.HttpResponse()
if workflow.object:
data = [self.get_object_id(workflow.object),
self.get_object_display(workflow.object)]
response.content = json.dumps(data)
response["X-Horizon-Add-To-Field"] = field_id
return response
next_url = self.request.REQUEST.get(workflow.redirect_param_name, None)
return shortcuts.redirect(next_url or workflow.get_success_url())
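# Hedged usage sketch: "MyWorkflow" stands in for a hypothetical
# horizon.workflows.Workflow subclass; it is not defined in this module.
#
#     class MyWorkflowView(WorkflowView):
#         workflow_class = MyWorkflow
#         template_name = "mydashboard/my_workflow.html"
#
# AJAX requests automatically fall back to ajax_template_name, and GET
# parameters pre-seed the workflow context through get_initial().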
|
from PygFW import Event
import pygame
class EntityClickEvent(Event):
def __init__(self, scene_surface):
Event.__init__(self, scene_surface, pygame.MOUSEBUTTONDOWN)
def executor(self, scene, event):
for entity in scene.entities._list_:
if entity.clickable:
if entity.collides_with([event.pos]):
entity.click(scene, event)
class EntityUnclickEvent(Event):
def __init__(self, scene_surface):
Event.__init__(self, scene_surface, pygame.MOUSEBUTTONUP)
def executor(self, scene, event):
for entity in scene.entities._list_:
if entity.un_clickable:
entity.un_click(scene, event)
|
#!/usr/bin/env python
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2008-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
import time
from drmaa2 import JobSession
from drmaa2 import JobInfo
from drmaa2 import Time
if __name__ == '__main__':
js = JobSession('js-01')
print('Created job session: %s' % js.name)
j = js.run_job({'remote_command': '/bin/sleep', 'args': ['100']})
print('Submitted job: %s, waiting on start' % j)
t1 = time.time()
j.wait_started(10)
t2 = time.time()
print('Wait on job start is over after %s seconds' % (t2 - t1))
ji = j.get_info()
print('Retrieved job info: %s' % ji)
print('Waiting on job %s termination' % j.id)
t1 = time.time()
j.wait_terminated(Time.INFINITE_TIME.value)
t2 = time.time()
print('Job terminated, wait is over after %s seconds' % (t2 - t1))
ji = j.get_info()
print('Retrieved job info after termination: %s' % ji)
|
import argparse
import os
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
import models
def predict(model_data_path, image_path):
# Default input size
height = 228
width = 304
channels = 3
batch_size = 1
# Read image
img = Image.open(image_path)
img = img.resize([width,height], Image.ANTIALIAS)
img = np.array(img).astype('float32')
img = np.expand_dims(np.asarray(img), axis = 0)
# Create a placeholder for the input image
input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))
# Construct the network
net = models.ResNet50UpProj({'data': input_node}, batch_size)
with tf.Session() as sess:
# Load the converted parameters
print('Loading the model')
net.load(model_data_path, sess)
uninitialized_vars = []
for var in tf.global_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
init_new_vars_op = tf.variables_initializer(uninitialized_vars)
sess.run(init_new_vars_op)
# Evaluate the network for the given image
pred = sess.run(net.get_output(), feed_dict={input_node: img})
# Plot result
fig = plt.figure()
ii = plt.imshow(pred[0,:,:,0], interpolation='nearest')
fig.colorbar(ii)
plt.show()
return pred
def main():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('model_path', help='Converted parameters for the model')
parser.add_argument('image_paths', help='Path of the image to predict')
args = parser.parse_args()
# Predict the image
pred = predict(args.model_path, args.image_paths)
os._exit(0)
if __name__ == '__main__':
main()
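# Hedged usage note: the script expects the converted checkpoint and a single
# image file as positional arguments (the file names below are illustrative):
#
#     python predict.py NYU_FCRN.ckpt kitchen.jpg
#
# The predicted depth map of the only batch element is displayed with
# matplotlib before the prediction array is returned.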
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['LSTM'] );
|
import inspect
import io
import json
import logging as logginglib
import sys
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Set, TextIO, Tuple
from typing_extensions import Literal
from hpc.autoscale import hpclogging as logging
from hpc.autoscale.codeanalysis import hpcwrapclass
from hpc.autoscale.hpctypes import Hostname
from hpc.autoscale.job.demand import DemandResult
from hpc.autoscale.node.node import Node
OutputFormat = Literal["json", "table", "table_headerless"]
@hpcwrapclass
class DemandPrinter:
def __init__(
self,
column_names: Optional[List[str]] = None,
stream: Optional[TextIO] = None,
output_format: OutputFormat = "table",
long: bool = False,
) -> None:
column_names_list: List[str] = []
if column_names:
column_names_list = column_names
self.__defaults = {}
for n in range(len(column_names_list)):
expr = column_names_list[n]
if ":" in expr and "[" not in expr:
column, default_value = expr.split(":", 1)
column_names_list[n] = column
self.__defaults[column] = default_value
self.column_names = [x.lower() for x in column_names_list]
self.stream = stream or sys.stdout
self.output_format = output_format
self.long = long
def _calc_width(self, columns: List[str], rows: List[List[str]]) -> Tuple[int, ...]:
maxes = [len(c) for c in columns]
for row in rows:
for n in range(len(row)):
maxes[n] = max(len(row[n]), maxes[n])
return tuple(maxes)
def _get_all_columns(self, compute_nodes: List[Node]) -> List[str]:
columns = []
for attr_name in dir(Node):
if not attr_name[0].isalpha():
continue
attr = getattr(Node, attr_name)
if hasattr(attr, "__call__"):
continue
columns.append(attr_name)
if compute_nodes:
all_available: Set[str] = set()
for n in compute_nodes:
all_available.update(n.available.keys())
columns += list(all_available)
assert None not in columns
columns = sorted(columns)
return columns
def print_columns(self, demand_result: Optional[DemandResult] = None) -> None:
columns = self.column_names
if not columns:
columns = self._get_all_columns(
demand_result.compute_nodes if demand_result else []
)
columns = [c for c in columns if c != "hostname_required"]
widths = self._calc_width(columns, [])
formats = " ".join(["{:%d}" % x for x in widths])
assert len(widths) == len(columns), "{} != {}".format(len(widths), len(columns))
print(formats.format(*columns), file=self.stream)
self.stream.flush()
def print_demand(self, demand_result: DemandResult) -> None:
rows = []
columns = self.column_names
if not columns:
columns = self._get_all_columns(demand_result.compute_nodes)
if self.output_format == "json":
columns = [c for c in columns if c not in ["hostname_required"]]
else:
columns = [
c
for c in columns
if c not in ["available", "node", "hostname_required"]
]
columns = ["job_ids" if c == "assigned_job_ids" else c for c in columns]
if "name" in columns:
columns.remove("name")
columns.insert(0, "name")
short_columns = [c.split("@")[0] for c in columns]
long_columns = [c.split("@")[-1] for c in columns]
# sort by private ip or the node name
def sort_by_ip_or_name(node: Node) -> Any:
if node.private_ip:
return tuple(map(int, node.private_ip.split(".")))
name_toks = node.name.split("-")
if name_toks[-1].isdigit():
node_index = int(name_toks[-1])
nodearray_ord = [ord(x) for x in node.nodearray]
# 2**31 to make these come after private ips
# then nodearray name, then index
return tuple([2 ** 31] + nodearray_ord + [node_index])
return tuple([-1] + name_toks)
ordered_nodes = sorted(demand_result.compute_nodes, key=sort_by_ip_or_name)
for node in ordered_nodes:
row: List[str] = []
rows.append(row)
for column in long_columns:
# TODO justify - this is a printing function, so this value could be lots of things etc.
value: Any = None
is_from_available = column.startswith("*")
is_ratio = column.startswith("/")
is_slice = "[" in column
if is_from_available or is_ratio:
column = column[1:]
def _slice(v: str) -> str:
return v
slice = _slice
if is_slice:
slice_expr = column[column.index("[") :]
column = column.split("[")[0]
# TODO maybe parse this instead of eval-ing a lambda
if self.long:
slice = lambda v: v # noqa: E731
else:
slice = eval(
"lambda v: v%s if v is not None else v" % slice_expr
)
if column == "hostname":
hostname = node.hostname
if not node.exists or not hostname:
if node.private_ip:
hostname = Hostname(str(node.private_ip))
else:
hostname = Hostname("tbd")
value = hostname
elif column == "hostname_required":
continue
elif column == "job_ids":
value = node.assignments
elif hasattr(node, column):
value = getattr(node, column)
else:
if is_from_available:
value = node.available.get(column)
elif is_ratio:
value = "{}/{}".format(
node.available.get(column), node.resources.get(column)
)
elif column in node.resources:
value = node.resources.get(column)
else:
value = node.metadata.get(column)
if value is None:
value = self.__defaults.get(column)
# convert sets to lists, as sets are not json serializable
if isinstance(value, set):
value = list(value)
elif isinstance(value, datetime):
value = value.isoformat()
# for json, we support lists, null, numbers etc.
# for table* we will output a string for every value.
if self.output_format != "json":
if isinstance(value, list):
value = ",".join(sorted(value))
elif isinstance(value, set):
value = ",".join(sorted(list(value)))
elif value is None:
value = ""
elif isinstance(value, float):
value = "{:.1f}".format(value)
elif not isinstance(value, str):
value = str(value)
else:
if hasattr(value, "to_json"):
value = value.to_json()
elif hasattr(value, "keys"):
value = dict(value)
row.append(slice(value))
# remove / and slice expressions
stripped_short_names = [c.lstrip("/").split("[")[0] for c in short_columns]
if self.output_format != "json":
stripped_short_names = [x.upper() for x in stripped_short_names]
print_rows(stripped_short_names, rows, self.stream, self.output_format)
def __str__(self) -> str:
return "DemandPrinter(columns={}, output_format={}, stream={})".format(
str(self.column_names), self.output_format, self.stream
)
def __repr__(self) -> str:
return str(self)
def print_columns(
demand_result: DemandResult,
stream: Optional[TextIO] = None,
output_format: OutputFormat = "table",
long: bool = False,
) -> None:
printer = DemandPrinter(None, stream=stream, output_format=output_format, long=long)
printer.print_columns(demand_result)
def print_demand(
columns: List[str],
demand_result: DemandResult,
stream: Optional[TextIO] = None,
output_format: OutputFormat = "table",
log: bool = False,
long: bool = False,
) -> None:
if log:
stream = logging_stream(stream or sys.stdout)
printer = DemandPrinter(
columns, stream=stream, output_format=output_format, long=long
)
printer.print_demand(demand_result)
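# Hedged usage sketch of the column expressions parsed by DemandPrinter; the
# column, resource and default names below are illustrative, and demand_result
# is assumed to come from a demand calculation elsewhere in the library:
#
#     print_demand(
#         ["name", "hostname", "job_ids", "/ncpus", "*slots",
#          "ncpus@node_cores", "vm_size:unknown"],
#         demand_result)
#
#   name              plain Node attribute
#   /ncpus            rendered as "available/total" for that resource
#   *slots            value taken from node.available
#   ncpus@node_cores  header text before "@", attribute looked up after it
#   vm_size:unknown   ":" sets the default shown when the value is missing
#   col[:8]           a "[...]" suffix slices the rendered value unless long=True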
def wrap_text_io(clz: Any) -> Callable[[TextIO, Optional[str]], TextIO]:
members: Dict[str, Any] = {}
for attr in dir(TextIO):
if not attr[0].islower() and attr not in [
"__enter__",
"__exit__",
"__iter__",
"__next__",
]:
continue
if attr in dir(clz):
continue
def make_member(mem_name: str) -> Any:
is_function = inspect.isfunction(getattr(TextIO, mem_name))
if is_function:
return lambda *args: getattr(args[0].wrapped, mem_name)(*args[1:])
else:
return property(lambda *args: getattr(args[0].wrapped, mem_name))
members[attr] = make_member(attr)
return type("LoggingStream", (clz,), members)
class _LoggingStream:
def __init__(self, wrapped: TextIO, logger_name: Optional[str] = None) -> None:
self.line_buffer = io.StringIO()
self.wrapped = wrapped
self.logger_name = logger_name
def write(self, s: str) -> int:
self.line_buffer.write(s)
return self.wrapped.write(s)
def flush(self) -> None:
buf = self.line_buffer.getvalue()
if not buf:
return
fact = logginglib.getLogRecordFactory()
logger = logging.getLogger(self.logger_name)
created = None
for line in buf.splitlines(keepends=False):
record = fact(
name="demandprinter",
level=logging.INFO,
pathname=__file__,
lineno=1,
msg=line,
args=(),
exc_info=None,
created=created,
)
created = created or record.created
logger.handle(record)
self.line_buffer = io.StringIO()
def close(self) -> None:
self.flush()
self.wrapped.close()
LoggingStream = wrap_text_io(_LoggingStream)
def logging_stream(wrapped: TextIO, logger_name: Optional[str] = None) -> TextIO:
logger_name = logger_name or "demand"
return LoggingStream(wrapped, logger_name)
class ExcludeDemandPrinterFilter(logginglib.Filter):
def __init__(self, name: str = "") -> None:
super().__init__(name)
def filter(self, record: logginglib.LogRecord) -> bool:
return record.name != "demandprinter"
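# Hedged sketch: records emitted through logging_stream() carry the record
# name "demandprinter", so a handler that should not duplicate the table
# output can filter them out (the handler below is illustrative only):
#
#     console = logginglib.StreamHandler()
#     console.addFilter(ExcludeDemandPrinterFilter())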
def calculate_column_widths(
columns: List[str], rows: List[List[str]]
) -> Tuple[int, ...]:
maxes = [len(c.split("@")[0]) for c in columns]
for row in rows:
for n in range(len(row)):
maxes[n] = max(len(row[n]), maxes[n])
return tuple(maxes)
def print_rows(
columns: List[str],
rows: List[List[str]],
stream: Optional[TextIO] = None,
output_format: str = "table",
) -> None:
output_format = output_format or "table"
stream = stream or sys.stdout
short_names = [c.split("@")[0] for c in columns]
if output_format.lower() == "json":
json.dump(
[dict(zip(short_names, row)) for row in rows], stream, indent=2,
)
else:
widths = calculate_column_widths(short_names, rows)
formats = " ".join(["{:%d}" % x for x in widths])
if output_format == "table":
print(formats.format(*short_names), file=stream)
for row in rows:
print(formats.format(*[str(r) for r in row]), file=stream)
stream.flush()
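# Hedged demo (not part of the library): running this module directly prints
# a tiny illustration of the output formats handled by print_rows(). All
# column and row values below are made up.
if __name__ == "__main__":
    _demo_columns = ["name", "ncpus@node_cores", "state"]
    _demo_rows = [["node-1", "4", "ready"], ["node-2", "8", "allocating"]]
    for _fmt in ("table", "table_headerless", "json"):
        print("--- %s ---" % _fmt)
        # "table" prints a header row, "table_headerless" omits it and
        # "json" dumps a list of dicts keyed by the short column names.
        print_rows(_demo_columns, _demo_rows, sys.stdout, _fmt)
        print("")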
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('LICENSE') as f:
license = f.read()
setup(
name='borg_hydro',
version='0.1.0',
author='Stefan Lüdtke',
url='https://git.gfz-potsdam.de:sluedtke/borg_hydro.git',
packages=find_packages(),
license=license,
include_package_data=True,
tests_require=['pytest'],
install_requires=['pandas', 'numpy']
)
|
"""
This module defines the basic `DefaultObject` and its children
`DefaultCharacter`, `DefaultAccount`, `DefaultRoom` and `DefaultExit`.
These are the (default) starting points for all in-game visible
entities.
"""
import time
import inflect
from builtins import object
from future.utils import with_metaclass
from collections import defaultdict
from django.conf import settings
from evennia.typeclasses.models import TypeclassBase
from evennia.typeclasses.attributes import NickHandler
from evennia.objects.manager import ObjectManager
from evennia.objects.models import ObjectDB
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands import cmdset, command
from evennia.commands.cmdsethandler import CmdSetHandler
from evennia.commands import cmdhandler
from evennia.utils import search
from evennia.utils import logger
from evennia.utils import ansi
from evennia.utils.utils import (variable_from_module, lazy_property,
make_iter, to_unicode, is_iter, list_to_string,
to_str)
from django.utils.translation import ugettext as _
_INFLECT = inflect.engine()
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_ScriptDB = None
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
# the sessid_max is based on the length of the db_sessid csv field (excluding commas)
_SESSID_MAX = 16 if _MULTISESSION_MODE in (1, 3) else 1
class ObjectSessionHandler(object):
"""
Handles the get/setting of the sessid
comma-separated integer field
"""
def __init__(self, obj):
"""
Initializes the handler.
Args:
obj (Object): The object on which the handler is defined.
"""
self.obj = obj
self._sessid_cache = []
self._recache()
def _recache(self):
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
self._sessid_cache = list(set(int(val) for val in (self.obj.db_sessid or "").split(",") if val))
if any(sessid for sessid in self._sessid_cache if sessid not in _SESSIONS):
# cache is out of sync with sessionhandler! Only retain the ones in the handler.
self._sessid_cache = [sessid for sessid in self._sessid_cache if sessid in _SESSIONS]
self.obj.db_sessid = ",".join(str(val) for val in self._sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def get(self, sessid=None):
"""
Get the sessions linked to this Object.
Args:
sessid (int, optional): A specific session id.
Returns:
sessions (list): The sessions connected to this object. If `sessid` is given,
this is a list of one (or zero) elements.
Notes:
Aliased to `self.all()`.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
if sessid:
sessions = [_SESSIONS[sessid] if sessid in _SESSIONS else None] if sessid in self._sessid_cache else []
else:
sessions = [_SESSIONS[ssid] if ssid in _SESSIONS else None for ssid in self._sessid_cache]
if None in sessions:
# this happens only if our cache has gone out of sync with the SessionHandler.
self._recache()
return self.get(sessid=sessid)
return sessions
def all(self):
"""
Alias to get(), returning all sessions.
Returns:
sessions (list): All sessions.
"""
return self.get()
def add(self, session):
"""
Add session to handler.
Args:
session (Session or int): Session or session id to add.
Notes:
We will only add a session/sessid if this actually also exists
in the core sessionhandler.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
try:
sessid = session.sessid
except AttributeError:
sessid = session
sessid_cache = self._sessid_cache
if sessid in _SESSIONS and sessid not in sessid_cache:
if len(sessid_cache) >= _SESSID_MAX:
return
sessid_cache.append(sessid)
self.obj.db_sessid = ",".join(str(val) for val in sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def remove(self, session):
"""
Remove session from handler.
Args:
session (Session or int): Session or session id to remove.
"""
try:
sessid = session.sessid
except AttributeError:
sessid = session
sessid_cache = self._sessid_cache
if sessid in sessid_cache:
sessid_cache.remove(sessid)
self.obj.db_sessid = ",".join(str(val) for val in sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def clear(self):
"""
Clear all handled sessids.
"""
self._sessid_cache = []
self.obj.db_sessid = None
self.obj.save(update_fields=["db_sessid"])
def count(self):
"""
Get amount of sessions connected.
Returns:
sesslen (int): Number of sessions handled.
"""
return len(self._sessid_cache)
#
# Base class to inherit from.
class DefaultObject(with_metaclass(TypeclassBase, ObjectDB)):
"""
This is the root typeclass object, representing all entities that
have an actual presence in-game. DefaultObjects generally have a
location. They can also be manipulated and looked at. Game
entities you define should inherit from DefaultObject at some distance.
It is recommended to create children of this class using the
`evennia.create_object()` function rather than to initialize the class
directly - this will both set things up and efficiently save the object
without `obj.save()` having to be called explicitly.
"""
objects = ObjectManager()
# on-object properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
@lazy_property
def sessions(self):
return ObjectSessionHandler(self)
@property
def is_connected(self):
# we get an error for objects subscribed to channels without this
if self.account: # seems sane to pass on the account
return self.account.is_connected
else:
return False
@property
def has_account(self):
"""
Convenience property for checking if an active account is
currently connected to this object.
"""
return self.sessions.count()
@property
def is_superuser(self):
"""
Check if user has an account, and if so, if it is a superuser.
"""
return self.db_account and self.db_account.is_superuser \
and not self.db_account.attributes.get("_quell")
def contents_get(self, exclude=None):
"""
Returns the contents of this object, i.e. all
objects that have this object set as their location.
This should be publicly available.
Args:
exclude (Object): Object to exclude from returned
contents list
Returns:
contents (list): List of contents of this Object.
Notes:
Also available as the `contents` property.
"""
con = self.contents_cache.get(exclude=exclude)
# print "contents_get:", self, con, id(self), calledby() # DEBUG
return con
contents = property(contents_get)
@property
def exits(self):
"""
Returns all exits from this object, i.e. all objects at this
location having the property destination != `None`.
"""
return [exi for exi in self.contents if exi.destination]
# main methods
def get_display_name(self, looker, **kwargs):
"""
Displays the name of the object in a viewer-aware manner.
Args:
looker (TypedObject): The object or account that is looking
at/getting information for this object.
Returns:
name (str): A string containing the name of the object,
including the DBREF if this user is privileged to control
said object.
Notes:
This function could be extended to change how object names
appear to users in character, but be wary. This function
does not change an object's keys or aliases when
searching, and is expected to produce something useful for
builders.
"""
if self.locks.check_lockstring(looker, "perm(Builder)"):
return "{}(#{})".format(self.name, self.id)
return self.name
def get_numbered_name(self, count, looker, **kwargs):
"""
Return the numbered (singular, plural) forms of this object's key. This is by default called
by return_appearance and is used for grouping multiple same-named versions of this object. Note that
this will be called on *every* member of a group even though the plural name will be only
shown once. Also the singular display version, such as 'an apple', 'a tree' is determined
from this method.
Args:
count (int): Number of objects of this type
looker (Object): Onlooker. Not used by default.
Kwargs:
key (str): Optional key to pluralize, if given, use this instead of the object's key.
Returns:
singular (str): The singular form to display.
plural (str): The determined plural form of the key, including the count.
"""
key = kwargs.get("key", self.key)
key = ansi.ANSIString(key) # this is needed to allow inflection of colored names
plural = _INFLECT.plural(key, 2)
plural = "%s %s" % (_INFLECT.number_to_words(count, threshold=12), plural)
singular = _INFLECT.an(key)
if not self.aliases.get(plural, category="plural_key"):
# we need to wipe any old plurals/an/a in case the key changed in the interim
self.aliases.clear(category="plural_key")
self.aliases.add(plural, category="plural_key")
# save the singular form as an alias here too so we can display "an egg" and also
# look at 'an egg'.
self.aliases.add(singular, category="plural_key")
return singular, plural
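# Hedged example of the return value, assuming an object keyed "apple":
#
#     obj.get_numbered_name(3, looker)   -> ("an apple", "three apples")
#     obj.get_numbered_name(14, looker)  -> ("an apple", "14 apples")
#
# Counts above the inflect threshold of 12 are kept as digits instead of
# being spelled out.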
def search(self, searchdata,
global_search=False,
use_nicks=True,
typeclass=None,
location=None,
attribute_name=None,
quiet=False,
exact=False,
candidates=None,
nofound_string=None,
multimatch_string=None,
use_dbref=None):
"""
Returns an Object matching a search string/condition
Perform a standard object search in the database, handling
multiple results and lack thereof gracefully. By default, only
objects in the current `location` of `self` or its inventory are searched for.
Args:
searchdata (str or obj): Primary search criterion. Will be matched
against `object.key` (with `object.aliases` second) unless
the keyword attribute_name specifies otherwise.
**Special strings:**
- `#<num>`: search by unique dbref. This is always
a global search.
- `me,self`: self-reference to this object
- `<num>-<string>` - can be used to differentiate
between multiple same-named matches
global_search (bool): Search all objects globally. This is overruled
by `location` keyword.
use_nicks (bool): Use nickname-replace (nicktype "object") on `searchdata`.
typeclass (str or Typeclass, or list of either): Limit search only
to `Objects` with this typeclass. May be a list of typeclasses
for a broader search.
location (Object or list): Specify a location or multiple locations
to search. Note that this is used to query the *contents* of a
location and will not match for the location itself -
if you want that, don't set this or use `candidates` to specify
exactly which objects should be searched.
attribute_name (str): Define which property to search. If set, no
key+alias search will be performed. This can be used
to search database fields (db_ will be automatically
prepended), and if that fails, it will try to return
objects having Attributes with this name and value
equal to searchdata. A special use is to search for
"key" here if you want to do a key-search without
including aliases.
quiet (bool): don't display default error messages - this tells the
search method that the user wants to handle all errors
themselves. It also changes the return value type, see
below.
exact (bool): if unset (default) - prefers to match to beginning of
string rather than not matching at all. If set, requires
exact matching of entire string.
candidates (list of objects): this is an optional custom list of objects
to search (filter) between. It is ignored if `global_search`
is given. If not set, this list will automatically be defined
to include the location, the contents of location and the
caller's contents (inventory).
nofound_string (str): optional custom string for not-found error message.
multimatch_string (str): optional custom string for multimatch error header.
use_dbref (bool or None, optional): If `True`, allow to enter e.g. a query "#123"
to find an object (globally) by its database-id 123. If `False`, the string "#123"
will be treated like a normal string. If `None` (default), the ability to query by
#dbref is turned on if `self` has the permission 'Builder' and is turned off
otherwise.
Returns:
match (Object, None or list): will return an Object/None if `quiet=False`,
otherwise it will return a list of 0, 1 or more matches.
Notes:
To find Accounts, use eg. `evennia.account_search`. If
`quiet=False`, error messages will be handled by
`settings.SEARCH_AT_RESULT` and echoed automatically (on
error, return will be `None`). If `quiet=True`, the error
messaging is assumed to be handled by the caller.
"""
is_string = isinstance(searchdata, basestring)
if is_string:
# searchdata is a string; wrap some common self-references
if searchdata.lower() in ("here", ):
return [self.location] if quiet else self.location
if searchdata.lower() in ("me", "self",):
return [self] if quiet else self
if use_dbref is None:
use_dbref = self.locks.check_lockstring(self, "_dummy:perm(Builder)")
if use_nicks:
# do nick-replacement on search
searchdata = self.nicks.nickreplace(searchdata, categories=("object", "account"), include_account=True)
if (global_search or (is_string and searchdata.startswith("#") and
len(searchdata) > 1 and searchdata[1:].isdigit())):
# only allow exact matching if searching the entire database
# or unique #dbrefs
exact = True
candidates = None
elif candidates is None:
# no custom candidates given - get them automatically
if location:
# location(s) were given
candidates = []
for obj in make_iter(location):
candidates.extend(obj.contents)
else:
# local search. Candidates are taken from
# self.contents, self.location and
# self.location.contents
location = self.location
candidates = self.contents
if location:
candidates = candidates + [location] + location.contents
else:
# normally we don't need this since we are
# included in location.contents
candidates.append(self)
results = ObjectDB.objects.object_search(searchdata,
attribute_name=attribute_name,
typeclass=typeclass,
candidates=candidates,
exact=exact,
use_dbref=use_dbref)
if quiet:
return results
return _AT_SEARCH_RESULT(results, self, query=searchdata,
nofound_string=nofound_string, multimatch_string=multimatch_string)
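# Hedged usage sketch (the object keys and dbref below are illustrative):
#
#     target = self.search("red button")          # local key/alias search
#     anything = self.search("#123")              # dbref searches are always global
#     matches = self.search("sword", quiet=True)  # list of 0..n matches, no auto-echo
#
# With quiet=False (the default), error feedback is delegated to
# settings.SEARCH_AT_RESULT and None is returned when nothing matches.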
def search_account(self, searchdata, quiet=False):
"""
Simple shortcut wrapper to search for accounts, not characters.
Args:
searchdata (str): Search criterion - the key or dbref of the account
to search for. If this is "here" or "me", search
for the account connected to this object.
quiet (bool): Returns the results as a list rather than
echo eventual standard error messages. Default `False`.
Returns:
result (Account, None or list): Just what is returned depends on
the `quiet` setting:
- `quiet=False`: No match or multimatch auto-echoes errors
to self.msg, then returns `None`. The results are passed
through `settings.SEARCH_AT_RESULT` and
`settings.SEARCH_AT_MULTIMATCH_INPUT`. If there is a
unique match, this will be returned.
- `quiet=True`: No automatic error messaging is done, and
what is returned is always a list with 0, 1 or more
matching Accounts.
"""
if isinstance(searchdata, basestring):
# searchdata is a string; wrap some common self-references
if searchdata.lower() in ("me", "self",):
return [self.account] if quiet else self.account
results = search.search_account(searchdata)
if quiet:
return results
return _AT_SEARCH_RESULT(results, self, query=searchdata)
def execute_cmd(self, raw_string, session=None, **kwargs):
"""
Do something as this object. This is never called normally,
it's only used when wanting specifically to let an object be
the caller of a command. It makes use of nicks of eventual
connected accounts as well.
Args:
raw_string (string): Raw command input
session (Session, optional): Session to
return results to
Kwargs:
Other keyword arguments will be added to the found command
object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
change operating parameters for commands at run-time.
Returns:
defer (Deferred): This is an asynchronous Twisted object that
will not fire until the command has actually finished
executing. To overload this one needs to attach
callback functions to it, with addCallback(function).
This function will be called with an eventual return
value from the command execution. This return is not
used at all by Evennia by default, but might be useful
for coders intending to implement some sort of nested
command structure.
"""
# nick replacement - we require full-word matching.
# do text encoding conversion
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string, categories=("inputline", "channel"), include_account=True)
return cmdhandler.cmdhandler(self, raw_string, callertype="object", session=session, **kwargs)
def msg(self, text=None, from_obj=None, session=None, options=None, **kwargs):
"""
Emits something to a session attached to the object.
Args:
text (str or tuple, optional): The message to send. This
is treated internally like any send-command, so its
value can be a tuple if sending multiple arguments to
the `text` oob command.
from_obj (obj or list, optional): object that is sending. If
given, at_msg_send will be called. This value will be
passed on to the protocol. If iterable, will execute hook
on all entities in it.
session (Session or list, optional): Session or list of
Sessions to relay data to, if any. If set, will force send
to these sessions. If unset, who receives the message
depends on the MULTISESSION_MODE.
options (dict, optional): Message-specific option-value
pairs. These will be applied at the protocol level.
Kwargs:
any (string or tuples): All kwarg keys not listed above
will be treated as send-command names and their arguments
(which can be a string or a tuple).
Notes:
`at_msg_receive` will be called on this Object.
All extra kwargs will be passed on to the protocol.
"""
# try send hooks
if from_obj:
for obj in make_iter(from_obj):
try:
obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
logger.log_trace()
kwargs["options"] = options
try:
if not self.at_msg_receive(text=text, **kwargs):
# if at_msg_receive returns false, we abort message to this object
return
except Exception:
logger.log_trace()
if text is not None:
if not (isinstance(text, basestring) or isinstance(text, tuple)):
# sanitize text before sending across the wire
try:
text = to_str(text, force_string=True)
except Exception:
text = repr(text)
kwargs['text'] = text
# relay to session(s)
sessions = make_iter(session) if session else self.sessions.all()
for session in sessions:
session.data_out(**kwargs)
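# Hedged usage sketch: both the plain-string and the (message, {kwargs})
# outputfunc forms documented above are accepted (values are illustrative):
#
#     obj.msg("You hear a faint click.")
#     obj.msg(text=("A door opens.", {"type": "ambient"}), from_obj=self)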
def for_contents(self, func, exclude=None, **kwargs):
"""
Runs a function on every object contained within this one.
Args:
func (callable): Function to call. This must have the
formal call sign func(obj, **kwargs), where obj is the
object currently being processed and `**kwargs` are
passed on from the call to `for_contents`.
exclude (list, optional): A list of object not to call the
function on.
Kwargs:
Keyword arguments will be passed to the function for all objects.
"""
contents = self.contents
if exclude:
exclude = make_iter(exclude)
contents = [obj for obj in contents if obj not in exclude]
for obj in contents:
func(obj, **kwargs)
def msg_contents(self, text=None, exclude=None, from_obj=None, mapping=None, **kwargs):
"""
Emits a message to all objects inside this object.
Args:
text (str or tuple): Message to send. If a tuple, this should be
on the valid OOB outmessage form `(message, {kwargs})`,
where kwargs are optional data passed to the `text`
outputfunc.
exclude (list, optional): A list of objects not to send to.
from_obj (Object, optional): An object designated as the
"sender" of the message. See `DefaultObject.msg()` for
more info.
mapping (dict, optional): A mapping of formatting keys
`{"key":<object>, "key2":<object2>,...}. The keys
must match `{key}` markers in the `text` if this is a string or
in the internal `message` if `text` is a tuple. These
formatting statements will be
replaced by the return of `<object>.get_display_name(looker)`
for every looker in contents that receives the
message. This allows for every object to potentially
get its own customized string.
Kwargs:
Keyword arguments will be passed on to `obj.msg()` for all
messaged objects.
Notes:
The `mapping` argument is required if `message` contains
{}-style format syntax. The keys of `mapping` should match
named format tokens, and its values will have their
`get_display_name()` function called for each object in
the room before substitution. If an item in the mapping does
not have `get_display_name()`, its string value will be used.
Example:
Say Char is a Character object and Npc is an NPC object:
char.location.msg_contents(
"{attacker} kicks {defender}",
mapping=dict(attacker=char, defender=npc), exclude=(char, npc))
This will result in everyone in the room seeing 'Char kicks NPC'
where everyone may potentially see different results for Char and Npc
depending on the results of `char.get_display_name(looker)` and
`npc.get_display_name(looker)` for each particular onlooker
"""
# we also accept an outcommand on the form (message, {kwargs})
is_outcmd = text and is_iter(text)
inmessage = text[0] if is_outcmd else text
outkwargs = text[1] if is_outcmd and len(text) > 1 else {}
contents = self.contents
if exclude:
exclude = make_iter(exclude)
contents = [obj for obj in contents if obj not in exclude]
for obj in contents:
if mapping:
substitutions = {t: sub.get_display_name(obj)
if hasattr(sub, 'get_display_name')
else str(sub) for t, sub in mapping.items()}
outmessage = inmessage.format(**substitutions)
else:
outmessage = inmessage
obj.msg(text=(outmessage, outkwargs), from_obj=from_obj, **kwargs)
def move_to(self, destination, quiet=False,
emit_to_obj=None, use_destination=True, to_none=False, move_hooks=True,
**kwargs):
"""
Moves this object to a new location.
Args:
destination (Object): Reference to the object to move to. This
can also be an exit object, in which case the
destination property is used as destination.
quiet (bool): If true, turn off the calling of the emit hooks
(announce_move_to/from etc)
emit_to_obj (Object): object to receive error messages
use_destination (bool): Default is for objects to use the "destination"
property of destinations as the target to move to. Turning off this
keyword allows objects to move "inside" exit objects.
to_none (bool): Allow destination to be None. Note that no hooks are run when
moving to a None location. If you want to run hooks, run them manually
(and make sure they can manage None locations).
move_hooks (bool): If False, turn off the calling of move-related hooks
(at_before/after_move etc) with quiet=True, this is as quiet a move
as can be done.
Kwargs:
Passed on to announce_move_to and announce_move_from hooks.
Returns:
result (bool): True/False depending on if there were problems with the move.
This method may also return various error messages to the
`emit_to_obj`.
Notes:
No access checks are done in this method, these should be handled before
calling `move_to`.
The `DefaultObject` hooks called (if `move_hooks=True`) are, in order:
1. `self.at_before_move(destination)` (if this returns False, move is aborted)
2. `source_location.at_object_leave(self, destination)`
3. `self.announce_move_from(destination)`
4. (move happens here)
5. `self.announce_move_to(source_location)`
6. `destination.at_object_receive(self, source_location)`
7. `self.at_after_move(source_location)`
"""
def logerr(string="", err=None):
"""Simple log helper method"""
logger.log_trace()
self.msg("%s%s" % (string, "" if err is None else " (%s)" % err))
return
errtxt = _("Couldn't perform move ('%s'). Contact an admin.")
if not emit_to_obj:
emit_to_obj = self
if not destination:
if to_none:
# immediately move to None. There can be no hooks called since
# there is no destination to call them with.
self.location = None
return True
emit_to_obj.msg(_("The destination doesn't exist."))
return False
if destination.destination and use_destination:
# traverse exits
destination = destination.destination
# Before the move, call eventual pre-commands.
if move_hooks:
try:
if not self.at_before_move(destination):
return False
except Exception as err:
logerr(errtxt % "at_before_move()", err)
return False
# Save the old location
source_location = self.location
# Call hook on source location
if move_hooks and source_location:
try:
source_location.at_object_leave(self, destination)
except Exception as err:
logerr(errtxt % "at_object_leave()", err)
return False
if not quiet:
# tell the old room we are leaving
try:
self.announce_move_from(destination, **kwargs)
except Exception as err:
logerr(errtxt % "at_announce_move()", err)
return False
# Perform move
try:
self.location = destination
except Exception as err:
logerr(errtxt % "location change", err)
return False
if not quiet:
# Tell the new room we are there.
try:
self.announce_move_to(source_location, **kwargs)
except Exception as err:
logerr(errtxt % "announce_move_to()", err)
return False
if move_hooks:
# Perform eventual extra commands on the receiving location
# (the object has already arrived at this point)
try:
destination.at_object_receive(self, source_location)
except Exception as err:
logerr(errtxt % "at_object_receive()", err)
return False
# Execute eventual extra commands on this object after moving it
# (usually calling 'look')
if move_hooks:
try:
self.at_after_move(source_location)
except Exception as err:
logerr(errtxt % "at_after_move", err)
return False
return True
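# Hedged usage sketch (the destination lookup below is illustrative):
#
#     room = self.search("Limbo", global_search=True)
#     obj.move_to(room)                  # full move, hooks and announcements
#     obj.move_to(room, quiet=True)      # skip the announce_move_from/to calls
#     obj.move_to(None, to_none=True)    # detach from the grid; no hooks run
#
# The call returns True/False; failures are reported to emit_to_obj
# (this object by default).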
def clear_exits(self):
"""
Destroys all of the exits and any exits pointing to this
object as a destination.
"""
for out_exit in [exi for exi in ObjectDB.objects.get_contents(self) if exi.db_destination]:
out_exit.delete()
for in_exit in ObjectDB.objects.filter(db_destination=self):
in_exit.delete()
def clear_contents(self):
"""
Moves all objects (accounts/things) to their home location or
to default home.
"""
# Gather up everything that thinks this is its location.
default_home_id = int(settings.DEFAULT_HOME.lstrip("#"))
try:
default_home = ObjectDB.objects.get(id=default_home_id)
if default_home.dbid == self.dbid:
# we are deleting default home!
default_home = None
except Exception:
string = _("Could not find default home '(#%d)'.")
logger.log_err(string % default_home_id)
default_home = None
for obj in self.contents:
home = obj.home
# Obviously, we can't send it back to here.
if not home or (home and home.dbid == self.dbid):
obj.home = default_home
home = default_home
# If for some reason it's still None...
if not home:
string = "Missing default home, '%s(#%d)' "
string += "now has a null location."
obj.location = None
obj.msg(_("Something went wrong! You are dumped into nowhere. Contact an admin."))
logger.log_err(string % (obj.name, obj.dbid))
return
if obj.has_account:
if home:
string = "Your current location has ceased to exist,"
string += " moving you to %s(#%d)."
obj.msg(_(string) % (home.name, home.dbid))
else:
# Famous last words: The account should never see this.
string = "This place should not exist ... contact an admin."
obj.msg(_(string))
obj.move_to(home)
def copy(self, new_key=None):
"""
Makes an identical copy of this object, identical except for a
new dbref in the database. If you want to customize the copy
by changing some settings, use ObjectDB.object.copy_object()
directly.
Args:
new_key (string): New key/name of copied object. If new_key is not
specified, the copy gets a numeric suffix appended to <old_key> (see find_clone_key below).
Returns:
copy (Object): A copy of this object.
"""
def find_clone_key():
"""
Append a numeric suffix to obj.key. Counts the same-named clones
already present in this location and returns the new clone name
on the form keyXXX.
"""
key = self.key
num = sum(1 for obj in self.location.contents
if obj.key.startswith(key) and obj.key[len(key):].isdigit())
return "%s%03i" % (key, num)
new_key = new_key or find_clone_key()
return ObjectDB.objects.copy_object(self, new_key=new_key)
def delete(self):
"""
Deletes this object. Before deletion, this method makes sure
to move all contained objects to their respective home
locations, as well as clean up all exits to/from the object.
Returns:
noerror (bool): Returns whether or not the delete completed
successfully or not.
"""
global _ScriptDB
if not _ScriptDB:
from evennia.scripts.models import ScriptDB as _ScriptDB
if not self.pk or not self.at_object_delete():
# This object has already been deleted,
# or the pre-delete check returned False
return False
# See if we need to kick the account off.
for session in self.sessions.all():
session.msg(_("Your character %s has been destroyed.") % self.key)
# no need to disconnect, Account just jumps to OOC mode.
# sever the connection (important!)
if self.account:
for session in self.sessions.all():
self.account.unpuppet_object(session)
self.account = None
for script in _ScriptDB.objects.get_all_scripts_on_obj(self):
script.stop()
# Destroy any exits to and from this room, if any
self.clear_exits()
# Clear out any non-exit objects located within the object
self.clear_contents()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
self.location = None # this updates contents_cache for our location
# Perform the deletion of the object
super(DefaultObject, self).delete()
return True
def access(self, accessing_obj, access_type='read', default=False, no_superuser_bypass=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one.
access_type (str, optional): Type of access sought.
default (bool, optional): What to return if no lock of access_type was found.
no_superuser_bypass (bool, optional): If `True`, don't skip
lock check for superuser (be careful with this one).
Kwargs:
Passed on to the at_access hook along with the result of the access check.
"""
result = super(DefaultObject, self).access(accessing_obj, access_type=access_type,
default=default, no_superuser_bypass=no_superuser_bypass)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
#
# Hook methods
#
def at_first_save(self):
"""
This is called by the typeclass system whenever an instance of
this class is saved for the first time. It is a generic hook
for calling the startup hooks for the various game entities.
When overloading you generally don't overload this but
overload the hooks called by this method.
"""
self.basetype_setup()
self.at_object_creation()
if hasattr(self, "_createdict"):
# this will only be set if the utils.create function
# was used to create the object. We want the create
# call's kwargs to override the values set by hooks.
cdict = self._createdict
updates = []
if not cdict.get("key"):
if not self.db_key:
self.db_key = "#%i" % self.dbid
updates.append("db_key")
elif self.key != cdict.get("key"):
updates.append("db_key")
self.db_key = cdict["key"]
if cdict.get("location") and self.location != cdict["location"]:
self.db_location = cdict["location"]
updates.append("db_location")
if cdict.get("home") and self.home != cdict["home"]:
self.home = cdict["home"]
updates.append("db_home")
if cdict.get("destination") and self.destination != cdict["destination"]:
self.destination = cdict["destination"]
updates.append("db_destination")
if updates:
self.save(update_fields=updates)
if cdict.get("permissions"):
self.permissions.batch_add(*cdict["permissions"])
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("aliases"):
self.aliases.batch_add(*cdict["aliases"])
if cdict.get("location"):
cdict["location"].at_object_receive(self, None)
self.at_after_move(None)
if cdict.get("tags"):
# this should be a list of tags, tuples (key, category) or (key, category, data)
self.tags.batch_add(*cdict["tags"])
if cdict.get("attributes"):
# this should be tuples (key, val, ...)
self.attributes.batch_add(*cdict["attributes"])
if cdict.get("nattributes"):
# this should be a dict of nattrname:value
for key, value in cdict["nattributes"]:
self.nattributes.add(key, value)
del self._createdict
self.basetype_posthook_setup()
# hooks called by the game engine #
def basetype_setup(self):
"""
This sets up the default properties of an Object, just before
the more general at_object_creation.
You normally don't need to change this unless you change some
fundamental things like names of permission groups.
"""
# the default security setup fallback for a generic
# object. Overload in child for a custom setup. Also creation
# commands may set this (create an item and you should be its
# controller, for example)
self.locks.add(";".join([
"control:perm(Developer)", # edit locks/permissions, delete
"examine:perm(Builder)", # examine properties
"view:all()", # look at object (visibility)
"edit:perm(Admin)", # edit properties/attributes
"delete:perm(Admin)", # delete object
"get:all()", # pick up object
"call:true()", # allow to call commands on this object
"tell:perm(Admin)", # allow emits to this object
"puppet:pperm(Developer)"])) # lock down puppeting only to staff by default
def basetype_posthook_setup(self):
"""
Called once, after basetype_setup and at_object_creation. This
should generally not be overloaded unless you are redefining
how a room/exit/object works. It allows for basetype-like
setup after the object is created. An example of this is
EXITs, who need to know keys, aliases, locks etc to set up
their exit-cmdsets.
"""
pass
def at_object_creation(self):
"""
Called once, when this object is first created. This is the
normal hook to overload for most object types.
"""
pass
def at_object_delete(self):
"""
Called just before the database object is permanently
delete()d from the database. If this method returns False,
deletion is aborted.
"""
return True
def at_init(self):
"""
This is always called whenever this object is initiated --
that is, whenever it or its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this object are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the object currently
have no cmdsets.
Kwargs:
caller (Session, Object or Account): The caller requesting
this cmdset.
"""
pass
def at_pre_puppet(self, account, session=None, **kwargs):
"""
Called just before an Account connects to this object to puppet
it.
Args:
account (Account): This is the connecting account.
session (Session): Session controlling the connection.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_post_puppet(self, **kwargs):
"""
Called just after puppeting has been completed and all
Account<->Object links have been established.
Args:
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Note:
You can use `self.account` and `self.sessions.get()` to get
account and sessions at this point; the last entry in the
list from `self.sessions.get()` is the latest Session
puppeting this Object.
"""
self.account.db._last_puppet = self
def at_pre_unpuppet(self, **kwargs):
"""
Called just before beginning to un-connect a puppeting from
this Account.
Args:
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Note:
You can use `self.account` and `self.sessions.get()` to get
account and sessions at this point; the last entry in the
list from `self.sessions.get()` is the latest Session
puppeting this Object.
"""
pass
def at_post_unpuppet(self, account, session=None, **kwargs):
"""
Called just after the Account successfully disconnected from
this object, severing all connections.
Args:
account (Account): The account object that just disconnected
from this object.
session (Session): Session id controlling the connection that
just disconnected.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
Args:
result (bool): The outcome of the access call.
accessing_obj (Object or Account): The entity trying to gain access.
access_type (str): The type of access that was requested.
Kwargs:
Not used by default, added for possible expandability in a
game.
"""
pass
# hooks called when moving the object
def at_before_move(self, destination, **kwargs):
"""
Called just before starting to move this object to
destination.
Args:
destination (Object): The object we are moving to
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shouldmove (bool): If we should move or not.
Notes:
If this method returns False/None, the move is cancelled
before it is even started.
"""
# return has_perm(self, destination, "can_move")
return True
def announce_move_from(self, destination, msg=None, mapping=None, **kwargs):
"""
Called if the move is to be announced. This is
called while we are still standing in the old
location.
Args:
destination (Object): The place we are going to.
msg (str, optional): a replacement message.
mapping (dict, optional): additional mapping objects.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
You can override this method and call its parent with a
message to simply change the default message. In the string,
you can use the following as mappings (between braces):
object: the object which is moving.
exit: the exit from which the object is moving (if found).
origin: the location of the object before the move.
destination: the location of the object after moving.
"""
if not self.location:
return
if msg:
string = msg
else:
string = "{object} is leaving {origin}, heading for {destination}."
location = self.location
exits = [o for o in location.contents if o.location is location and o.destination is destination]
if not mapping:
mapping = {}
mapping.update({
"object": self,
"exit": exits[0] if exits else "somewhere",
"origin": location or "nowhere",
"destination": destination or "nowhere",
})
location.msg_contents(string, exclude=(self, ), mapping=mapping)
def announce_move_to(self, source_location, msg=None, mapping=None, **kwargs):
"""
Called after the move if the move was not quiet. At this point
we are standing in the new location.
Args:
source_location (Object): The place we came from
msg (str, optional): the replacement message if location.
mapping (dict, optional): additional mapping objects.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
You can override this method and call its parent with a
message to simply change the default message. In the string,
you can use the following as mappings (between braces):
object: the object which is moving.
exit: the exit from which the object is moving (if found).
origin: the location of the object before the move.
destination: the location of the object after moving.
"""
if not source_location and self.location.has_account:
# This was created from nowhere and added to an account's
# inventory; it's probably the result of a create command.
string = "You now have %s in your possession." % self.get_display_name(self.location)
self.location.msg(string)
return
if source_location:
if msg:
string = msg
else:
string = "{object} arrives to {destination} from {origin}."
else:
string = "{object} arrives to {destination}."
origin = source_location
destination = self.location
exits = []
if origin:
exits = [o for o in destination.contents if o.location is destination and o.destination is origin]
if not mapping:
mapping = {}
mapping.update({
"object": self,
"exit": exits[0] if exits else "somewhere",
"origin": origin or "nowhere",
"destination": destination or "nowhere",
})
destination.msg_contents(string, exclude=(self, ), mapping=mapping)
def at_after_move(self, source_location, **kwargs):
"""
Called after move has completed, regardless of quiet mode or
not. Allows changes to the object due to the location it is
now in.
Args:
            source_location (Object): Where we came from. This may be `None`.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_object_leave(self, moved_obj, target_location, **kwargs):
"""
Called just before an object leaves from inside this object
Args:
moved_obj (Object): The object leaving
target_location (Object): Where `moved_obj` is going.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_object_receive(self, moved_obj, source_location, **kwargs):
"""
Called after an object has been moved into this object.
Args:
moved_obj (Object): The object moved into this one
source_location (Object): Where `moved_object` came from.
Note that this could be `None`.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_traverse(self, traversing_object, target_location, **kwargs):
"""
This hook is responsible for handling the actual traversal,
normally by calling
`traversing_object.move_to(target_location)`. It is normally
only implemented by Exit objects. If it returns False (usually
because `move_to` returned False), `at_after_traverse` below
should not be called and instead `at_failed_traverse` should be
called.
Args:
traversing_object (Object): Object traversing us.
target_location (Object): Where target is going.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_after_traverse(self, traversing_object, source_location, **kwargs):
"""
Called just after an object successfully used this object to
traverse to another object (i.e. this object is a type of
Exit)
Args:
traversing_object (Object): The object traversing us.
source_location (Object): Where `traversing_object` came from.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
The target location should normally be available as `self.destination`.
"""
pass
def at_failed_traverse(self, traversing_object, **kwargs):
"""
This is called if an object fails to traverse this object for
some reason.
Args:
traversing_object (Object): The object that failed traversing us.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
Using the default exits, this hook will not be called if an
Attribute `err_traverse` is defined - this will in that case be
read for an error string instead.
"""
pass
def at_msg_receive(self, text=None, from_obj=None, **kwargs):
"""
This hook is called whenever someone sends a message to this
object using the `msg` method.
Note that from_obj may be None if the sender did not include
itself as an argument to the obj.msg() call - so you have to
        check for this.
Consider this a pre-processing method before msg is passed on
to the user session. If this method returns False, the msg
will not be passed on.
Args:
text (str, optional): The message received.
from_obj (any, optional): The object sending the message.
Kwargs:
This includes any keywords sent to the `msg` method.
Returns:
receive (bool): If this message should be received.
Notes:
If this method returns False, the `msg` operation
will abort without sending the message.
"""
return True
def at_msg_send(self, text=None, to_obj=None, **kwargs):
"""
This is a hook that is called when *this* object sends a
message to another object with `obj.msg(text, to_obj=obj)`.
Args:
text (str, optional): Text to send.
to_obj (any, optional): The object to send to.
Kwargs:
Keywords passed from msg()
Notes:
Since this method is executed by `from_obj`, if no `from_obj`
was passed to `DefaultCharacter.msg` this hook will never
get called.
"""
pass
# hooks called by the default cmdset.
def return_appearance(self, looker, **kwargs):
"""
This formats a description. It is the hook a 'look' command
should call.
Args:
looker (Object): Object doing the looking.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if not looker:
return ""
# get and identify all objects
visible = (con for con in self.contents if con != looker and
con.access(looker, "view"))
exits, users, things = [], [], defaultdict(list)
for con in visible:
key = con.get_display_name(looker)
if con.destination:
exits.append(key)
elif con.has_account:
users.append("|c%s|n" % key)
else:
# things can be pluralized
things[key].append(con)
# get description, build string
string = "|c%s|n\n" % self.get_display_name(looker)
desc = self.db.desc
if desc:
string += "%s" % desc
if exits:
string += "\n|wExits:|n " + list_to_string(exits)
if users or things:
# handle pluralization of things (never pluralize users)
thing_strings = []
            for key, itemlist in sorted(things.items()):
                nitem = len(itemlist)
                if nitem == 1:
                    key, _ = itemlist[0].get_numbered_name(nitem, looker, key=key)
                else:
                    key = itemlist[0].get_numbered_name(nitem, looker, key=key)[1]
thing_strings.append(key)
string += "\n|wYou see:|n " + list_to_string(users + thing_strings)
return string
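    # Illustrative result of the string built above (a hedged example; the exact
    # separators come from list_to_string and |c/|w/|n are colour markup):
    #   |cDungeon Entrance|n
    #   A dark opening in the rock face.
    #   |wExits:|n north and south
    #   |wYou see:|n |cRanger|n, a torch and 2 rats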
def at_look(self, target, **kwargs):
"""
Called when this object performs a look. It allows to
customize just what this means. It will not itself
send any data.
Args:
target (Object): The target being looked at. This is
commonly an object or the current location. It will
be checked for the "view" type access.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
lookstring (str): A ready-processed look string
potentially ready to return to the looker.
"""
if not target.access(self, "view"):
try:
return "Could not view '%s'." % target.get_display_name(self)
except AttributeError:
return "Could not view '%s'." % target.key
description = target.return_appearance(self)
        # call the target's at_desc() method. This must be the last reference
        # to target so it may delete itself when acted on.
target.at_desc(looker=self)
return description
def at_desc(self, looker=None, **kwargs):
"""
This is called whenever someone looks at this object.
Args:
looker (Object, optional): The object requesting the description.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_before_get(self, getter, **kwargs):
"""
Called by the default `get` command before this object has been
picked up.
Args:
getter (Object): The object about to get this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shouldget (bool): If the object should be gotten or not.
Notes:
If this method returns False/None, the getting is cancelled
before it is even started.
"""
return True
def at_get(self, getter, **kwargs):
"""
Called by the default `get` command when this object has been
picked up.
Args:
getter (Object): The object getting this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
This hook cannot stop the pickup from happening. Use
permissions or the at_before_get() hook for that.
"""
pass
def at_before_give(self, giver, getter, **kwargs):
"""
Called by the default `give` command before this object has been
given.
Args:
giver (Object): The object about to give this object.
getter (Object): The object about to get this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shouldgive (bool): If the object should be given or not.
Notes:
If this method returns False/None, the giving is cancelled
before it is even started.
"""
return True
def at_give(self, giver, getter, **kwargs):
"""
Called by the default `give` command when this object has been
given.
Args:
giver (Object): The object giving this object.
getter (Object): The object getting this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
This hook cannot stop the give from happening. Use
permissions or the at_before_give() hook for that.
"""
pass
def at_before_drop(self, dropper, **kwargs):
"""
Called by the default `drop` command before this object has been
dropped.
Args:
dropper (Object): The object which will drop this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shoulddrop (bool): If the object should be dropped or not.
Notes:
If this method returns False/None, the dropping is cancelled
before it is even started.
"""
return True
def at_drop(self, dropper, **kwargs):
"""
Called by the default `drop` command when this object has been
dropped.
Args:
dropper (Object): The object which just dropped this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
This hook cannot stop the drop from happening. Use
permissions or the at_before_drop() hook for that.
"""
pass
def at_before_say(self, message, **kwargs):
"""
Before the object says something.
        This hook is by default used by the 'say' and 'whisper'
        commands. It is called before the text is said/whispered and can
        be used to customize the outgoing text from the object.
        Returning `None` aborts the command.
Args:
message (str): The suggested say/whisper text spoken by self.
Kwargs:
whisper (bool): If True, this is a whisper rather than
a say. This is sent by the whisper command by default.
Other verbal commands could use this hook in similar
ways.
receivers (Object or iterable): If set, this is the target or targets for the say/whisper.
Returns:
message (str): The (possibly modified) text to be spoken.
"""
return message
def at_say(self, message, msg_self=None, msg_location=None,
receivers=None, msg_receivers=None, **kwargs):
"""
Display the actual say (or whisper) of self.
This hook should display the actual say/whisper of the object in its
location. It should both alert the object (self) and its
location that some text is spoken. The overriding of messages or
`mapping` allows for simple customization of the hook without
re-writing it completely.
Args:
message (str): The message to convey.
            msg_self (bool or str, optional): If boolean True, echo `message` to self. If a string,
                echo that string to self instead. If False or unset, don't echo to self.
msg_location (str, optional): The message to echo to self's location.
receivers (Object or iterable, optional): An eventual receiver or receivers of the message
(by default only used by whispers).
            msg_receivers (str): Specific message to pass to the receiver(s). This will be parsed
                with the {receiver} placeholder replaced with the given receiver.
Kwargs:
whisper (bool): If this is a whisper rather than a say. Kwargs
can be used by other verbal commands in a similar way.
mapping (dict): Pass an additional mapping to the message.
Notes:
Messages can contain {} markers. These are substituted against the values
passed in the `mapping` argument.
msg_self = 'You say: "{speech}"'
msg_location = '{object} says: "{speech}"'
msg_receivers = '{object} whispers: "{speech}"'
Supported markers by default:
{self}: text to self-reference with (default 'You')
{speech}: the text spoken/whispered by self.
{object}: the object speaking.
{receiver}: replaced with a single receiver only for strings meant for a specific
receiver (otherwise 'None').
{all_receivers}: comma-separated list of all receivers,
if more than one, otherwise same as receiver
{location}: the location where object is.
"""
msg_type = 'say'
if kwargs.get("whisper", False):
# whisper mode
msg_type = 'whisper'
msg_self = '{self} whisper to {all_receivers}, "{speech}"' if msg_self is True else msg_self
            msg_receivers = msg_receivers or '{object} whispers: "{speech}"'
msg_location = None
else:
msg_self = '{self} say, "{speech}"' if msg_self is True else msg_self
msg_location = msg_location or '{object} says, "{speech}"'
msg_receivers = msg_receivers or message
custom_mapping = kwargs.get('mapping', {})
receivers = make_iter(receivers) if receivers else None
location = self.location
if msg_self:
self_mapping = {"self": "You",
"object": self.get_display_name(self),
"location": location.get_display_name(self) if location else None,
"receiver": None,
"all_receivers": ", ".join(
recv.get_display_name(self)
for recv in receivers) if receivers else None,
"speech": message}
self_mapping.update(custom_mapping)
self.msg(text=(msg_self.format(**self_mapping), {"type": msg_type}), from_obj=self)
if receivers and msg_receivers:
receiver_mapping = {"self": "You",
"object": None,
"location": None,
"receiver": None,
"all_receivers": None,
"speech": message}
for receiver in make_iter(receivers):
individual_mapping = {"object": self.get_display_name(receiver),
"location": location.get_display_name(receiver),
"receiver": receiver.get_display_name(receiver),
"all_receivers": ", ".join(
recv.get_display_name(recv)
for recv in receivers) if receivers else None}
receiver_mapping.update(individual_mapping)
receiver_mapping.update(custom_mapping)
receiver.msg(text=(msg_receivers.format(**receiver_mapping),
{"type": msg_type}), from_obj=self)
if self.location and msg_location:
location_mapping = {"self": "You",
"object": self,
"location": location,
"all_receivers": ", ".join(str(recv) for recv in receivers) if receivers else None,
"receiver": None,
"speech": message}
location_mapping.update(custom_mapping)
exclude = []
if msg_self:
exclude.append(self)
if receivers:
exclude.extend(receivers)
self.location.msg_contents(text=(msg_location, {"type": msg_type}),
from_obj=self,
exclude=exclude,
mapping=location_mapping)
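    # A minimal override sketch (an assumption, not part of the original module):
    # a hypothetical subclass can keep the marker substitution above while
    # swapping in its own location message, e.g.
    #
    #     class ShoutingCharacter(DefaultCharacter):
    #         def at_say(self, message, **kwargs):
    #             super(ShoutingCharacter, self).at_say(
    #                 message, msg_location='{object} shouts, "{speech}"', **kwargs)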
#
# Base Character object
#
class DefaultCharacter(DefaultObject):
"""
This implements an Object puppeted by a Session - that is,
a character avatar controlled by an account.
"""
def basetype_setup(self):
"""
Setup character-specific security.
You should normally not need to overload this, but if you do,
make sure to reproduce at least the two last commands in this
method (unless you want to fundamentally change how a
Character object works).
"""
super(DefaultCharacter, self).basetype_setup()
self.locks.add(";".join(["get:false()", # noone can pick up the character
"call:false()"])) # no commands can be called on character from outside
# add the default cmdset
self.cmdset.add_default(settings.CMDSET_CHARACTER, permanent=True)
def at_after_move(self, source_location, **kwargs):
"""
We make sure to look around after a move.
"""
if self.location.access(self, "view"):
self.msg(self.at_look(self.location))
def at_pre_puppet(self, account, session=None, **kwargs):
"""
        Restore the character from storage (its location was set to `None`
        by `at_post_unpuppet`) just before puppeting begins.
Args:
account (Account): This is the connecting account.
session (Session): Session controlling the connection.
"""
if self.location is None: # Make sure character's location is never None before being puppeted.
# Return to last location (or home, which should always exist),
self.location = self.db.prelogout_location if self.db.prelogout_location else self.home
self.location.at_object_receive(self, None) # and trigger the location's reception hook.
if self.location: # If the character is verified to be somewhere,
self.db.prelogout_location = self.location # save location again to be sure.
else:
account.msg("|r%s has no location and no home is set.|n" % self, session=session) # Note to set home.
def at_post_puppet(self, **kwargs):
"""
Called just after puppeting has been completed and all
Account<->Object links have been established.
Args:
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Note:
You can use `self.account` and `self.sessions.get()` to get
account and sessions at this point; the last entry in the
list from `self.sessions.get()` is the latest Session
puppeting this Object.
"""
# NOTE: commenting out extraneous info
#self.msg("\nYou become |c%s|n.\n" % self.name)
self.msg((self.at_look(self.location), {'type':'look'}), options = None)
def message(obj, from_obj):
obj.msg("%s has entered the game." % self.get_display_name(obj), from_obj=from_obj)
self.location.for_contents(message, exclude=[self], from_obj=self)
def at_post_unpuppet(self, account, session=None, **kwargs):
"""
        We stow away the character when the account goes ooc/logs off;
        otherwise the character object would remain in the room even
        after the account logged off ("headless", so to say).
Args:
account (Account): The account object that just disconnected
from this object.
session (Session): Session controlling the connection that
just disconnected.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if not self.sessions.count():
# only remove this char from grid if no sessions control it anymore.
if self.location:
def message(obj, from_obj):
obj.msg("%s has left the game." % self.get_display_name(obj), from_obj=from_obj)
self.location.for_contents(message, exclude=[self], from_obj=self)
self.db.prelogout_location = self.location
self.location = None
@property
def idle_time(self):
"""
Returns the idle time of the least idle session in seconds. If
no sessions are connected it returns nothing.
"""
idle = [session.cmd_last_visible for session in self.sessions.all()]
if idle:
return time.time() - float(max(idle))
return None
@property
def connection_time(self):
"""
Returns the maximum connection time of all connected sessions
in seconds. Returns nothing if there are no sessions.
"""
conn = [session.conn_time for session in self.sessions.all()]
if conn:
return time.time() - float(min(conn))
return None
#
# Base Room object
class DefaultRoom(DefaultObject):
"""
This is the base room object. It's just like any Object except its
location is always `None`.
"""
def basetype_setup(self):
"""
Simple room setup setting locks to make sure the room
cannot be picked up.
"""
super(DefaultRoom, self).basetype_setup()
self.locks.add(";".join(["get:false()",
"puppet:false()"])) # would be weird to puppet a room ...
self.location = None
#
# Default Exit command, used by the base exit object
#
class ExitCommand(command.Command):
"""
    This is a command that simply causes the caller to traverse
the object it is attached to.
"""
obj = None
def func(self):
"""
Default exit traverse if no syscommand is defined.
"""
if self.obj.access(self.caller, 'traverse'):
# we may traverse the exit.
self.obj.at_traverse(self.caller, self.obj.destination)
else:
# exit is locked
if self.obj.db.err_traverse:
# if exit has a better error message, let's use it.
self.caller.msg(self.obj.db.err_traverse)
else:
# No shorthand error message. Call hook.
self.obj.at_failed_traverse(self.caller)
def get_extra_info(self, caller, **kwargs):
"""
Shows a bit of information on where the exit leads.
Args:
caller (Object): The object (usually a character) that entered an ambiguous command.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
A string with identifying information to disambiguate the command, conventionally with a preceding space.
"""
if self.obj.destination:
return " (exit to %s)" % self.obj.destination.get_display_name(caller)
else:
return " (%s)" % self.obj.get_display_name(caller)
#
# Base Exit object
class DefaultExit(DefaultObject):
"""
This is the base exit object - it connects a location to another.
This is done by the exit assigning a "command" on itself with the
same name as the exit object (to do this we need to remember to
re-create the command when the object is cached since it must be
created dynamically depending on what the exit is called). This
command (which has a high priority) will thus allow us to traverse
exits simply by giving the exit-object's name on its own.
"""
exit_command = ExitCommand
priority = 101
# Helper classes and methods to implement the Exit. These need not
    # be overloaded unless one wants to change the foundation for how
# Exits work. See the end of the class for hook methods to overload.
def create_exit_cmdset(self, exidbobj):
"""
Helper function for creating an exit command set + command.
The command of this cmdset has the same name as the Exit
        object and allows the exit to react when the account enters the
exit's name, triggering the movement between rooms.
Args:
exidbobj (Object): The DefaultExit object to base the command on.
"""
# create an exit command. We give the properties here,
# to always trigger metaclass preparations
cmd = self.exit_command(key=exidbobj.db_key.strip().lower(),
aliases=exidbobj.aliases.all(),
locks=str(exidbobj.locks),
auto_help=False,
destination=exidbobj.db_destination,
arg_regex=r"^$",
is_exit=True,
obj=exidbobj)
# create a cmdset
exit_cmdset = cmdset.CmdSet(None)
exit_cmdset.key = 'ExitCmdSet'
exit_cmdset.priority = self.priority
exit_cmdset.duplicates = True
# add command to cmdset
exit_cmdset.add(cmd)
return exit_cmdset
# Command hooks
def basetype_setup(self):
"""
Setup exit-security
You should normally not need to overload this - if you do make
sure you include all the functionality in this method.
"""
super(DefaultExit, self).basetype_setup()
# setting default locks (overload these in at_object_creation()
self.locks.add(";".join(["puppet:false()", # would be weird to puppet an exit ...
"traverse:all()", # who can pass through exit by default
"get:false()"])) # noone can pick up the exit
# an exit should have a destination (this is replaced at creation time)
if self.location:
self.destination = self.location
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this object are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the object currently
has no cmdsets.
Kwargs:
force_init (bool): If `True`, force a re-build of the cmdset
(for example to update aliases).
"""
if "force_init" in kwargs or not self.cmdset.has_cmdset("ExitCmdSet", must_be_default=True):
# we are resetting, or no exit-cmdset was set. Create one dynamically.
self.cmdset.add_default(self.create_exit_cmdset(self), permanent=False)
def at_init(self):
"""
        This is called when this object is re-loaded from cache. When
        that happens, we make sure to remove any old ExitCmdSet cmdset
        (this most commonly occurs when renaming an existing exit).
"""
self.cmdset.remove_default()
def at_traverse(self, traversing_object, target_location, **kwargs):
"""
This implements the actual traversal. The traverse lock has
already been checked (in the Exit command) at this point.
Args:
traversing_object (Object): Object traversing us.
target_location (Object): Where target is going.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
source_location = traversing_object.location
if traversing_object.move_to(target_location):
self.at_after_traverse(traversing_object, source_location)
else:
if self.db.err_traverse:
# if exit has a better error message, let's use it.
                traversing_object.msg(self.db.err_traverse)
else:
# No shorthand error message. Call hook.
self.at_failed_traverse(traversing_object)
def at_failed_traverse(self, traversing_object, **kwargs):
"""
Overloads the default hook to implement a simple default error message.
Args:
traversing_object (Object): The object that failed traversing us.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
Using the default exits, this hook will not be called if an
Attribute `err_traverse` is defined - this will in that case be
read for an error string instead.
"""
traversing_object.msg("You cannot go there.")
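# Usage sketch (hedged; assumes the usual creation helper from evennia.utils.create
# and two pre-existing room objects): exits are normally created with a destination
# so that the dynamic exit command above can be built, e.g.
#
#     from evennia.utils.create import create_object
#     north = create_object(DefaultExit, key="north",
#                           location=courtyard, destination=gatehouse)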
|
from django.contrib import admin
from .models import Preference, Profile, Allergy, Goal
admin.site.register(Preference)
admin.site.register(Profile)
admin.site.register(Allergy)
admin.site.register(Goal)
|
from rest_framework import serializers
import markdown2
from .models import Content
from omaralbeik import server_variables as sv
class ContentSerializer(serializers.ModelSerializer):
tags = serializers.SerializerMethodField()
html_text = serializers.SerializerMethodField()
website_url = serializers.SerializerMethodField()
meta = serializers.SerializerMethodField()
class Meta:
model = Content
fields = (
"id",
"title",
"slug",
"image_url",
"summary",
"text",
"html_text",
"website_url",
"tags",
"meta",
)
# return content's web URL.
def get_website_url(self, content):
return "{}/{}".format(sv.CLIENT_PROD_URL, content.slug)
# return content's text as HTML
def get_html_text(self, content):
return markdown2.markdown(
content.text, extras=["target-blank-links", "fenced-code-blocks"]
)
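    # Example (hedged): with the extras above, fenced ``` blocks are rendered to
    # <pre><code> elements and generated links get a target="_blank" attribute.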
# return content's tags.
def get_tags(self, content):
return content.tags.all().values("name", "slug")
# return content's meta fields.
def get_meta(self, content):
return {
"title": content.title,
"description": content.summary,
"keywords": ", ".join([tag.name for tag in content.tags.all()]),
"canonical": self.get_website_url(content),
}
|
#!/usr/bin/env python
# coding: utf-8
# # GenCode Explore
#
# Explore the human RNA sequences from GenCode.
#
# Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
#
# Move the GenCodeLoader class to its own python module. Compare to 105.
# In[1]:
import time
def show_time():
t = time.time()
s = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
print(s)
show_time()
# In[2]:
import numpy as np
import pandas as pd
import sys
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(s.text) # writes to cloud local, delete the file later?
from RNA_describe import ORF_counter
from RNA_describe import assert_imported_RNA_describe
from GenCodeTools import GenCodeLoader
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='../data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import ORF_counter
from SimTools.RNA_describe import assert_imported_RNA_describe
from SimTools.GenCodeTools import GenCodeLoader
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_describe():
print("ERROR: Cannot use RNA_describe.")
# In[3]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
# ## Load the GenCode data.
# Warning: GenCode has
# over 100K protein-coding RNA (mRNA)
# and almost 50K non-coding RNA (lncRNA).
# In[4]:
# Full GenCode ver 38 human is 106143 pc + 48752 nc and loads in 7 sec.
# Expect fewer transcripts if special filtering is used.
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
loader=GenCodeLoader()
show_time()
loader.set_label(1)
loader.set_check_list(None)
loader.set_check_utr(True)
pcdf=loader.load_file(PC_FULLPATH)
print("PC seqs loaded:",len(pcdf))
show_time()
loader.set_label(0)
loader.set_check_list(None)
loader.set_check_utr(False)
ncdf=loader.load_file(NC_FULLPATH)
print("NC seqs loaded:",len(ncdf))
show_time()
# In[5]:
print("Sorting PC...")
pcdf.sort_values('seqlen', ascending=True, inplace=True)
print("Sorting NC...")
ncdf.sort_values('seqlen', ascending=True, inplace=True)
# In[6]:
ncdf
# ## Look for short ORFs
# In[7]:
def show_short(df,too_short):
    oc = ORF_counter()
    count=len(df)
    shorties=0
    for pos in range(0,count):
        sequence=df.iloc[pos]['sequence']
        seqlen=df.iloc[pos]['seqlen']
        oc.set_sequence(sequence)
        orflen=oc.get_max_orf_len()
        if seqlen>200 and orflen<=too_short:
            seqid=df.iloc[pos]['tid']
            #print("%s len=%d orf=%d"%(seqid,seqlen,orflen))
            shorties += 1
        if pos%10000==0:
            print("Up to position %d, we have %d shorter than %d"%(pos,shorties,too_short))
    print("After all %d, we have %d shorter than %d"%(count,shorties,too_short))
TOO_SHORT=60
show_short(pcdf,TOO_SHORT)
# In[8]:
show_short(ncdf,TOO_SHORT)
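# In[9]:
# Optional spot check (a hedged addition, not part of the original run): inspect a
# single transcript with the same ORF_counter API and columns used above.
oc = ORF_counter()
oc.set_sequence(ncdf.iloc[0]['sequence'])
print("Example %s: seqlen=%d, max ORF=%d"%
      (ncdf.iloc[0]['tid'], ncdf.iloc[0]['seqlen'], oc.get_max_orf_len()))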
# ## Conclusion
# With TOO_SHORT=30
# NON-CODING
# We have 589 shorter than 30, with most of them (504) found within the first 10000 sequences
#
# CODING
# Using check_utr and check_list on pcdf, we have 0 shorter than 30.
# Using check_utr only, we have 0 shorter than 30.
#
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Contrib neural network module."""
from . import nn
from . import rnn
from . import data
|
from __future__ import absolute_import
import numpy as np
from scipy.spatial import Delaunay
from spektral.utils import label_to_one_hot, numpy_to_nx
RETURN_TYPES = {'numpy', 'networkx'}
MAX_K = 7 # Maximum number of nodes in a graph
def generate_data(return_type='networkx', classes=0, n_samples_in_class=1000,
n_nodes=7, support_low=0., support_high=10., drift_amount=1.0,
one_hot_labels=True, support=None, seed=None):
"""
Generates a dataset of Delaunay triangulations as described by
[Zambon et al. (2017)](https://arxiv.org/abs/1706.06941).
Note that this function is basically deprecated and will change soon.
:param return_type: `'networkx'` or `'numpy'`, data format to return;
:param classes: indices of the classes to load (integer, or list of integers
between 0 and 20);
:param n_samples_in_class: number of generated samples per class;
:param n_nodes: number of nodes in a graph;
:param support_low: lower bound of the uniform distribution from which the
support is generated;
:param support_high: upper bound of the uniform distribution from which the
support is generated;
:param drift_amount: coefficient to control the amount of change between
classes;
:param one_hot_labels: one-hot encode dataset labels;
:param support: custom support to use instead of generating it randomly;
:param seed: random numpy seed;
:return: if `return_type='networkx'`, a list of graphs in Networkx format,
and an array containing labels; if `return_type='numpy'`, the adjacency
matrix, node features, and an array containing labels.
"""
if return_type not in RETURN_TYPES:
raise ValueError('Possible return_type: {}'.format(RETURN_TYPES))
if isinstance(classes, int):
classes = [classes]
if max(classes) > 20 or min(classes) < 0:
raise ValueError('Class indices must be between 0 and 20')
r_classes = list(reversed(classes))
if r_classes[-1] == 0:
r_classes.insert(0, r_classes.pop(-1))
# Support points
np.random.seed(seed)
if support is None:
support = np.random.uniform(support_low, support_high, (1, n_nodes, 2))
else:
try:
assert support.shape == (1, n_nodes, 2)
except AssertionError:
            print('The given support doesn\'t have shape (1, n_nodes, 2) as '
                  'expected. Attempting to reshape.')
support = support.reshape(1, n_nodes, 2)
# Compute node features
node_features = []
# Other node features
for idx, i in enumerate(r_classes):
if i == 0:
concept_0 = np.repeat(support, n_samples_in_class, 0)
noise_0 = np.random.normal(0, 1, (n_samples_in_class, n_nodes, 2))
class_0 = concept_0 + noise_0
node_features.append(class_0)
else:
radius = 10. * ((2./3.) ** (drift_amount * (i - 1)))
phase = np.random.uniform(0, 2 * np.pi, (n_nodes, 1))
perturb_i_x = radius * np.cos(phase)
perturb_i_y = radius * np.sin(phase)
perturb_i = np.concatenate((perturb_i_x, perturb_i_y), axis=-1)
support_i = support + perturb_i
concept_i = np.repeat(support_i, n_samples_in_class, 0)
noise_i = np.random.normal(0, 1, (n_samples_in_class, n_nodes, 2))
class_i = concept_i + noise_i
node_features.append(class_i)
node_features = np.array(node_features).reshape((-1, n_nodes, 2))
# Compute adjacency matrices
adjacency = []
for nf in node_features:
adj = compute_adj(nf)
adjacency.append(adj)
adjacency = np.array(adjacency)
# Compute labels
labels = np.repeat(classes, n_samples_in_class)
if one_hot_labels:
labels = label_to_one_hot(labels, labels=classes)
    if return_type == 'numpy':
        return adjacency, node_features, labels
    elif return_type == 'networkx':
graphs = numpy_to_nx(adjacency, node_features=node_features, nf_name='coords')
return graphs, labels
else:
raise NotImplementedError
def compute_adj(x):
"""
Computes the Delaunay triangulation of the given points
:param x: array of shape (num_nodes, 2)
:return: the computed adjacency matrix
"""
tri = Delaunay(x)
edges_explicit = np.concatenate((tri.vertices[:, :2],
tri.vertices[:, 1:],
tri.vertices[:, ::2]), axis=0)
adj = np.zeros((x.shape[0], x.shape[0]))
adj[edges_explicit[:, 0], edges_explicit[:, 1]] = 1.
return np.clip(adj + adj.T, 0, 1)
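if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original module):
    # build two classes of Delaunay graphs and check the resulting array shapes.
    a, x, y = generate_data(return_type='numpy', classes=[0, 5],
                            n_samples_in_class=10, seed=0)
    print(a.shape, x.shape, y.shape)  # expected: (20, 7, 7) (20, 7, 2) (20, 2)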
|
#
# Copyright (c) 2009 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""A command line UI for testrepository."""
import io
import os
import signal
import subunit
import sys
from extras import try_import
v2_avail = try_import('subunit.ByteStreamToStreamResult')
import testtools
from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
from testtools.compat import unicode_output_stream, _u
from testrepository import ui
from testrepository.commands import get_command_parser
class CLITestResult(ui.BaseUITestResult):
"""A TestResult for the CLI."""
def __init__(self, ui, get_id, stream, previous_run=None, filter_tags=None):
"""Construct a CLITestResult writing to stream.
:param filter_tags: Tags that should be used to filter tests out. When
a tag in this set is present on a test outcome, the test is not
counted towards the test run count. If the test errors, then it is
still counted and the error is still shown.
"""
super(CLITestResult, self).__init__(ui, get_id, previous_run)
self.stream = unicode_output_stream(stream)
self.sep1 = _u('=' * 70 + '\n')
self.sep2 = _u('-' * 70 + '\n')
self.filter_tags = filter_tags or frozenset()
self.filterable_states = set(['success', 'uxsuccess', 'xfail', 'skip'])
def _format_error(self, label, test, error_text, test_tags=None):
test_tags = test_tags or ()
tags = _u(' ').join(test_tags)
if tags:
tags = _u('tags: %s\n') % tags
return _u('').join([
self.sep1,
_u('%s: %s\n') % (label, test.id()),
tags,
self.sep2,
error_text,
])
def status(self, test_id=None, test_status=None, test_tags=None,
runnable=True, file_name=None, file_bytes=None, eof=False,
mime_type=None, route_code=None, timestamp=None):
super(CLITestResult, self).status(test_id=test_id,
test_status=test_status, test_tags=test_tags, runnable=runnable,
file_name=file_name, file_bytes=file_bytes, eof=eof,
mime_type=mime_type, route_code=route_code, timestamp=timestamp)
if test_status == 'fail':
self.stream.write(
self._format_error(_u('FAIL'), *(self._summary.errors[-1]),
test_tags=test_tags))
if test_status not in self.filterable_states:
return
if test_tags and test_tags.intersection(self.filter_tags):
self._summary.testsRun -= 1
class UI(ui.AbstractUI):
"""A command line user interface."""
def __init__(self, argv, stdin, stdout, stderr):
"""Create a command line UI.
:param argv: Arguments from the process invocation.
:param stdin: The stream for stdin.
:param stdout: The stream for stdout.
:param stderr: The stream for stderr.
"""
self._argv = argv
self._stdin = stdin
self._stdout = stdout
self._stderr = stderr
self._binary_stdout = None
def _iter_streams(self, stream_type):
# Only the first stream declared in a command can be accepted at the
# moment - as there is only one stdin and alternate streams are not yet
# configurable in the CLI.
first_stream_type = self.cmd.input_streams[0]
if (stream_type != first_stream_type
and stream_type != first_stream_type[:-1]):
return
yield subunit.make_stream_binary(self._stdin)
def make_result(self, get_id, test_command, previous_run=None):
if getattr(self.options, 'subunit', False):
if v2_avail:
serializer = subunit.StreamResultToBytes(self._stdout)
else:
serializer = StreamToExtendedDecorator(
subunit.TestProtocolClient(self._stdout))
            # Bypass user transforms - just forward it all,
result = serializer
# and interpret everything as success.
summary = testtools.StreamSummary()
summary.startTestRun()
summary.stopTestRun()
return result, summary
else:
# Apply user defined transforms.
filter_tags = test_command.get_filter_tags()
output = CLITestResult(self, get_id, self._stdout, previous_run,
filter_tags=filter_tags)
summary = output._summary
return output, summary
def output_error(self, error_tuple):
if 'TESTR_PDB' in os.environ:
import traceback
self._stderr.write(_u('').join(traceback.format_tb(error_tuple[2])))
self._stderr.write(_u('\n'))
            # This is terrible: it is because on Python2.x pdb writes bytes to
            # its pipes, and the test suite uses io.StringIO which refuses bytes.
            import pdb
if sys.version_info[0]==2:
if isinstance(self._stdout, io.StringIO):
write = self._stdout.write
def _write(text):
return write(text.decode('utf8'))
self._stdout.write = _write
p = pdb.Pdb(stdin=self._stdin, stdout=self._stdout)
p.reset()
p.interaction(None, error_tuple[2])
error_type = str(error_tuple[1])
# XX: Python2.
if type(error_type) is bytes:
error_type = error_type.decode('utf8')
self._stderr.write(error_type + _u('\n'))
def output_rest(self, rest_string):
self._stdout.write(rest_string)
if not rest_string.endswith('\n'):
self._stdout.write(_u('\n'))
def output_stream(self, stream):
if not self._binary_stdout:
self._binary_stdout = subunit.make_stream_binary(self._stdout)
contents = stream.read(65536)
assert type(contents) is bytes, \
"Bad stream contents %r" % type(contents)
        # If there are unflushed bytes in the text wrapper, we need to sync.
self._stdout.flush()
while contents:
self._binary_stdout.write(contents)
contents = stream.read(65536)
self._binary_stdout.flush()
def output_table(self, table):
# stringify
contents = []
for row in table:
new_row = []
for column in row:
new_row.append(str(column))
contents.append(new_row)
if not contents:
return
widths = [0] * len(contents[0])
for row in contents:
for idx, column in enumerate(row):
if widths[idx] < len(column):
widths[idx] = len(column)
# Show a row
outputs = []
def show_row(row):
for idx, column in enumerate(row):
outputs.append(column)
if idx == len(row) - 1:
outputs.append('\n')
return
# spacers for the next column
outputs.append(' '*(widths[idx]-len(column)))
outputs.append(' ')
show_row(contents[0])
# title spacer
for idx, width in enumerate(widths):
outputs.append('-'*width)
if idx == len(widths) - 1:
outputs.append('\n')
continue
outputs.append(' ')
for row in contents[1:]:
show_row(row)
self._stdout.write(_u('').join(outputs))
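    # Illustration (hedged) of the layout produced above, e.g. for
    # output_table([('id', 'status'), ('1', 'ok')]):
    #   id status
    #   -- ------
    #   1  ok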
def output_tests(self, tests):
for test in tests:
# On Python 2.6 id() returns bytes.
id_str = test.id()
if type(id_str) is bytes:
id_str = id_str.decode('utf8')
self._stdout.write(id_str)
self._stdout.write(_u('\n'))
def output_values(self, values):
outputs = []
for label, value in values:
outputs.append('%s=%s' % (label, value))
self._stdout.write(_u('%s\n' % ', '.join(outputs)))
def _format_summary(self, successful, tests, tests_delta,
time, time_delta, values):
# We build the string by appending to a list of strings and then
# joining trivially at the end. Avoids expensive string concatenation.
summary = []
a = summary.append
if tests:
a("Ran %s" % (tests,))
if tests_delta:
a(" (%+d)" % (tests_delta,))
a(" tests")
if time:
if not summary:
a("Ran tests")
a(" in %0.3fs" % (time,))
if time_delta:
a(" (%+0.3fs)" % (time_delta,))
if summary:
a("\n")
if successful:
a('PASSED')
else:
a('FAILED')
if values:
a(' (')
values_strings = []
for name, value, delta in values:
value_str = '%s=%s' % (name, value)
if delta:
value_str += ' (%+d)' % (delta,)
values_strings.append(value_str)
a(', '.join(values_strings))
a(')')
return _u('').join(summary)
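    # Illustration (hedged) of the string built above:
    #   _format_summary(True, 10, 2, 1.5, -0.2, [('failures', 0, -1)])
    #   -> 'Ran 10 (+2) tests in 1.500s (-0.200s)\nPASSED (failures=0 (-1))'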
def output_summary(self, successful, tests, tests_delta,
time, time_delta, values):
self._stdout.write(
self._format_summary(
successful, tests, tests_delta, time, time_delta, values))
self._stdout.write(_u('\n'))
def _check_cmd(self):
parser = get_command_parser(self.cmd)
parser.add_option("-d", "--here", dest="here",
help="Set the directory or url that a command should run from. "
"This affects all default path lookups but does not affect paths "
"supplied to the command.", default=os.getcwd(), type=str)
parser.add_option("-q", "--quiet", action="store_true", default=False,
help="Turn off output other than the primary output for a command "
"and any errors.")
# yank out --, as optparse makes it silly hard to just preserve it.
try:
where_dashdash = self._argv.index('--')
opt_argv = self._argv[:where_dashdash]
other_args = self._argv[where_dashdash:]
except ValueError:
opt_argv = self._argv
other_args = []
if '-h' in opt_argv or '--help' in opt_argv or '-?' in opt_argv:
self.output_rest(parser.format_help())
            # Fugly, but it's what optparse does: we're just overriding the
# output path.
raise SystemExit(0)
options, args = parser.parse_args(opt_argv)
args += other_args
self.here = options.here
self.options = options
parsed_args = {}
failed = False
for arg in self.cmd.args:
try:
parsed_args[arg.name] = arg.parse(args)
except ValueError:
exc_info = sys.exc_info()
failed = True
self._stderr.write(_u("%s\n") % str(exc_info[1]))
break
if not failed:
self.arguments = parsed_args
if args != []:
self._stderr.write(_u("Unexpected arguments: %r\n") % args)
return not failed and args == []
def _clear_SIGPIPE(self):
"""Clear SIGPIPE : child processes expect the default handler."""
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_Popen(self, *args, **kwargs):
import subprocess
if os.name == "posix":
# GZ 2010-12-04: Should perhaps check for existing preexec_fn and
# combine so both will get called.
kwargs['preexec_fn'] = self._clear_SIGPIPE
return subprocess.Popen(*args, **kwargs)
|
# Generated by Django 3.1.12 on 2021-06-24 10:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0075_auto_20210607_1312"),
]
operations = [
migrations.AlterField(
model_name="userprofile",
name="lang",
field=models.CharField(
blank=True,
choices=[
("ar", "العربية"),
("as", "অসমীয়া"),
("bcl", "Bikol Central"),
("br", "brezhoneg"),
("da", "dansk"),
("dag", "dagbanli"),
("de", "Deutsch"),
("diq", "Zazaki"),
("en", "English"),
("en-gb", "British English"),
("eo", "Esperanto"),
("es", "español"),
("fa", "فارسی"),
("fi", "suomi"),
("fr", "français"),
("gu", "ગુજરાતી"),
("guw", "gungbe"),
("he", "עברית"),
("hi", "हिन्दी"),
("hy", "հայերեն"),
("id", "Bahasa Indonesia"),
("io", "Ido"),
("it", "italiano"),
("ja", "日本語"),
("ko", "한국어"),
("lv", "latviešu"),
("mk", "македонски"),
("mnw", "ဘာသာ မန်"),
("mr", "मराठी"),
("ms", "Bahasa Melayu"),
("my", "မြန်မာဘာသာ"),
("pl", "polski"),
("pt", "português"),
("pt-br", "português do Brasil"),
("ro", "română"),
("ru", "русский"),
("scn", "sicilianu"),
("sr-ec", "sr-cyrl"),
("sv", "svenska"),
("ta", "தமிழ்"),
("tr", "Türkçe"),
("uk", "українська"),
("vi", "Tiếng Việt"),
("zh-hans", "中文(简体)"),
("zh-hant", "中文(繁體)"),
],
help_text="Language",
max_length=128,
null=True,
),
),
]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import random
import uuid
from google.api_core import client_options
import google.api_core.exceptions
import google.auth
from google.cloud import bigquery
from google.cloud import bigquery_datatransfer
from google.cloud import pubsub_v1
import pytest
RESOURCE_PREFIX = "python_bigquery_datatransfer_samples_snippets"
RESOURCE_DATE_FORMAT = "%Y%m%d%H%M%S"
RESOURCE_DATE_LENGTH = 4 + 2 + 2 + 2 + 2 + 2
def resource_prefix() -> str:
timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT)
random_string = hex(random.randrange(1000000))[2:]
return f"{RESOURCE_PREFIX}_{timestamp}_{random_string}"
def resource_name_to_date(resource_name: str):
start_date = len(RESOURCE_PREFIX) + 1
date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]
parsed_date = datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT)
return parsed_date
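# Example (hedged): resource_prefix() yields names such as
# "python_bigquery_datatransfer_samples_snippets_20210624101500_3ade6", and
# resource_name_to_date() recovers the embedded UTC timestamp from such a name.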
@pytest.fixture(scope="session", autouse=True)
def cleanup_pubsub_topics(pubsub_client: pubsub_v1.PublisherClient, project_id):
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
for topic in pubsub_client.list_topics(project=f"projects/{project_id}"):
topic_id = topic.name.split("/")[-1]
if (
topic_id.startswith(RESOURCE_PREFIX)
and resource_name_to_date(topic_id) < yesterday
):
pubsub_client.delete_topic(topic=topic.name)
def temp_suffix():
now = datetime.datetime.now()
return f"{now.strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}"
@pytest.fixture(scope="session")
def bigquery_client(default_credentials):
credentials, project_id = default_credentials
return bigquery.Client(credentials=credentials, project=project_id)
@pytest.fixture(scope="session")
def pubsub_client(default_credentials):
credentials, _ = default_credentials
return pubsub_v1.PublisherClient(credentials=credentials)
@pytest.fixture(scope="session")
def pubsub_topic(pubsub_client: pubsub_v1.PublisherClient, project_id):
topic_id = resource_prefix()
topic_path = pubsub_v1.PublisherClient.topic_path(project_id, topic_id)
pubsub_client.create_topic(name=topic_path)
yield topic_path
pubsub_client.delete_topic(topic=topic_path)
@pytest.fixture(scope="session")
def dataset_id(bigquery_client, project_id):
dataset_id = f"bqdts_{temp_suffix()}"
bigquery_client.create_dataset(f"{project_id}.{dataset_id}")
yield dataset_id
bigquery_client.delete_dataset(dataset_id, delete_contents=True)
@pytest.fixture(scope="session")
def default_credentials():
return google.auth.default(["https://www.googleapis.com/auth/cloud-platform"])
@pytest.fixture(scope="session")
def project_id():
return os.environ["GOOGLE_CLOUD_PROJECT"]
@pytest.fixture(scope="session")
def service_account_name(default_credentials):
credentials, _ = default_credentials
# The service_account_email attribute is not available when running with
# user account credentials, but should be available when running from our
# continuous integration tests.
return getattr(credentials, "service_account_email", None)
@pytest.fixture(scope="session")
def transfer_client(default_credentials, project_id):
credentials, _ = default_credentials
options = client_options.ClientOptions(quota_project_id=project_id)
transfer_client = bigquery_datatransfer.DataTransferServiceClient(
credentials=credentials, client_options=options
)
# Ensure quota is always attributed to the correct project.
bigquery_datatransfer.DataTransferServiceClient = lambda: transfer_client
return transfer_client
@pytest.fixture(scope="session")
def transfer_config_name(transfer_client, project_id, dataset_id, service_account_name):
from . import manage_transfer_configs, scheduled_query
# Use the transfer_client fixture so we know quota is attributed to the
# correct project.
assert transfer_client is not None
# To conserve limited BQ-DTS quota, this fixture creates only one transfer
# config for a whole session and is used to test the scheduled_query.py and
# the delete operation in manage_transfer_configs.py.
transfer_config = scheduled_query.create_scheduled_query(
{
"project_id": project_id,
"dataset_id": dataset_id,
"service_account_name": service_account_name,
}
)
yield transfer_config.name
manage_transfer_configs.delete_config(
{"transfer_config_name": transfer_config.name}
)
@pytest.fixture
def to_delete_configs(transfer_client):
to_delete = []
yield to_delete
for config_name in to_delete:
try:
transfer_client.delete_transfer_config(name=config_name)
except google.api_core.exceptions.GoogleAPICallError:
pass
|
#!/usr/bin/env python
import argparse
import os,time
import numpy as np
from astropy.io import fits
from astropy.table import Table
from pypeit import msgs
from pypeit.par.util import make_pypeit_file
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def parser(options=None):
parser = argparse.ArgumentParser(description='Parse', formatter_class=SmartFormatter)
parser.add_argument("sci_path", type=str, help="Path for Science folder")
    parser.add_argument("--objmodel", type=str, default='qso', choices=['qso', 'star', 'poly'],
                        help="R|Science object model used in the telluric fitting.\n"
                             "The options are:\n"
                             "\n"
                             " qso = For quasars. You might need to set redshift, bal_wv_min_max in the tell_file.\n"
                             "\n"
                             " star = For stars. You need to set star_type, star_ra, star_dec, and star_mag in the tell_file.\n"
                             "\n"
                             " poly = For other types of objects. You might need to set fit_wv_min_max, \n"
                             " and polyorder in the tell_file."
                        )
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args):
"""
    This sets up PypeIt files for fluxing, coadding and telluric corrections.
    It will produce three files named your_spectrograph.flux, your_spectrograph.coadd1d,
    and your_spectrograph.tell
"""
allfiles = os.listdir(args.sci_path)
allfiles = np.sort(allfiles)
spec1dfiles = []
spec2dfiles = []
spec1dinfos = []
for ifile in allfiles:
if ('spec1d' in ifile) and ('.fits' in ifile):
spec1dfiles.append(ifile)
elif ('spec2d' in ifile) and ('.fits' in ifile):
spec2dfiles.append(ifile)
elif ('spec1d' in ifile) and ('.txt' in ifile):
spec1dinfos.append(ifile)
else:
msgs.warn('{:} is not a standard PypeIt output.'.format(ifile))
if len(spec2dfiles) > len(spec1dfiles):
msgs.warn('The following exposures do not have 1D extractions:')
for ii in range(len(spec2dfiles)):
if not os.path.exists(os.path.join(args.sci_path, spec2dfiles[ii].replace('spec2d','spec1d'))):
msgs.info('\t {:}'.format(spec2dfiles[ii]))
if len(spec1dfiles) > 0:
par = fits.open(os.path.join(args.sci_path, spec1dfiles[0]))
## fluxing pypeit file
spectrograph = par[0].header['PYP_SPEC']
pypeline = par[0].header['PYPELINE']
flux_file = '{:}.flux'.format(spectrograph)
cfg_lines = ['[fluxcalib]']
        cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC was derived with the UVIS algorithm\n']
cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']
make_pypeit_file(flux_file, spectrograph, spec1dfiles, cfg_lines=cfg_lines, setup_mode=True)
fin = open(flux_file, "rt")
data = fin.read()
data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))
data = data.replace('data', 'flux')
fin.close()
fin = open(flux_file, "wt")
fin.write(data)
fin.close()
## coadd1d pypeit file
coadd1d_file = '{:}.coadd1d'.format(spectrograph)
cfg_lines = ['[coadd1d]']
cfg_lines += [' coaddfile = YOUR_OUTPUT_FILE_NAME # Please set your output file name']
cfg_lines += [' sensfuncfile = YOUR_SENSFUNC_FILE # Please set your SENSFUNC file name']
if pypeline == 'Echelle':
            cfg_lines += [' wave_method = velocity # creates a uniformly spaced grid in log10(lambda)\n']
else:
            cfg_lines += [' wave_method = linear # creates a uniformly spaced grid in lambda\n']
cfg_lines += ['# This file includes all extracted objects. You need to figure out which object you want to \n'+\
'# coadd before running pypeit_coadd_1dspec!!!']
spec1d_info = []
for ii in range(len(spec1dfiles)):
meta_tbl = Table.read(os.path.join(args.sci_path, spec1dfiles[ii]).replace('.fits', '.txt'),
format='ascii.fixed_width')
_, indx = np.unique(meta_tbl['name'],return_index=True)
objects = meta_tbl[indx]
for jj in range(len(objects)):
spec1d_info.append(spec1dfiles[ii] + ' '+ objects['name'][jj])
make_pypeit_file(coadd1d_file, spectrograph, spec1d_info, cfg_lines=cfg_lines, setup_mode=True)
fin = open(coadd1d_file, "rt")
data = fin.read()
data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))
data = data.replace('data', 'coadd1d')
fin.close()
fin = open(coadd1d_file, "wt")
fin.write(data)
fin.close()
## tellfit pypeit file
tellfit_file = '{:}.tell'.format(spectrograph)
cfg_lines = ['[tellfit]']
if args.objmodel == 'qso':
cfg_lines += [' objmodel = qso']
cfg_lines += [' redshift = 0.0']
cfg_lines += [' bal_wv_min_max = 10000.,11000.']
elif args.objmodel == 'star':
cfg_lines += [' objmodel = star']
cfg_lines += [' star_type = A0']
cfg_lines += [' star_mag = 0.0']
elif args.objmodel == 'poly':
cfg_lines += [' objmodel = poly']
cfg_lines += [' polyorder = 5']
cfg_lines += [' fit_wv_min_max = 17000.0,22000.0']
with open(tellfit_file, 'w') as f:
f.write('# Auto-generated PypeIt file\n')
f.write('# {0}\n'.format(time.strftime("%a %d %b %Y %H:%M:%S", time.localtime())))
f.write("\n")
f.write("# User-defined execution parameters\n")
f.write("# This is only an example. Make sure to change the following parameters accordingly.\n")
f.write('\n'.join(cfg_lines))
f.write('\n')
f.write('\n')
msgs.info('PypeIt file written to: {0}'.format(tellfit_file))
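# Hedged sketch of a direct invocation; the installed package may expose its own
# console entry point instead of running this module as a script.
if __name__ == '__main__':
    main(parser())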
|
# cinder documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set
# to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
import warnings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = ['sphinx.ext.autodoc',
'ext.cinder_todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'oslosphinx',
'stevedore.sphinxext',
'oslo_config.sphinxconfiggen',
]
config_generator_config_file = '../../cinder/config/cinder-config-generator.conf'
sample_config_basename = '_static/cinder'
# autodoc generation is a bit aggressive and a nuisance
# when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1"
# in your terminal to disable
if not os.getenv('SPHINX_DEBUG'):
extensions += ['ext.cinder_autodoc']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# Changing the path so that the Hudson build output contains GA code
# and the source docs do not contain the code so local, offline sphinx builds
# are "clean."
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cinder'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from cinder.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = [
'api_ext/rst_extension_template',
'installer',
]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['cinder.']
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples: (source start file, name, description, authors, manual section).
man_pages = [
('man/cinder-manage', 'cinder-manage', u'Cloud controller fabric',
[u'OpenStack'], 1)
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
    html_last_updated_fmt = subprocess.Popen(
        git_cmd, stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'cinderdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Cinder.tex', u'Cinder Documentation',
u'Anso Labs, LLC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
# -*- coding: utf-8 -*-
"""
Django settings for Agrus project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from pytz import timezone
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gz6=&2*879yuym!pf(d8kch*30ow*eh=ybb-f0qsg+%c4+$@3c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'Agrus',
'kurs',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Agrus.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Agrus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.oracle',
'NAME': 'xe',
'USER': 'django',
'PASSWORD': 'djangooracle',
'HOST': 'localhost',
'PORT': '1521'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout'
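# --- Hedged hardening sketch (not part of the original settings; the
# DJANGO_SECRET_KEY / DJANGO_DEBUG variable names are assumptions) ---
# One common way to honor the SECURITY WARNINGS above is to pull these values
# from the environment instead of hard-coding them, e.g.:
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '0') == '1'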
|
"""awards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^',include('award.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
]
|
__version__ = '1.0.5'
# Deprecated, keep it here for a while for backward compatibility.
import multidict # noqa
# This relies on each of the submodules having an __all__ variable.
from multidict import * # noqa
from . import hdrs # noqa
from .protocol import * # noqa
from .connector import * # noqa
from .client import * # noqa
from .client_reqrep import * # noqa
from .errors import * # noqa
from .helpers import * # noqa
from .parsers import * # noqa
from .streams import * # noqa
from .multipart import * # noqa
from .client_ws import ClientWebSocketResponse # noqa
from ._ws_impl import WSMsgType, WSCloseCode, WSMessage, WebSocketError # noqa
from .file_sender import FileSender # noqa
from .cookiejar import CookieJar # noqa
from .resolver import * # noqa
MsgType = WSMsgType # backward compatibility
__all__ = (client.__all__ + # noqa
client_reqrep.__all__ + # noqa
errors.__all__ + # noqa
helpers.__all__ + # noqa
parsers.__all__ + # noqa
protocol.__all__ + # noqa
connector.__all__ + # noqa
streams.__all__ + # noqa
multidict.__all__ + # noqa
multipart.__all__ + # noqa
('hdrs', 'FileSender', 'WSMsgType', 'MsgType', 'WSCloseCode',
'WebSocketError', 'WSMessage',
'ClientWebSocketResponse', 'CookieJar'))
|
from typing import List


class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum element of a rotated sorted array via binary search."""
l = 0
r = len(nums) - 1
while r - l > 3:
m = (l + r) // 2
if nums[m] > nums[l] and nums[m] > nums[r]:
l = m + 1
else:
r = m
return min(nums[l:r+1])
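# --- Hedged usage sketch (not part of the original solution; the sample
# inputs are assumptions chosen for illustration) ---
if __name__ == "__main__":
    assert Solution().findMin([4, 5, 6, 7, 0, 1, 2]) == 0
    assert Solution().findMin([3, 4, 5, 1, 2]) == 1
    assert Solution().findMin([1]) == 1
    print("findMin examples passed")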
|
from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.11/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing GeometryFields.
"""
if isinstance(db_field, models.GeometryField):
request = kwargs.pop('request', None)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
            if db_field.geom_type == 'GEOMETRYCOLLECTION':
                collection_type = 'Any'
            else:
                collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
params = {'default_lon' : self.default_lon,
'default_lat' : self.default_lat,
'default_zoom' : self.default_zoom,
'display_wkt' : self.debug or self.display_wkt,
'geom_type' : OGRGeomType(db_field.geom_type),
'field_name' : db_field.name,
'is_collection' : is_collection,
'scrollable' : self.scrollable,
'layerswitcher' : self.layerswitcher,
'collection_type' : collection_type,
'is_linestring' : db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon' : db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point' : db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom' : self.num_zoom,
'max_zoom' : self.max_zoom,
'min_zoom' : self.min_zoom,
                      'units' : self.units,  # likely should get from object
'max_resolution' : self.max_resolution,
'max_extent' : self.max_extent,
'modifiable' : self.modifiable,
'mouse_position' : self.mouse_position,
'scale_text' : self.scale_text,
'map_width' : self.map_width,
'map_height' : self.map_height,
'point_zoom' : self.point_zoom,
'srid' : self.map_srid,
'display_srid' : self.display_srid,
'wms_url' : self.wms_url,
'wms_layer' : self.wms_layer,
'wms_name' : self.wms_name,
'debug' : self.debug,
}
return OLMap
from django.contrib.gis import gdal
if gdal.HAS_GDAL:
# Use the official spherical mercator projection SRID on versions
# of GDAL that support it; otherwise, fallback to 900913.
if gdal.GDAL_VERSION >= (1, 7):
spherical_mercator_srid = 3857
else:
spherical_mercator_srid = 900913
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
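# --- Hedged usage sketch (illustrative only; the app and model below are
# assumptions, not part of this module) ---
#
#   from django.contrib.gis import admin as gis_admin
#   from myapp.models import Shop   # hypothetical model with a PointField
#
#   class ShopAdmin(OSMGeoAdmin):
#       list_display = ('name',)
#
#   gis_admin.site.register(Shop, ShopAdmin)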
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DistributionConfigurationArgs', 'DistributionConfiguration']
@pulumi.input_type
class DistributionConfigurationArgs:
def __init__(__self__, *,
distributions: pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a DistributionConfiguration resource.
:param pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]] distributions: One or more configuration blocks with distribution settings. Detailed below.
:param pulumi.Input[str] description: Description to apply to the distributed AMI.
:param pulumi.Input[str] name: Name to apply to the distributed AMI.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "distributions", distributions)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def distributions(self) -> pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]]:
"""
One or more configuration blocks with distribution settings. Detailed below.
"""
return pulumi.get(self, "distributions")
@distributions.setter
def distributions(self, value: pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]]):
pulumi.set(self, "distributions", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to apply to the distributed AMI.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name to apply to the distributed AMI.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _DistributionConfigurationState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
date_created: Optional[pulumi.Input[str]] = None,
date_updated: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distributions: Optional[pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering DistributionConfiguration resources.
:param pulumi.Input[str] arn: (Required) Amazon Resource Name (ARN) of the distribution configuration.
:param pulumi.Input[str] date_created: Date the distribution configuration was created.
:param pulumi.Input[str] date_updated: Date the distribution configuration was updated.
:param pulumi.Input[str] description: Description to apply to the distributed AMI.
:param pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]] distributions: One or more configuration blocks with distribution settings. Detailed below.
:param pulumi.Input[str] name: Name to apply to the distributed AMI.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if date_created is not None:
pulumi.set(__self__, "date_created", date_created)
if date_updated is not None:
pulumi.set(__self__, "date_updated", date_updated)
if description is not None:
pulumi.set(__self__, "description", description)
if distributions is not None:
pulumi.set(__self__, "distributions", distributions)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
(Required) Amazon Resource Name (ARN) of the distribution configuration.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> Optional[pulumi.Input[str]]:
"""
Date the distribution configuration was created.
"""
return pulumi.get(self, "date_created")
@date_created.setter
def date_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "date_created", value)
@property
@pulumi.getter(name="dateUpdated")
def date_updated(self) -> Optional[pulumi.Input[str]]:
"""
Date the distribution configuration was updated.
"""
return pulumi.get(self, "date_updated")
@date_updated.setter
def date_updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "date_updated", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to apply to the distributed AMI.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def distributions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]]]:
"""
One or more configuration blocks with distribution settings. Detailed below.
"""
return pulumi.get(self, "distributions")
@distributions.setter
def distributions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DistributionConfigurationDistributionArgs']]]]):
pulumi.set(self, "distributions", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name to apply to the distributed AMI.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class DistributionConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
distributions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DistributionConfigurationDistributionArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages an Image Builder Distribution Configuration.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.DistributionConfiguration("example", distributions=[aws.imagebuilder.DistributionConfigurationDistributionArgs(
ami_distribution_configuration=aws.imagebuilder.DistributionConfigurationDistributionAmiDistributionConfigurationArgs(
ami_tags={
"CostCenter": "IT",
},
launch_permission=aws.imagebuilder.DistributionConfigurationDistributionAmiDistributionConfigurationLaunchPermissionArgs(
user_ids=["123456789012"],
),
name="example-{{ imagebuilder:buildDate }}",
),
region="us-east-1",
)])
```
## Import
`aws_imagebuilder_distribution_configurations` resources can be imported by using the Amazon Resource Name (ARN), e.g.,
```sh
$ pulumi import aws:imagebuilder/distributionConfiguration:DistributionConfiguration example arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description to apply to the distributed AMI.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DistributionConfigurationDistributionArgs']]]] distributions: One or more configuration blocks with distribution settings. Detailed below.
:param pulumi.Input[str] name: Name to apply to the distributed AMI.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DistributionConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Image Builder Distribution Configuration.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.DistributionConfiguration("example", distributions=[aws.imagebuilder.DistributionConfigurationDistributionArgs(
ami_distribution_configuration=aws.imagebuilder.DistributionConfigurationDistributionAmiDistributionConfigurationArgs(
ami_tags={
"CostCenter": "IT",
},
launch_permission=aws.imagebuilder.DistributionConfigurationDistributionAmiDistributionConfigurationLaunchPermissionArgs(
user_ids=["123456789012"],
),
name="example-{{ imagebuilder:buildDate }}",
),
region="us-east-1",
)])
```
## Import
`aws_imagebuilder_distribution_configurations` resources can be imported by using the Amazon Resource Name (ARN), e.g.,
```sh
$ pulumi import aws:imagebuilder/distributionConfiguration:DistributionConfiguration example arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example
```
:param str resource_name: The name of the resource.
:param DistributionConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DistributionConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
distributions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DistributionConfigurationDistributionArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DistributionConfigurationArgs.__new__(DistributionConfigurationArgs)
__props__.__dict__["description"] = description
if distributions is None and not opts.urn:
raise TypeError("Missing required property 'distributions'")
__props__.__dict__["distributions"] = distributions
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["date_created"] = None
__props__.__dict__["date_updated"] = None
__props__.__dict__["tags_all"] = None
super(DistributionConfiguration, __self__).__init__(
'aws:imagebuilder/distributionConfiguration:DistributionConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
date_created: Optional[pulumi.Input[str]] = None,
date_updated: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distributions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DistributionConfigurationDistributionArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'DistributionConfiguration':
"""
Get an existing DistributionConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: (Required) Amazon Resource Name (ARN) of the distribution configuration.
:param pulumi.Input[str] date_created: Date the distribution configuration was created.
:param pulumi.Input[str] date_updated: Date the distribution configuration was updated.
:param pulumi.Input[str] description: Description to apply to the distributed AMI.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DistributionConfigurationDistributionArgs']]]] distributions: One or more configuration blocks with distribution settings. Detailed below.
:param pulumi.Input[str] name: Name to apply to the distributed AMI.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DistributionConfigurationState.__new__(_DistributionConfigurationState)
__props__.__dict__["arn"] = arn
__props__.__dict__["date_created"] = date_created
__props__.__dict__["date_updated"] = date_updated
__props__.__dict__["description"] = description
__props__.__dict__["distributions"] = distributions
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return DistributionConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
(Required) Amazon Resource Name (ARN) of the distribution configuration.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> pulumi.Output[str]:
"""
Date the distribution configuration was created.
"""
return pulumi.get(self, "date_created")
@property
@pulumi.getter(name="dateUpdated")
def date_updated(self) -> pulumi.Output[str]:
"""
Date the distribution configuration was updated.
"""
return pulumi.get(self, "date_updated")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description to apply to the distributed AMI.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def distributions(self) -> pulumi.Output[Sequence['outputs.DistributionConfigurationDistribution']]:
"""
One or more configuration blocks with distribution settings. Detailed below.
"""
return pulumi.get(self, "distributions")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name to apply to the distributed AMI.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
        Key-value map of resource tags for the distribution configuration. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
|
######################################################################
# Copyright
# John Holland <john@zoner.org>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE.txt, which
# you should have received as part of this distribution.
#
######################################################################
"""
Apply local overrides to the current map.
Overrides defined in an XML document.
NOT IMPLEMENTED
"""
class map_override(object):
"""
    Apply local overrides to the current map. Overrides defined in an XML document.
"""
def __init__(self, map_root, override_file, icvn, vriic, fic):
pass
def _set_value(self, map_root, path, variable, value):
pass
def _append_value(self, map_root, path, variable, value):
pass
def _reset_list(self, map_root, path, variable, value):
pass
|
load(
"//haskell:providers.bzl",
"HaskellInfo",
"HaskellLibraryInfo",
)
load(":private/set.bzl", "set")
def gather_dep_info(ctx, deps):
"""Collapse dependencies into a single `HaskellInfo`.
Args:
ctx: Rule context.
deps: deps attribute.
Returns:
HaskellInfo: Unified information about all dependencies.
"""
package_databases = depset(transitive = [
dep[HaskellInfo].package_databases
for dep in deps
if HaskellInfo in dep
])
static_libraries = depset(transitive = [
dep[HaskellInfo].static_libraries
for dep in deps
if HaskellInfo in dep
])
dynamic_libraries = depset(transitive = [
dep[HaskellInfo].dynamic_libraries
for dep in deps
if HaskellInfo in dep
])
interface_dirs = depset(transitive = [
dep[HaskellInfo].interface_dirs
for dep in deps
if HaskellInfo in dep
])
source_files = depset(transitive = [
dep[HaskellInfo].source_files
for dep in deps
if HaskellInfo in dep
])
import_dirs = set.empty()
for dep in deps:
if HaskellInfo in dep:
import_dirs = set.mutable_union(import_dirs, dep[HaskellInfo].import_dirs)
extra_source_files = depset(transitive = [
dep[HaskellInfo].extra_source_files
for dep in deps
if HaskellInfo in dep
])
compile_flags = []
for dep in deps:
if HaskellInfo in dep:
compile_flags.extend(dep[HaskellInfo].compile_flags)
acc = HaskellInfo(
package_databases = package_databases,
version_macros = set.empty(),
static_libraries = static_libraries,
dynamic_libraries = dynamic_libraries,
interface_dirs = interface_dirs,
source_files = source_files,
import_dirs = import_dirs,
extra_source_files = extra_source_files,
compile_flags = compile_flags,
)
for dep in deps:
if HaskellInfo in dep:
binfo = dep[HaskellInfo]
if HaskellLibraryInfo not in dep:
fail("Target {0} cannot depend on binary".format(ctx.attr.name))
acc = HaskellInfo(
package_databases = acc.package_databases,
version_macros = set.mutable_union(acc.version_macros, binfo.version_macros),
static_libraries = depset(transitive = [acc.static_libraries, binfo.static_libraries]),
dynamic_libraries = acc.dynamic_libraries,
interface_dirs = acc.interface_dirs,
import_dirs = import_dirs,
compile_flags = compile_flags,
extra_source_files = extra_source_files,
source_files = source_files,
)
elif CcInfo in dep and HaskellInfo not in dep:
# The final link of a binary must include all static libraries we
            # depend on, including transitive ones. These libs are provided
# in the `CcInfo` provider.
acc = HaskellInfo(
package_databases = acc.package_databases,
version_macros = acc.version_macros,
import_dirs = acc.import_dirs,
source_files = acc.source_files,
compile_flags = acc.compile_flags,
static_libraries = acc.static_libraries,
dynamic_libraries = acc.dynamic_libraries,
extra_source_files = acc.extra_source_files,
interface_dirs = acc.interface_dirs,
)
return acc
|
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.contrib import admin
from .models import Task, Segment, Job, Label, AttributeSpec
class JobInline(admin.TabularInline):
model = Job
can_delete = False
# Don't show extra lines to add an object
def has_add_permission(self, request, object=None):
return False
class SegmentInline(admin.TabularInline):
model = Segment
show_change_link = True
readonly_fields = ('start_frame', 'stop_frame')
can_delete = False
# Don't show extra lines to add an object
def has_add_permission(self, request, object=None):
return False
class AttributeSpecInline(admin.TabularInline):
model = AttributeSpec
extra = 0
max_num = None
class LabelInline(admin.TabularInline):
model = Label
show_change_link = True
extra = 0
max_num = None
class LabelAdmin(admin.ModelAdmin):
# Don't show on admin index page
def has_module_permission(self, request):
return False
inlines = [
AttributeSpecInline
]
class SegmentAdmin(admin.ModelAdmin):
# Don't show on admin index page
def has_module_permission(self, request):
return False
inlines = [
JobInline
]
class TaskAdmin(admin.ModelAdmin):
date_hierarchy = 'updated_date'
readonly_fields = ('size', 'path', 'created_date', 'updated_date',
'overlap', 'flipped')
list_display = ('name', 'mode', 'owner', 'assignee', 'created_date', 'updated_date')
search_fields = ('name', 'mode', 'owner__username', 'owner__first_name',
'owner__last_name', 'owner__email', 'assignee__username', 'assignee__first_name',
'assignee__last_name')
inlines = [
SegmentInline,
LabelInline
]
    # Don't allow adding a task because it isn't a trivial operation
def has_add_permission(self, request):
return False
admin.site.register(Task, TaskAdmin)
admin.site.register(Segment, SegmentAdmin)
admin.site.register(Label, LabelAdmin)
|
__all__ = ['ZoneMapper']
|
# Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module has been relocated to ``dazl.client``, ``dazl.damlast``, ``dazl.protocols``, or
``dazl.query``.
"""
from typing import TYPE_CHECKING, TypeVar, Union
import warnings
from ..client.errors import ConfigurationError, DazlPartyMissingError, UnknownTemplateWarning
from ..client.state import (
ContractContextualData,
ContractContextualDataCollection,
ContractsHistoricalState,
ContractsState,
)
from ..damlast.daml_lf_1 import TypeConName
from ..damlast.pkgfile import Dar
from ..prim import ContractData, ContractId as ContractId_, DazlError, DazlWarning, Party
from ..prim.errors import DazlImportError
from ..protocols.errors import ConnectionTimeoutError, UserTerminateRequest
from ..query import ContractMatch
from ..util.proc_util import ProcessDiedException
if TYPE_CHECKING:
from .types import Type, TypeReference
T = TypeVar("T")
__all__ = [
"ConfigurationError",
"ConnectionTimeoutError",
"ContractContextualData",
"ContractContextualDataCollection",
"ContractData",
"ContractId",
"ContractMatch",
"ContractsHistoricalState",
"ContractsState",
"Dar",
"DazlError",
"DazlImportError",
"DazlPartyMissingError",
"DazlWarning",
"Party",
"ProcessDiedException",
"UnknownTemplateWarning",
"UserTerminateRequest",
]
class ContractId(ContractId_):
__slots__ = ("_value_type_deprecated",)
_value_type_deprecated: "TypeReference"
def __init__(self, contract_id: str, template_id: "Union[str, Type, TypeConName]"):
warnings.warn(
"dazl.model.core.ContractId is deprecated; use dazl.prim.ContractId instead.",
DeprecationWarning,
stacklevel=2,
)
from ..damlast.compat import parse_template
if not isinstance(contract_id, str):
raise ValueError("contract_id must be a string")
value = contract_id
value_type, value_type_deprecated = parse_template(template_id)
super().__init__(value_type, value)
object.__setattr__(self, "_value_type_deprecated", value_type_deprecated)
@property
def contract_id(self) -> str:
"""
Get the raw contract ID value (for example, ``"#4:1"``).
"""
warnings.warn(
"ContractId.contract_id is deprecated; use ContractId.value instead.",
DeprecationWarning,
stacklevel=2,
)
return self.value
@property
def template_id(self) -> "TypeReference":
"""
Get the type of template that is pointed to by this :class:`ContractId` as a
:class:`TypeReference`. Note that usage of :class:`Type` and :class:`TypeReference` are
deprecated, and :meth:`value_type` should be used instead.
As of dazl 7.3.0, the :class:`TemplateId` is always normalized to a :class:`TypeReference`,
regardless of what the :class:`ContractId` was constructed with.
"""
warnings.warn(
"ContractId.template_id is deprecated; use ContractId.value_type instead.",
DeprecationWarning,
stacklevel=2,
)
return self._value_type_deprecated
def exercise(self, choice_name, arguments=None):
"""
Create an :class:`ExerciseCommand` that represents the result of exercising a choice on this
contract with the specified choice.
:param choice_name:
The name of the choice to exercise.
:param arguments:
(optional) A ``dict`` of named values to send as parameters to the choice exercise.
"""
warnings.warn(
"ContractId.exercise is deprecated; prefer calling dazl.ledger.Connection.exercise or "
"dazl.client.PartyClient.submit_exercise, or use dazl.ledger.ExerciseCommand instead.",
DeprecationWarning,
stacklevel=2,
)
from .writing import ExerciseCommand
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return ExerciseCommand(self, choice_name, arguments=arguments)
def replace(self, contract_id=None, template_id=None):
"""
Return a new :class:`ContractId` instance replacing specified fields with values.
"""
warnings.warn(
"ContractId.replace is deprecated; simply construct a ContractId with the desired "
"values instead.",
DeprecationWarning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return ContractId(
contract_id if contract_id is not None else self.value,
template_id if template_id is not None else self.value_type,
)
def for_json(self):
"""
Return the JSON representation of this contract. This is currently just the contract ID
string itself.
"""
return self.value
class CommandTimeoutError(DazlError):
"""
Raised when a corresponding event for a command was not seen in the appropriate time window.
"""
def __init__(self):
warnings.warn(
"This error is never raised; this symbol will be removed in dazl v9",
DeprecationWarning,
stacklevel=2,
)
class ConnectionClosedError(DazlError):
"""
Raised when trying to do something that requires a connection after connection pools have been
closed.
"""
def __init__(self):
warnings.warn(
"This error is never raised; this symbol will be removed in dazl v9",
DeprecationWarning,
stacklevel=2,
)
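# --- Hedged migration sketch (illustrative only) ---
# New code should construct the non-deprecated primitive directly, e.g.:
#
#   from dazl.prim import ContractId as PrimContractId
#   cid = PrimContractId(value_type, "#4:1")
#
# where ``value_type`` is the template's type identifier; the deprecated
# wrapper above only adapts the old (contract_id, template_id) argument
# order to that constructor.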
|
# full assembly of the sub-parts to form the complete net
import torch.nn as nn
import torch.nn.functional as F

from .unet_parts import *
class UNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4 = up(128, 64)
self.outc = outconv(64, n_classes)
self.sig = nn.Sigmoid()
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
x = self.sig(x)
return x
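# --- Hedged usage sketch (not part of the original model; the input shape and
# channel counts below are assumptions) ---
if __name__ == "__main__":
    import torch

    net = UNet(n_channels=3, n_classes=1)
    dummy = torch.randn(1, 3, 256, 256)
    out = net(dummy)
    # With the usual unet_parts implementation the spatial size is preserved,
    # so this is expected to print torch.Size([1, 1, 256, 256]).
    print(out.shape)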
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# pylint: disable=too-few-public-methods
"""
`adafruit_register.i2c_struct`
====================================================
Generic structured registers based on `struct`
* Author(s): Scott Shawcroft
"""
try:
import struct
except ImportError:
import ustruct as struct
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Register.git"
class Struct:
"""
Arbitrary structure register that is readable and writeable.
Values are tuples that map to the values in the defined struct. See struct
module documentation for struct format string and its possible value types.
:param int register_address: The register address to read the bit from
:param type struct_format: The struct format string for this register.
"""
def __init__(self, register_address, struct_format):
self.format = struct_format
self.buffer = bytearray(1 + struct.calcsize(self.format))
self.buffer[0] = register_address
def __get__(self, obj, objtype=None):
with obj.i2c_device as i2c:
i2c.write_then_readinto(self.buffer, self.buffer, out_end=1, in_start=1)
return struct.unpack_from(self.format, memoryview(self.buffer)[1:])
def __set__(self, obj, value):
struct.pack_into(self.format, self.buffer, 1, *value)
with obj.i2c_device as i2c:
i2c.write(self.buffer)
class UnaryStruct:
"""
Arbitrary single value structure register that is readable and writeable.
Values map to the first value in the defined struct. See struct
module documentation for struct format string and its possible value types.
:param int register_address: The register address to read the bit from
:param type struct_format: The struct format string for this register.
"""
def __init__(self, register_address, struct_format):
self.format = struct_format
self.address = register_address
def __get__(self, obj, objtype=None):
buf = bytearray(1 + struct.calcsize(self.format))
buf[0] = self.address
with obj.i2c_device as i2c:
i2c.write_then_readinto(buf, buf, out_end=1, in_start=1)
return struct.unpack_from(self.format, buf, 1)[0]
def __set__(self, obj, value):
buf = bytearray(1 + struct.calcsize(self.format))
buf[0] = self.address
struct.pack_into(self.format, buf, 1, value)
with obj.i2c_device as i2c:
i2c.write(buf)
class ROUnaryStruct(UnaryStruct):
"""
Arbitrary single value structure register that is read-only.
Values map to the first value in the defined struct. See struct
module documentation for struct format string and its possible value types.
:param int register_address: The register address to read the bit from
:param type struct_format: The struct format string for this register.
"""
def __set__(self, obj, value):
raise AttributeError()
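# --- Hedged usage sketch (illustrative only; the register addresses, format
# strings, and device wiring below are assumptions, not part of this module) ---
class _ExampleSensor:
    """Sketch of a driver built on these descriptors."""

    # 16-bit signed little-endian raw reading at an assumed register 0x05.
    _raw_value = ROUnaryStruct(0x05, "<h")
    # Two unsigned bytes (e.g. a config pair) at an assumed register 0x01.
    _config = Struct(0x01, "<BB")

    def __init__(self, i2c_device):
        # `i2c_device` is expected to behave like an adafruit_bus_device
        # I2CDevice (a context manager providing write and
        # write_then_readinto), which is what the descriptors above use.
        self.i2c_device = i2c_device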
|
from unittest import TestCase
from model_mommy import mommy
from physical.commands import HostCommandOL6, HostCommandOL7
class CommandsBaseTestCase(object):
OS_VERSION = ''
HOST_COMMAND_CLASS = None
EXPECTED_CMD_TMPL = ''
def setUp(self):
self.host = mommy.make(
'Host',
os_description='OL {}'.format(self.OS_VERSION)
)
self.instance = mommy.make('Instance', hostname=self.host)
def test_is_instance(self):
self.assertTrue(
isinstance(self.host.commands, self.HOST_COMMAND_CLASS)
)
def test_start(self):
cmd = self.host.commands.exec_service_command(
service_name='fake_service_name',
action='fake_start'
)
self.assertEqual(
cmd,
self.EXPECTED_CMD_TMPL.format(
service_name='fake_service_name',
action='fake_start'
)
)
def test_stop(self):
cmd = self.host.commands.exec_service_command(
service_name='fake_service_name',
action='fake_stop'
)
self.assertEqual(
cmd,
self.EXPECTED_CMD_TMPL.format(
service_name='fake_service_name',
action='fake_stop'
)
)
def test_start_no_output(self):
cmd = self.host.commands.exec_service_command(
service_name='fake_service_name',
action='fake_start',
no_output=True
)
expected_cmd = '{} > /dev/null'.format(
self.EXPECTED_CMD_TMPL.format(
service_name='fake_service_name',
action='fake_start'
)
)
self.assertEqual(
cmd,
expected_cmd
)
def test_stop_no_output(self):
cmd = self.host.commands.exec_service_command(
service_name='fake_service_name',
action='fake_stop',
no_output=True
)
expected_cmd = '{} > /dev/null'.format(
self.EXPECTED_CMD_TMPL.format(
service_name='fake_service_name',
action='fake_stop'
)
)
self.assertEqual(
cmd,
expected_cmd
)
class CustomCommandOL6TestCase(CommandsBaseTestCase, TestCase):
OS_VERSION = '6.10'
HOST_COMMAND_CLASS = HostCommandOL6
EXPECTED_CMD_TMPL = '/etc/init.d/{service_name} {action}'
class CustomCommandOL7TestCase(CommandsBaseTestCase, TestCase):
OS_VERSION = '7.10'
HOST_COMMAND_CLASS = HostCommandOL7
EXPECTED_CMD_TMPL = 'sudo systemctl {action} {service_name}.service'
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2020, David Swarbrick.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for image resizing based on filesize."""
from __future__ import division, absolute_import, print_function
import unittest
import os
from test import _common
from test.helper import TestHelper
from beets.util import syspath
from beets.util.artresizer import (
pil_resize,
im_resize,
get_im_version,
get_pil_version,
)
class ArtResizerFileSizeTest(_common.TestCase, TestHelper):
"""Unittest test case for Art Resizer to a specific filesize."""
IMG_225x225 = os.path.join(_common.RSRC, b"abbey.jpg")
IMG_225x225_SIZE = os.stat(syspath(IMG_225x225)).st_size
def setUp(self):
"""Called before each test, setting up beets."""
self.setup_beets()
def tearDown(self):
"""Called after each test, unloading all plugins."""
self.teardown_beets()
def _test_img_resize(self, resize_func):
"""Test resizing based on file size, given a resize_func."""
# Check quality setting unaffected by new parameter
im_95_qual = resize_func(
225,
self.IMG_225x225,
quality=95,
max_filesize=0,
)
# check valid path returned - max_filesize hasn't broken resize command
self.assertExists(im_95_qual)
# Attempt a lower filesize with same quality
im_a = resize_func(
225,
self.IMG_225x225,
quality=95,
max_filesize=0.9 * os.stat(syspath(im_95_qual)).st_size,
)
self.assertExists(im_a)
# target size was achieved
self.assertLess(os.stat(syspath(im_a)).st_size,
os.stat(syspath(im_95_qual)).st_size)
# Attempt with lower initial quality
im_75_qual = resize_func(
225,
self.IMG_225x225,
quality=75,
max_filesize=0,
)
self.assertExists(im_75_qual)
im_b = resize_func(
225,
self.IMG_225x225,
quality=95,
max_filesize=0.9 * os.stat(syspath(im_75_qual)).st_size,
)
self.assertExists(im_b)
# Check high (initial) quality still gives a smaller filesize
self.assertLess(os.stat(syspath(im_b)).st_size,
os.stat(syspath(im_75_qual)).st_size)
@unittest.skipUnless(get_pil_version(), "PIL not available")
def test_pil_file_resize(self):
"""Test PIL resize function is lowering file size."""
self._test_img_resize(pil_resize)
@unittest.skipUnless(get_im_version(), "ImageMagick not available")
def test_im_file_resize(self):
"""Test IM resize function is lowering file size."""
self._test_img_resize(im_resize)
def suite():
"""Run this suite of tests."""
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
# Copyright (C) 2019-2020, Therapixel SA.
# All rights reserved.
# This file is subject to the terms and conditions described in the
# LICENSE file distributed in this package.
"""The dcm2model module provides methods that can be used to convert pydicom.Dataset
instances to sqlalchemy instances.
"""
from typing import Tuple, Union
from pydicom import Dataset, dcmread
from pacsanini.convert import agestr2years, dcm2dict, str2datetime
from pacsanini.db.models import Image, Patient, Series, Study, StudyFind
from pacsanini.parse import DicomTagGroup
def dcm2patient(dcm: Dataset, institution: str = None) -> Patient:
"""Convert a DICOM file to a Patient instance that can be inserted
in the database.
Parameters
----------
dcm : Dataset
The DICOM data to convert to a Patient instance.
institution : str
If set, add a specified institution name to the Patient
model. The default is None.
Returns
-------
Patient
The Patient model.
"""
tag_grp = DicomTagGroup(
tags=[
{"tag_name": "PatientID", "tag_alias": "patient_id"},
{"tag_name": "PatientName", "tag_alias": "patient_name", "callback": str},
{
"tag_name": "PatientBirthDate",
"tag_alias": "patient_birth_date",
"callback": str2datetime,
},
]
)
data = tag_grp.parse_dicom(dcm)
data["institution"] = institution
return Patient(**data)
def dcm2study(dcm: Dataset) -> Study:
"""Convert a DICOM file to a Study instance that can be inserted
in the database.
Parameters
----------
dcm : Dataset
The DICOM data to convert to a Study instance.
Returns
-------
Study
The Study model.
"""
tag_grp = DicomTagGroup(
tags=[
{"tag_name": "StudyInstanceUID", "tag_alias": "study_uid"},
{
"tag_name": "StudyDate",
"tag_alias": "study_date",
"callback": str2datetime,
},
{
"tag_name": "PatientAge",
"tag_alias": "patient_age",
"callback": agestr2years,
"default": -1,
},
{"tag_name": "AccessionNumber", "tag_alias": "accession_number"},
]
)
data = tag_grp.parse_dicom(dcm)
return Study(**data)
def dcm2study_finding(dcm: Dataset) -> StudyFind:
"""Convert a DICOM file to a StudyFind instance that can be inserted
in the database.
Parameters
----------
dcm : Dataset
The DICOM data to convert to a StudyFind instance.
Returns
-------
StudyFind
The StudyFind model.
"""
tag_grp = DicomTagGroup(
tags=[
{"tag_name": "PatientName", "tag_alias": "patient_name", "callback": str},
{"tag_name": "PatientID", "tag_alias": "patient_id"},
{"tag_name": "StudyInstanceUID", "tag_alias": "study_uid"},
{
"tag_name": "StudyDate",
"tag_alias": "study_date",
"callback": str2datetime,
},
{"tag_name": "AccessionNumber", "tag_alias": "accession_number"},
]
)
data = tag_grp.parse_dicom(dcm)
return StudyFind(**data)
def dcm2series(dcm: Dataset) -> Series:
"""Convert a DICOM file to a Series instance that can be inserted
in the database.
Parameters
----------
dcm : Dataset
The DICOM data to convert to a Series instance.
Returns
-------
Series
The Series model.
"""
tag_grp = DicomTagGroup(
tags=[
{"tag_name": "SeriesInstanceUID", "tag_alias": "series_uid"},
{"tag_name": "Modality", "tag_alias": "modality"},
]
)
data = tag_grp.parse_dicom(dcm)
return Series(**data)
def dcm2image(dcm: Dataset, institution: str = None, filepath: str = None) -> Image:
"""Convert a DICOM file to a Image instance that can be inserted
in the database.
Parameters
----------
dcm : Dataset
The DICOM data to convert to a Image instance.
institution : str
If set, add a specified institution name to the Image
model. The default is None.
filepath : str
If set, add the DICOM's filepath to the database. The default
is None.
Returns
-------
Image
The Image model.
"""
tag_grp = DicomTagGroup(
tags=[
{"tag_name": "PatientID", "tag_alias": "patient_id"},
{"tag_name": "StudyInstanceUID", "tag_alias": "study_uid"},
{
"tag_name": "StudyDate",
"tag_alias": "study_date",
"callback": str2datetime,
},
{"tag_name": "SeriesInstanceUID", "tag_alias": "series_uid"},
{"tag_name": "Modality", "tag_alias": "modality"},
{"tag_name": "SOPClassUID", "tag_alias": "sop_class_uid"},
{"tag_name": "SOPInstanceUID", "tag_alias": "image_uid"},
{"tag_name": "AcquisitionTime", "tag_alias": "acquisition_time"},
{"tag_name": "Manufacturer", "tag_alias": "manufacturer"},
{
"tag_name": "ManufacturerModelName",
"tag_alias": "manufacturer_model_name",
},
]
)
data = tag_grp.parse_dicom(dcm)
data["meta"] = dcm2dict(dcm, include_pixels=False)
data["institution"] = institution
data["filepath"] = filepath
return Image(**data)
def dcm2dbmodels(
dcm: Union[str, Dataset], institution: str = None, filepath: str = None
) -> Tuple[Patient, Study, Series, Image]:
"""Convert a DICOM file into the different database models that will be used
to insert the DICOM data into the database.
Parameters
----------
dcm : Union[str, Dataset]
The DICOM data to convert to a Patient, Study, Series, and Image instance.
institution : str
If set, add a specified institution name to the Patient
model. The default is None.
filepath : str
If set, add the DICOM's filepath to the database. The default
is None. If the input dcm parameter value is a string, filepath
will be set to this.
Returns
-------
Tuple[Patient, Study, Series, Image]
A 4-tuple containing the Patient, Study, Series, and Image models derived from the DICOM data.
"""
if isinstance(dcm, str):
filepath = dcm
dcm = dcmread(dcm, stop_before_pixels=True)
pat = dcm2patient(dcm, institution=institution)
study = dcm2study(dcm)
series = dcm2series(dcm)
image = dcm2image(dcm, institution=institution, filepath=filepath)
return pat, study, series, image
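# Hypothetical demo entry point, added for illustration only and not part of the
# original module: it converts a DICOM file given on the command line into the four
# database models defined above. The institution string is a made-up placeholder.
if __name__ == "__main__":
    import sys
    pat, study, series, image = dcm2dbmodels(sys.argv[1], institution="Example Hospital")
    print(pat, study, series, image)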
|
from typing import Callable, Optional
import gin
import gym
from interact.agents.ddpg.ddpg import DDPGAgent
from interact.agents.utils import register
@gin.configurable(name_or_fn="td3", denylist=["env_fn"])
@register("td3")
class TD3Agent(DDPGAgent):
"""The Twin Delayed DDPG (TD3) algorithm.
This algorithm is a minor modification of DDPG. This class is merely a wrapper
around DDPG with the TD3 features enabled by default. Namely, TD3 uses twin
critic networks, delayed policy updates, and target policy smoothing.
Args:
env_fn: A function that, when called, returns an instance of the agent's
environment.
network: Base network type to be used by the policy and Q-functions.
actor_lr: Learning rate to use for updating the actor.
critic_lr: Learning rate to use for updating the critics.
tau: Parameter for the polyak averaging used to update the target networks.
target_update_interval: Frequency with which the target Q-networks are updated.
gamma: The discount factor.
buffer_size: The maximum size of the replay buffer.
train_freq: The frequency with which training updates are performed.
learning_starts: The number of timesteps after which learning starts.
random_steps: Actions will be sampled completely at random for this many
timesteps at the beginning of training.
batch_size: The size of batches sampled from the replay buffer over which
updates are performed.
num_workers: The number of parallel workers to use for experience collection.
num_envs_per_worker: The number of synchronous environments to be executed in
each worker.
prioritized_replay: If True, a prioritized experience replay will be used.
prioritized_replay_alpha: Alpha parameter for prioritized replay.
prioritized_replay_beta: Initial beta parameter for prioritized replay.
final_prioritized_replay_beta: The final value of the prioritized replay beta
parameter.
prioritized_replay_beta_steps: Number of steps over which the prioritized
replay beta parameter will be annealed. If None, this will be set to the
total number of training steps.
prioritized_replay_epsilon: Epsilon to add to td-errors when updating
priorities.
initial_noise_scale: The initial scale of the Gaussian noise that is added to
actions for exploration.
final_noise_scale: The final scale of the Gaussian noise that is added to
actions for exploration.
noise_scale_steps: The number of timesteps over which the amount of exploration
noise is annealed from `initial_noise_scale` to `final_noise_scale`. If
None, the total duration of training is used.
use_huber: If True, the Huber loss is used in favor of MSE for critic updates.
use_twin_critic: If True, twin critic networks are used.
policy_delay: The policy is updated once for every `policy_delay` critic
updates.
smooth_target_policy: If True, target policy smoothing is used in the critic
updates.
target_noise: The amount of target noise that is used for smoothing.
target_noise_clip: The value at which target noise is clipped.
"""
def __init__(
self,
env_fn: Callable[[], gym.Env],
network: str = "mlp",
critic_lr: float = 1e-3,
actor_lr: float = 1e-3,
learning_starts: int = 10000,
random_steps: int = 10000,
target_update_interval: int = 1,
tau: float = 0.005,
gamma: float = 0.95,
buffer_size: int = 100000,
train_freq: int = 1,
batch_size: int = 100,
num_workers: int = 1,
num_envs_per_worker: int = 1,
prioritized_replay: bool = False,
prioritized_replay_alpha: float = 0.6,
prioritized_replay_beta: float = 0.4,
final_prioritized_replay_beta: float = 4.0,
prioritized_replay_beta_steps: Optional[int] = None,
prioritized_replay_epsilon: float = 1e-6,
initial_noise_scale: float = 0.1,
final_noise_scale: float = 0.1,
noise_scale_steps: Optional[int] = None,
use_huber: bool = False,
use_twin_critic: bool = True,
policy_delay: int = 2,
smooth_target_policy: bool = True,
target_noise: float = 0.2,
target_noise_clip: float = 0.5,
):
super().__init__(
env_fn,
network,
critic_lr,
actor_lr,
learning_starts,
random_steps,
target_update_interval,
tau,
gamma,
buffer_size,
train_freq,
batch_size,
num_workers,
num_envs_per_worker,
prioritized_replay,
prioritized_replay_alpha,
prioritized_replay_beta,
final_prioritized_replay_beta,
prioritized_replay_beta_steps,
prioritized_replay_epsilon,
initial_noise_scale,
final_noise_scale,
noise_scale_steps,
use_huber,
use_twin_critic,
policy_delay,
smooth_target_policy,
target_noise,
target_noise_clip,
)
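# Hypothetical usage sketch (not part of the original module; the environment id is an
# assumption): the agent is built from a zero-argument environment factory, matching
# the `env_fn` contract documented above.
#
#     import gym
#     agent = TD3Agent(lambda: gym.make("Pendulum-v1"), network="mlp", policy_delay=2)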
|
# Copyright (c) 2022, Leonardo Lamanna
# All rights reserved.
# This source code is licensed under the MIT-style license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
import os
pd.options.display.max_colwidth = 100
def generate_latex_table(data_file, labels, tab_name, caption, header):
with open(tab_name + ".tex", "w") as f:
df = pd.read_excel(data_file, sheet_name="Summary")
df_restricted = df[labels]
f.write(df_restricted.to_latex(index=False, escape=False,
label="tab:{}".format(tab_name),
caption= caption,
header = header))
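# Illustrative call (hypothetical file and column names, for illustration only):
#   generate_latex_table("results.xlsx",
#                        labels=["Domain", "Overall precision", "Overall recall"],
#                        tab_name="example_table",
#                        caption="Example caption.",
#                        header=["Domain", "$P$", "$R$"])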
def generate_comparison_latex_table():
labels = ["Domain", "Neg precision A", "Neg recall A", "Overall precision A", "Overall recall A",
"Neg precision B", "Neg recall B", "Overall precision B", "Overall recall B"]
header = ["Domain", "$P_{\\eff^{-}}$", "$R_{\\eff^{-}}$", "$P$", "$R$",
"$P_{\\eff^{-}}$", "$R_{\\eff^{-}}$", "$P$", "$R$"]
caption = "For each domain:statistics on final metrics of the last instance grouped by " \
"negative effects."
tab_name = "comparison_summary_uncertain"
file_path = os.path.join("comparison_summary_uncertain.xlsx")
generate_latex_table(file_path, labels, tab_name, caption, header)
def generate_comparison_latex_table_fama():
labels = ["Domain", "Tot time", "Overall precision", "Overall recall", "FAMA tot time",
"FAMA precision", "FAMA recall", "Delta act"]
header = ["Domain", "$t$", "$P$", "$R$", "$t$", "$P$", "$R$", "$\delta_{A}$"]
caption = "Comparison among OLAM and FAMA with full observability. FAMA is run with all plan traces " \
"provided in \protect\cite{aineto_AIJ2019}. MODEL WITH UNCERTAIN NEGATIVE EFFECTS AND STRIPS ASSUMPTION."
tab_name = "comparison_fama"
file_path = os.path.join("comparison_fama.xlsx")
generate_latex_table(file_path, labels, tab_name, caption, header)
def generate_summary_latex_table():
# labels = ["Domain", "Instances", "Precs precision", "Precs recall","Pos precision", "Pos recall",
# "Neg precision", "Neg recall", "Overall precision", "Overall recall"]
labels = ["Domain", "Instances", "Precs precision", "Precs recall","Pos precision", "Pos recall",
"Neg precision", "Neg recall", "Average precision", "Average recall"]
header = ["Domain", "$I$", "$P_{\\prec}$", "$R_{\\prec}$", "$P_{\\eff^{+}}$", "$R_{\\eff^{+}}$", "$P_{\\eff^{-}}$",
"$R_{\\eff^{-}}$", "$P$", "$R$"]
caption = "For each domain:statistics on final metrics of the last instance grouped by " \
"preconditions, positive effects and negative ones."
tab_name = "overall_summary_certain_nostripsass"
folder = "../Analysis/IJCAI_Results/Results_certain_NOnegeff_assumption"
file_path = os.path.join(folder, "overall_summary.xlsx")
generate_latex_table(file_path, labels, tab_name, caption, header)
def generate_domain_objects_table():
header = ["Domain", "Objects"]
caption = "For each domain, problem objects of all problems in the generated set."
tab_name = "all_problem_objects"
df = pd.DataFrame({
"Domain":[],
"Objects":[]
})
# df.set_index('Domain', inplace=True)
domain_dataframes = [name for name in os.listdir(os.path.join("..", "Analysis", "Results_cert"))
if not name.startswith("overall")]
for domain_dataframe in domain_dataframes:
domain = domain_dataframe.split("_")[0]
df_domain = pd.read_excel(os.path.join("..", "Analysis", "Results_cert", domain_dataframe),
sheet_name="Objects")
domain_obj_types = [key.strip().lower() for key in list(df_domain) if key.strip().lower() != "total objs"]
for i, row in df_domain.iterrows():
problem_objs = []
for k in domain_obj_types:
problem_objs.append("{} {}".format(k,row["\t" + k]))
entry = {
"Domain": domain,
"Objects": ", ".join(problem_objs)
}
df = df.append(entry, ignore_index=True)
with open(tab_name + ".tex", "w") as f:
f.write(df.to_latex(index=False,
label="tab:{}".format(tab_name),
caption= caption,
header = header))
if __name__ == "__main__":
generate_summary_latex_table()
#
# generate_domain_objects_table()
|
#!/usr/bin/env python
import torch
import torch.nn as nn
from colossalai.nn import CheckpointModule
from .utils.dummy_data_generator import DummyDataGenerator
from .registry import non_distributed_component_funcs
class NetWithRepeatedlyComputedLayers(CheckpointModule):
"""
This model tests layers that go through the forward pass multiple times.
In this model, fc1 and fc2 are each called twice per forward pass.
"""
def __init__(self, checkpoint=False) -> None:
super().__init__(checkpoint=checkpoint)
self.fc1 = nn.Linear(5, 5)
self.fc2 = nn.Linear(5, 5)
self.fc3 = nn.Linear(5, 2)
self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class DummyDataLoader(DummyDataGenerator):
def generate(self):
data = torch.rand(16, 5)
label = torch.randint(low=0, high=2, size=(16,))
return data, label
@non_distributed_component_funcs.register(name='repeated_computed_layers')
def get_training_components():
def model_builder(checkpoint=True):
return NetWithRepeatedlyComputedLayers(checkpoint)
trainloader = DummyDataLoader()
testloader = DummyDataLoader()
criterion = torch.nn.CrossEntropyLoss()
return model_builder, trainloader, testloader, torch.optim.Adam, criterion
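# Hypothetical usage sketch (not part of the original module; assumes a working
# colossalai installation): build the registered components and run one
# forward/backward pass on the dummy data.
#
#   model_builder, trainloader, _, optimizer_cls, criterion = get_training_components()
#   model = model_builder(checkpoint=False)
#   data, label = trainloader.generate()
#   loss = criterion(model(data), label)
#   loss.backward()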
|
import math
import torch
from ..abstract import ExtendedTorchModule
from ..functional import sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class HardSoftmaxNACLayer(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
# Define the target weights. Also, put 0 last such that p1 = p2 = 0
# corresponds to p3 = 1 => w = 0.
self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))
# Initialize a tensor, that will be the placeholder for the hard samples
self.register_buffer('sample', torch.LongTensor(out_features, in_features))
# We use only two trainable parameters per weight, to avoid the redundancy that
# would otherwise exist. This also makes it much more comparable with NAC.
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features, 2))
self.register_buffer('W_hat_k', torch.Tensor(out_features, in_features, 1))
self.register_parameter('bias', None)
def reset_parameters(self):
# Use a gain of sqrt(0.5). Let's assume that softmax'(0) ~ 1, because this
# holds for sigmoid. Then:
# Var[W] = 1 * Var[S_1] + 1 * Var[S_2] + 0 * Var[S_3] = 2 / (fan[in] + fan[out])
# Var[W] = 2 * Var[S_i] = 2 / (fan[in] + fan[out])
# Var[S_i] = 1/2 * 2 / (fan[in] + fan[out])
# sqrt(Var[S_i]) = sqrt(1/2) * sqrt(2 / (fan[in] + fan[out]))
# This is not exactly true, because S_1, S_2, and S_3 are not entirely uncorrelated.
torch.nn.init.xavier_uniform_(self.W_hat, gain=math.sqrt(0.5))
torch.nn.init.constant_(self.W_hat_k, 0)
def forward(self, input, reuse=False):
# Concat trainable and non-trainable weights
W_hat_full = torch.cat((self.W_hat, self.W_hat_k), dim=-1) # size = [out, in, 3]
# Compute W_soft
pi = torch.nn.functional.softmax(W_hat_full, dim=-1)
W_soft = pi @ self.target_weights
# Compute W_hard
if not reuse:
torch.multinomial(pi.view(-1, 3), 1, True, out=self.sample.view(-1))
W_hard = self.target_weights[self.sample]
# Use W_hard in the forward pass, but use W_soft for the gradients.
# This implementation trick comes from torch.nn.functional.gumbel_softmax(hard=True)
W = W_hard - W_soft.detach() + W_soft
# Compute the linear multiplication as usual
self.writer.add_histogram('W', W)
self.writer.add_tensor('W', W)
self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)
return torch.nn.functional.linear(input, W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(
self.in_features, self.out_features
)
class HardSoftmaxNACCell(AbstractRecurrentCell):
"""Implements the Gumbel NAC (Gumbel Neural Accumulator) as a recurrent cell
Arguments:
input_size: number of ingoing features
hidden_size: number of outgoing features
"""
def __init__(self, input_size, hidden_size, **kwargs):
super().__init__(HardSoftmaxNACLayer, input_size, hidden_size, **kwargs)
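if __name__ == "__main__":
    # Minimal standalone sketch, added for illustration and not part of the original
    # module: it reproduces the straight-through trick used in
    # HardSoftmaxNACLayer.forward, where the hard (sampled) weights are used in the
    # forward pass while gradients flow through the soft (softmax) weights.
    target_weights = torch.tensor([1.0, -1.0, 0.0])
    logits = torch.randn(4, 3, requires_grad=True)
    pi = torch.nn.functional.softmax(logits, dim=-1)
    W_soft = pi @ target_weights
    sample = torch.multinomial(pi, 1).squeeze(-1)
    W_hard = target_weights[sample]
    W = W_hard - W_soft.detach() + W_soft
    W.sum().backward()  # gradients reach `logits` through W_soft only
    print(W, logits.grad)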
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .task_agent_reference import TaskAgentReference
class TaskAgent(TaskAgentReference):
"""TaskAgent.
:param _links:
:type _links: :class:`ReferenceLinks <task-agent.v4_1.models.ReferenceLinks>`
:param enabled: Gets or sets a value indicating whether or not this agent should be enabled for job execution.
:type enabled: bool
:param id: Gets the identifier of the agent.
:type id: int
:param name: Gets the name of the agent.
:type name: str
:param oSDescription: Gets the OS of the agent.
:type oSDescription: str
:param status: Gets the current connectivity status of the agent.
:type status: object
:param version: Gets the version of the agent.
:type version: str
:param assigned_request: Gets the request which is currently assigned to this agent.
:type assigned_request: :class:`TaskAgentJobRequest <task-agent.v4_1.models.TaskAgentJobRequest>`
:param authorization: Gets or sets the authorization information for this agent.
:type authorization: :class:`TaskAgentAuthorization <task-agent.v4_1.models.TaskAgentAuthorization>`
:param created_on: Gets the date on which this agent was created.
:type created_on: datetime
:param last_completed_request: Gets the last request which was completed by this agent.
:type last_completed_request: :class:`TaskAgentJobRequest <task-agent.v4_1.models.TaskAgentJobRequest>`
:param max_parallelism: Gets or sets the maximum job parallelism allowed on this host.
:type max_parallelism: int
:param pending_update: Gets the pending update for this agent.
:type pending_update: :class:`TaskAgentUpdate <task-agent.v4_1.models.TaskAgentUpdate>`
:param properties:
:type properties: :class:`object <task-agent.v4_1.models.object>`
:param status_changed_on: Gets the date on which the last connectivity status change occurred.
:type status_changed_on: datetime
:param system_capabilities:
:type system_capabilities: dict
:param user_capabilities:
:type user_capabilities: dict
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'oSDescription': {'key': 'oSDescription', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'version': {'key': 'version', 'type': 'str'},
'assigned_request': {'key': 'assignedRequest', 'type': 'TaskAgentJobRequest'},
'authorization': {'key': 'authorization', 'type': 'TaskAgentAuthorization'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'last_completed_request': {'key': 'lastCompletedRequest', 'type': 'TaskAgentJobRequest'},
'max_parallelism': {'key': 'maxParallelism', 'type': 'int'},
'pending_update': {'key': 'pendingUpdate', 'type': 'TaskAgentUpdate'},
'properties': {'key': 'properties', 'type': 'object'},
'status_changed_on': {'key': 'statusChangedOn', 'type': 'iso-8601'},
'system_capabilities': {'key': 'systemCapabilities', 'type': '{str}'},
'user_capabilities': {'key': 'userCapabilities', 'type': '{str}'}
}
def __init__(self, _links=None, enabled=None, id=None, name=None, oSDescription=None, status=None, version=None, assigned_request=None, authorization=None, created_on=None, last_completed_request=None, max_parallelism=None, pending_update=None, properties=None, status_changed_on=None, system_capabilities=None, user_capabilities=None):
super(TaskAgent, self).__init__(_links=_links, enabled=enabled, id=id, name=name, oSDescription=oSDescription, status=status, version=version)
self.assigned_request = assigned_request
self.authorization = authorization
self.created_on = created_on
self.last_completed_request = last_completed_request
self.max_parallelism = max_parallelism
self.pending_update = pending_update
self.properties = properties
self.status_changed_on = status_changed_on
self.system_capabilities = system_capabilities
self.user_capabilities = user_capabilities
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=line-too-long
from __future__ import print_function
from collections import OrderedDict
import codecs
import json
import os
import platform
import re
import ssl
import sys
import uuid
import base64
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.resource.resources.models import GenericResource, DeploymentMode
from azure.cli.core.parser import IncorrectUsageError
from azure.cli.core.util import get_file_json, read_file_content, shell_safe_json_parse, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType, get_sdk, get_api_version, AZURE_API_PROFILES
from azure.cli.command_modules.resource._client_factory import (
_resource_client_factory, _resource_policy_client_factory, _resource_lock_client_factory,
_resource_links_client_factory, _resource_deploymentscripts_client_factory, _authorization_management_client, _resource_managedapps_client_factory, _resource_templatespecs_client_factory)
from azure.cli.command_modules.resource._validators import _parse_lock_id
from knack.log import get_logger
from knack.prompting import prompt, prompt_pass, prompt_t_f, prompt_choice_list, prompt_int, NoTTYException
from knack.util import CLIError
from msrest.serialization import Serializer
from msrest.pipeline import SansIOHTTPPolicy
from ._validators import MSI_LOCAL_ID
from ._formatters import format_what_if_operation_result
logger = get_logger(__name__)
def _build_resource_id(**kwargs):
from msrestazure.tools import resource_id as resource_id_from_dict
try:
return resource_id_from_dict(**kwargs)
except KeyError:
return None
def _process_parameters(template_param_defs, parameter_lists): # pylint: disable=too-many-statements
def _try_parse_json_object(value):
try:
parsed = _remove_comments_from_json(value, False)
return parsed.get('parameters', parsed)
except Exception: # pylint: disable=broad-except
return None
def _try_load_file_object(file_path):
try:
is_file = os.path.isfile(file_path)
except ValueError:
return None
if is_file is True:
try:
content = read_file_content(file_path)
if not content:
return None
parsed = _remove_comments_from_json(content, False, file_path)
return parsed.get('parameters', parsed)
except Exception as ex:
raise CLIError("Failed to parse {} with exception:\n {}".format(file_path, ex))
return None
def _try_load_uri(uri):
if "://" in uri:
try:
value = _urlretrieve(uri).decode('utf-8')
parsed = _remove_comments_from_json(value, False)
return parsed.get('parameters', parsed)
except Exception: # pylint: disable=broad-except
pass
return None
def _try_parse_key_value_object(template_param_defs, parameters, value):
# support situation where empty JSON "{}" is provided
if value == '{}' and not parameters:
return True
try:
key, value = value.split('=', 1)
except ValueError:
return False
param = template_param_defs.get(key, None)
if param is None:
raise CLIError("unrecognized template parameter '{}'. Allowed parameters: {}"
.format(key, ', '.join(sorted(template_param_defs.keys()))))
param_type = param.get('type', None)
if param_type:
param_type = param_type.lower()
if param_type in ['object', 'array', 'secureobject']:
parameters[key] = {'value': shell_safe_json_parse(value)}
elif param_type in ['string', 'securestring']:
parameters[key] = {'value': value}
elif param_type == 'bool':
parameters[key] = {'value': value.lower() == 'true'}
elif param_type == 'int':
parameters[key] = {'value': int(value)}
else:
logger.warning("Unrecognized type '%s' for parameter '%s'. Interpretting as string.", param_type, key)
parameters[key] = {'value': value}
return True
parameters = {}
for params in parameter_lists or []:
for item in params:
param_obj = _try_load_file_object(item)
if param_obj is None:
param_obj = _try_parse_json_object(item)
if param_obj is None:
param_obj = _try_load_uri(item)
if param_obj is not None:
parameters.update(param_obj)
elif not _try_parse_key_value_object(template_param_defs, parameters, item):
raise CLIError('Unable to parse parameter: {}'.format(item))
return parameters
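# Illustrative note (hypothetical values): each item in `parameter_lists` may be a
# parameters file path, an inline JSON string such as
# '{"location": {"value": "westus"}}', a URI, or a KEY=VALUE pair such as
# 'location=westus'; anything that matches none of these raises a CLIError.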
# pylint: disable=redefined-outer-name
def _find_missing_parameters(parameters, template):
if template is None:
return {}
template_parameters = template.get('parameters', None)
if template_parameters is None:
return {}
missing = OrderedDict()
for parameter_name in template_parameters:
parameter = template_parameters[parameter_name]
if 'defaultValue' in parameter:
continue
if parameters is not None and parameters.get(parameter_name, None) is not None:
continue
missing[parameter_name] = parameter
return missing
def _prompt_for_parameters(missing_parameters, fail_on_no_tty=True): # pylint: disable=too-many-statements
prompt_list = missing_parameters.keys() if isinstance(missing_parameters, OrderedDict) \
else sorted(missing_parameters)
result = OrderedDict()
no_tty = False
for param_name in prompt_list:
param = missing_parameters[param_name]
param_type = param.get('type', 'string').lower()
description = 'Missing description'
metadata = param.get('metadata', None)
if metadata is not None:
description = metadata.get('description', description)
allowed_values = param.get('allowedValues', None)
prompt_str = "Please provide {} value for '{}' (? for help): ".format(param_type, param_name)
while True:
if allowed_values is not None:
try:
ix = prompt_choice_list(prompt_str, allowed_values, help_string=description)
result[param_name] = allowed_values[ix]
except NoTTYException:
result[param_name] = None
no_tty = True
break
elif param_type == 'securestring':
try:
value = prompt_pass(prompt_str, help_string=description)
except NoTTYException:
value = None
no_tty = True
result[param_name] = value
break
elif param_type == 'int':
try:
int_value = prompt_int(prompt_str, help_string=description)
result[param_name] = int_value
except NoTTYException:
result[param_name] = 0
no_tty = True
break
elif param_type == 'bool':
try:
value = prompt_t_f(prompt_str, help_string=description)
result[param_name] = value
except NoTTYException:
result[param_name] = False
no_tty = True
break
elif param_type in ['object', 'array']:
try:
value = prompt(prompt_str, help_string=description)
except NoTTYException:
value = ''
no_tty = True
if value == '':
value = {} if param_type == 'object' else []
else:
try:
value = shell_safe_json_parse(value)
except Exception as ex: # pylint: disable=broad-except
logger.error(ex)
continue
result[param_name] = value
break
else:
try:
result[param_name] = prompt(prompt_str, help_string=description)
except NoTTYException:
result[param_name] = None
no_tty = True
break
if no_tty and fail_on_no_tty:
raise NoTTYException
return result
# pylint: disable=redefined-outer-name
def _get_missing_parameters(parameters, template, prompt_fn, no_prompt=False):
missing = _find_missing_parameters(parameters, template)
if missing:
if no_prompt is True:
logger.warning("Missing input parameters: %s ", ', '.join(sorted(missing.keys())))
else:
try:
prompt_parameters = prompt_fn(missing)
for param_name in prompt_parameters:
parameters[param_name] = {
"value": prompt_parameters[param_name]
}
except NoTTYException:
raise CLIError("Missing input parameters: {}".format(', '.join(sorted(missing.keys()))))
return parameters
def _ssl_context():
if sys.version_info < (3, 4):
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url):
req = urlopen(url, context=_ssl_context())
return req.read()
# pylint: disable=redefined-outer-name
def _remove_comments_from_json(template, preserve_order=True, file_path=None):
from jsmin import jsmin
# jsmin has a bug that wraps lines when a comment follows the last element of a JSON
# object. That would break the multi-line string handling below, so strip such comments up front.
template = re.sub(r'(^[\t ]*//[\s\S]*?\n)|(^[\t ]*/\*{1,2}[\s\S]*?\*/)', '', template, flags=re.M)
minified = jsmin(template)
# Get rid of multi-line strings. Note, we are not sending it on the wire rather just extract parameters to prompt for values
result = re.sub(r'"[^"]*?\n[^"]*?(?<!\\)"', '"#Azure Cli#"', minified, re.DOTALL)
try:
return shell_safe_json_parse(result, preserve_order)
except CLIError:
# Because the processing of removing comments and compression will lead to misplacement of error location,
# so the error message should be wrapped.
if file_path:
raise CLIError("Failed to parse '{}', please check whether it is a valid JSON format".format(file_path))
raise CLIError("Failed to parse the JSON data, please check whether it is a valid JSON format")
# pylint: disable=too-many-locals, too-many-statements, too-few-public-methods
def _deploy_arm_template_core_unmodified(cmd, resource_group_name, template_file=None,
template_uri=None, deployment_name=None, parameters=None,
mode=None, rollback_on_error=None, validate_only=False, no_wait=False,
aux_subscriptions=None, aux_tenants=None, no_prompt=False):
DeploymentProperties, TemplateLink, OnErrorDeployment = cmd.get_models('DeploymentProperties', 'TemplateLink',
'OnErrorDeployment')
template_link = None
template_obj = None
on_error_deployment = None
template_content = None
if template_uri:
template_link = TemplateLink(uri=template_uri)
template_obj = _remove_comments_from_json(_urlretrieve(template_uri).decode('utf-8'), file_path=template_uri)
else:
template_content = read_file_content(template_file)
template_obj = _remove_comments_from_json(template_content, file_path=template_file)
if rollback_on_error == '':
on_error_deployment = OnErrorDeployment(type='LastSuccessful')
elif rollback_on_error:
on_error_deployment = OnErrorDeployment(type='SpecificDeployment', deployment_name=rollback_on_error)
template_param_defs = template_obj.get('parameters', {})
template_obj['resources'] = template_obj.get('resources', [])
parameters = _process_parameters(template_param_defs, parameters) or {}
parameters = _get_missing_parameters(parameters, template_obj, _prompt_for_parameters, no_prompt)
parameters = json.loads(json.dumps(parameters))
properties = DeploymentProperties(template=template_content, template_link=template_link,
parameters=parameters, mode=mode, on_error_deployment=on_error_deployment)
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
aux_subscriptions=aux_subscriptions, aux_tenants=aux_tenants)
deployment_client = smc.deployments # This handles multi-api resolution for you
if not template_uri:
# pylint: disable=protected-access
deployment_client._serialize = JSONSerializer(
deployment_client._serialize.dependencies
)
# Plug this as default HTTP pipeline
from msrest.pipeline import Pipeline
from msrest.pipeline.requests import (
RequestsCredentialsPolicy,
RequestsPatchSession,
PipelineRequestsHTTPSender
)
from msrest.universal_http.requests import RequestsHTTPSender
smc.config.pipeline = Pipeline(
policies=[
JsonCTemplatePolicy(),
smc.config.user_agent_policy,
RequestsPatchSession(),
smc.config.http_logger_policy,
RequestsCredentialsPolicy(smc.config.credentials)
],
sender=PipelineRequestsHTTPSender(RequestsHTTPSender(smc.config))
)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment')
deployment = Deployment(properties=properties)
validation_poller = deployment_client.validate(resource_group_name, deployment_name, deployment)
validation_result = LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
validation_result = deployment_client.validate(resource_group_name, deployment_name, properties)
if validation_result and validation_result.error:
err_message = _build_preflight_error_message(validation_result.error)
raise CLIError(err_message)
if validate_only:
return validation_result
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
return sdk_no_wait(no_wait, deployment_client.create_or_update, resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, deployment_client.create_or_update, resource_group_name, deployment_name, properties)
class JsonCTemplate:
def __init__(self, template_as_bytes):
self.template_as_bytes = template_as_bytes
class JSONSerializer(Serializer):
def body(self, data, data_type, **kwargs):
if data_type in ('Deployment', 'ScopedDeployment', 'DeploymentWhatIf', 'ScopedDeploymentWhatIf'):
# Be sure to pass a DeploymentProperties
template = data.properties.template
if template:
data_as_dict = data.serialize()
data_as_dict["properties"]["template"] = JsonCTemplate(template)
return data_as_dict
return super(JSONSerializer, self).body(data, data_type, **kwargs)
class JsonCTemplatePolicy(SansIOHTTPPolicy):
def on_request(self, request, **kwargs):
http_request = request.http_request
logger.info(http_request.data)
if (getattr(http_request, 'data', {}) or {}).get('properties', {}).get('template'):
template = http_request.data["properties"]["template"]
if not isinstance(template, JsonCTemplate):
raise ValueError()
del http_request.data["properties"]["template"]
# templateLink and template cannot exist at the same time in deployment_dry_run mode
if "templateLink" in http_request.data["properties"].keys():
del http_request.data["properties"]["templateLink"]
partial_request = json.dumps(http_request.data)
http_request.data = partial_request[:-2] + ", template:" + template.template_as_bytes + r"}}"
http_request.data = http_request.data.encode('utf-8')
# pylint: disable=unused-argument
def deploy_arm_template_at_subscription_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
no_wait=False, handle_extended_json_format=None, no_prompt=False,
confirm_with_what_if=None, what_if_result_format=None,
what_if_exclude_change_types=None, template_spec=None):
if confirm_with_what_if:
what_if_deploy_arm_template_at_subscription_scope(cmd,
template_file=template_file, template_uri=template_uri,
parameters=parameters, deployment_name=deployment_name,
deployment_location=deployment_location,
result_format=what_if_result_format,
exclude_change_types=what_if_exclude_change_types,
no_prompt=no_prompt, template_spec=template_spec)
from knack.prompting import prompt_y_n
if not prompt_y_n("\nAre you sure you want to execute the deployment?"):
return None
return _deploy_arm_template_at_subscription_scope(cmd=cmd,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, deployment_location=deployment_location,
validate_only=False, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec)
# pylint: disable=unused-argument
def validate_arm_template_at_subscription_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
no_wait=False, handle_extended_json_format=None,
no_prompt=False, template_spec=None):
return _deploy_arm_template_at_subscription_scope(cmd=cmd,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, deployment_location=deployment_location,
validate_only=True, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec,)
def _deploy_arm_template_at_subscription_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None, validate_only=False,
no_wait=False, no_prompt=False, template_spec=None):
deployment_properties = _prepare_deployment_properties_unmodified(cmd, template_file=template_file,
template_uri=template_uri, parameters=parameters,
mode='Incremental',
no_prompt=no_prompt,
template_spec=template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, plug_pipeline=(template_uri is None and template_spec is None))
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment')
deployment = Deployment(properties=deployment_properties, location=deployment_location)
validation_poller = mgmt_client.validate_at_subscription_scope(deployment_name, deployment)
validation_result = LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
validation_result = mgmt_client.validate_at_subscription_scope(deployment_name, deployment_properties, deployment_location)
if validation_result and validation_result.error:
err_message = _build_preflight_error_message(validation_result.error)
raise CLIError(err_message)
if validate_only:
return validation_result
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
return sdk_no_wait(no_wait, mgmt_client.create_or_update_at_subscription_scope, deployment_name, deployment)
return sdk_no_wait(no_wait, mgmt_client.create_or_update_at_subscription_scope, deployment_name,
deployment_properties, deployment_location)
# pylint: disable=unused-argument
def deploy_arm_template_at_resource_group(cmd,
resource_group_name=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, mode=None, rollback_on_error=None,
no_wait=False, handle_extended_json_format=None,
aux_subscriptions=None, aux_tenants=None, no_prompt=False,
confirm_with_what_if=None, what_if_result_format=None,
what_if_exclude_change_types=None, template_spec=None):
if confirm_with_what_if:
what_if_deploy_arm_template_at_resource_group(cmd,
resource_group_name=resource_group_name,
template_file=template_file, template_uri=template_uri,
parameters=parameters, deployment_name=deployment_name, mode=mode,
aux_tenants=aux_tenants, result_format=what_if_result_format,
exclude_change_types=what_if_exclude_change_types,
no_prompt=no_prompt, template_spec=template_spec)
from knack.prompting import prompt_y_n
if not prompt_y_n("\nAre you sure you want to execute the deployment?"):
return None
return _deploy_arm_template_at_resource_group(cmd=cmd,
resource_group_name=resource_group_name,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, mode=mode, rollback_on_error=rollback_on_error,
validate_only=False, no_wait=no_wait,
aux_subscriptions=aux_subscriptions, aux_tenants=aux_tenants,
no_prompt=no_prompt, template_spec=template_spec)
# pylint: disable=unused-argument
def validate_arm_template_at_resource_group(cmd,
resource_group_name=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, mode=None, rollback_on_error=None,
no_wait=False, handle_extended_json_format=None, no_prompt=False, template_spec=None):
return _deploy_arm_template_at_resource_group(cmd,
resource_group_name=resource_group_name,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, mode=mode, rollback_on_error=rollback_on_error,
validate_only=True, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec)
def _deploy_arm_template_at_resource_group(cmd,
resource_group_name=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, mode=None, rollback_on_error=None,
validate_only=False, no_wait=False,
aux_subscriptions=None, aux_tenants=None, no_prompt=False, template_spec=None):
deployment_properties = _prepare_deployment_properties_unmodified(cmd, template_file=template_file,
template_uri=template_uri,
parameters=parameters, mode=mode,
rollback_on_error=rollback_on_error,
no_prompt=no_prompt, template_spec=template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, aux_subscriptions=aux_subscriptions,
aux_tenants=aux_tenants, plug_pipeline=(template_uri is None and template_spec is None))
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment')
deployment = Deployment(properties=deployment_properties)
validation_poller = mgmt_client.validate(resource_group_name, deployment_name, deployment)
validation_result = LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
validation_result = mgmt_client.validate(resource_group_name, deployment_name, deployment_properties)
if validation_result and validation_result.error:
err_message = _build_preflight_error_message(validation_result.error)
raise CLIError(err_message)
if validate_only:
return validation_result
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
return sdk_no_wait(no_wait, mgmt_client.create_or_update, resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, mgmt_client.create_or_update, resource_group_name, deployment_name, deployment_properties)
# pylint: disable=unused-argument
def deploy_arm_template_at_management_group(cmd,
management_group_id=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
no_wait=False, handle_extended_json_format=None, no_prompt=False,
confirm_with_what_if=None, what_if_result_format=None,
what_if_exclude_change_types=None, template_spec=None):
if confirm_with_what_if:
what_if_deploy_arm_template_at_management_group(cmd,
management_group_id=management_group_id,
template_file=template_file, template_uri=template_uri,
parameters=parameters, deployment_name=deployment_name,
deployment_location=deployment_location,
result_format=what_if_result_format,
exclude_change_types=what_if_exclude_change_types,
no_prompt=no_prompt, template_spec=template_spec)
from knack.prompting import prompt_y_n
if not prompt_y_n("\nAre you sure you want to execute the deployment?"):
return None
return _deploy_arm_template_at_management_group(cmd=cmd,
management_group_id=management_group_id,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, deployment_location=deployment_location,
validate_only=False, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec)
# pylint: disable=unused-argument
def validate_arm_template_at_management_group(cmd,
management_group_id=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
no_wait=False, handle_extended_json_format=None,
no_prompt=False, template_spec=None):
return _deploy_arm_template_at_management_group(cmd=cmd,
management_group_id=management_group_id,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, deployment_location=deployment_location,
validate_only=True, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec)
def _deploy_arm_template_at_management_group(cmd,
management_group_id=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None, validate_only=False,
no_wait=False, no_prompt=False, template_spec=None):
deployment_properties = _prepare_deployment_properties_unmodified(cmd, template_file=template_file,
template_uri=template_uri,
parameters=parameters, mode='Incremental',
no_prompt=no_prompt, template_spec=template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, plug_pipeline=(template_uri is None and template_spec is None))
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
ScopedDeployment = cmd.get_models('ScopedDeployment')
deployment = ScopedDeployment(properties=deployment_properties, location=deployment_location)
validation_poller = mgmt_client.validate_at_management_group_scope(management_group_id, deployment_name, deployment)
validation_result = LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
validation_result = mgmt_client.validate_at_management_group_scope(management_group_id, deployment_name,
deployment_properties, deployment_location)
if validation_result and validation_result.error:
err_message = _build_preflight_error_message(validation_result.error)
raise CLIError(err_message)
if validate_only:
return validation_result
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
return sdk_no_wait(no_wait, mgmt_client.create_or_update_at_management_group_scope,
management_group_id, deployment_name, deployment)
return sdk_no_wait(no_wait, mgmt_client.create_or_update_at_management_group_scope,
management_group_id, deployment_name, deployment_properties, deployment_location)
# pylint: disable=unused-argument
def deploy_arm_template_at_tenant_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
no_wait=False, handle_extended_json_format=None, no_prompt=False,
confirm_with_what_if=None, what_if_result_format=None,
what_if_exclude_change_types=None, template_spec=None):
if confirm_with_what_if:
what_if_deploy_arm_template_at_tenant_scope(cmd,
template_file=template_file, template_uri=template_uri,
parameters=parameters, deployment_name=deployment_name,
deployment_location=deployment_location,
result_format=what_if_result_format,
exclude_change_types=what_if_exclude_change_types,
no_prompt=no_prompt, template_spec=template_spec)
from knack.prompting import prompt_y_n
if not prompt_y_n("\nAre you sure you want to execute the deployment?"):
return None
return _deploy_arm_template_at_tenant_scope(cmd=cmd,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, deployment_location=deployment_location,
validate_only=False, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec)
# pylint: disable=unused-argument
def validate_arm_template_at_tenant_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
no_wait=False, handle_extended_json_format=None, no_prompt=False, template_spec=None):
return _deploy_arm_template_at_tenant_scope(cmd=cmd,
template_file=template_file, template_uri=template_uri, parameters=parameters,
deployment_name=deployment_name, deployment_location=deployment_location,
validate_only=True, no_wait=no_wait,
no_prompt=no_prompt, template_spec=template_spec)
def _deploy_arm_template_at_tenant_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None, validate_only=False,
no_wait=False, no_prompt=False, template_spec=None):
deployment_properties = _prepare_deployment_properties_unmodified(cmd, template_file=template_file,
template_uri=template_uri,
parameters=parameters, mode='Incremental',
no_prompt=no_prompt, template_spec=template_spec,)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, plug_pipeline=(template_uri is None and template_spec is None))
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
ScopedDeployment = cmd.get_models('ScopedDeployment')
deployment = ScopedDeployment(properties=deployment_properties, location=deployment_location)
validation_poller = mgmt_client.validate_at_tenant_scope(deployment_name=deployment_name, parameters=deployment)
validation_result = LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
validation_result = mgmt_client.validate_at_tenant_scope(deployment_name=deployment_name,
properties=deployment_properties,
location=deployment_location)
if validation_result and validation_result.error:
err_message = _build_preflight_error_message(validation_result.error)
raise CLIError(err_message)
if validate_only:
return validation_result
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
return sdk_no_wait(no_wait, mgmt_client.create_or_update_at_tenant_scope, deployment_name, deployment)
return sdk_no_wait(no_wait, mgmt_client.create_or_update_at_tenant_scope, deployment_name,
deployment_properties, deployment_location)
def what_if_deploy_arm_template_at_resource_group(cmd, resource_group_name,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, mode=DeploymentMode.incremental,
aux_tenants=None, result_format=None,
no_pretty_print=None, no_prompt=False,
exclude_change_types=None, template_spec=None):
what_if_properties = _prepare_deployment_what_if_properties(cmd, template_file, template_uri,
parameters, mode, result_format, no_prompt, template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, aux_tenants=aux_tenants,
plug_pipeline=(template_uri is None and template_spec is None))
what_if_poller = mgmt_client.what_if(resource_group_name, deployment_name, what_if_properties)
return _what_if_deploy_arm_template_core(cmd.cli_ctx, what_if_poller, no_pretty_print, exclude_change_types)
def what_if_deploy_arm_template_at_subscription_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
result_format=None, no_pretty_print=None, no_prompt=False,
exclude_change_types=None, template_spec=None):
what_if_properties = _prepare_deployment_what_if_properties(cmd, template_file, template_uri, parameters,
DeploymentMode.incremental, result_format, no_prompt, template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, plug_pipeline=(template_uri is None and template_spec is None))
what_if_poller = mgmt_client.what_if_at_subscription_scope(deployment_name, what_if_properties, deployment_location)
return _what_if_deploy_arm_template_core(cmd.cli_ctx, what_if_poller, no_pretty_print, exclude_change_types)
def what_if_deploy_arm_template_at_management_group(cmd, management_group_id=None,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
result_format=None, no_pretty_print=None, no_prompt=False,
exclude_change_types=None, template_spec=None):
what_if_properties = _prepare_deployment_what_if_properties(cmd, template_file, template_uri, parameters,
DeploymentMode.incremental, result_format, no_prompt, template_spec=template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, plug_pipeline=(template_uri is None and template_spec is None))
what_if_poller = mgmt_client.what_if_at_management_group_scope(management_group_id, deployment_name,
deployment_location, what_if_properties)
return _what_if_deploy_arm_template_core(cmd.cli_ctx, what_if_poller, no_pretty_print, exclude_change_types)
def what_if_deploy_arm_template_at_tenant_scope(cmd,
template_file=None, template_uri=None, parameters=None,
deployment_name=None, deployment_location=None,
result_format=None, no_pretty_print=None, no_prompt=False,
exclude_change_types=None, template_spec=None):
what_if_properties = _prepare_deployment_what_if_properties(cmd, template_file, template_uri, parameters,
DeploymentMode.incremental, result_format, no_prompt, template_spec)
mgmt_client = _get_deployment_management_client(cmd.cli_ctx, plug_pipeline=(template_uri is None and template_spec is None))
what_if_poller = mgmt_client.what_if_at_tenant_scope(deployment_name, deployment_location, what_if_properties)
return _what_if_deploy_arm_template_core(cmd.cli_ctx, what_if_poller, no_pretty_print, exclude_change_types)
def _what_if_deploy_arm_template_core(cli_ctx, what_if_poller, no_pretty_print, exclude_change_types):
what_if_result = LongRunningOperation(cli_ctx)(what_if_poller)
if what_if_result.error:
# The status code is 200 even when there's an error, because
# it is technically a successful What-If operation. The error
# is on the ARM template but not the operation.
err_message = _build_preflight_error_message(what_if_result.error)
raise CLIError(err_message)
if exclude_change_types:
exclude_change_types = set(map(lambda x: x.lower(), exclude_change_types))
what_if_result.changes = list(
filter(lambda x: x.change_type.lower() not in exclude_change_types, what_if_result.changes)
)
if no_pretty_print:
return what_if_result
try:
if cli_ctx.enable_color:
# Disabling colorama since it will silently strip out the Xterm 256 color codes the What-If formatter
# is using. Unfortunately, the colors that colorama supports are very limited, which doesn't meet our needs.
from colorama import deinit
deinit()
# Enable virtual terminal mode for Windows console so it processes color codes.
if platform.system() == "Windows":
from ._win_vt import enable_vt_mode
enable_vt_mode()
print(format_what_if_operation_result(what_if_result, cli_ctx.enable_color))
finally:
if cli_ctx.enable_color:
from colorama import init
init()
return None
def _build_preflight_error_message(preflight_error):
err_messages = [f'{preflight_error.code} - {preflight_error.message}']
for detail in preflight_error.details or []:
err_messages.append(_build_preflight_error_message(detail))
return '\n'.join(err_messages)
def _prepare_deployment_properties_unmodified(cmd, template_file=None, template_uri=None, parameters=None,
mode=None, rollback_on_error=None, no_prompt=False, template_spec=None):
cli_ctx = cmd.cli_ctx
DeploymentProperties, TemplateLink, OnErrorDeployment = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
'DeploymentProperties', 'TemplateLink',
'OnErrorDeployment', mod='models')
template_link = None
template_obj = None
on_error_deployment = None
template_content = None
if template_uri:
template_link = TemplateLink(uri=template_uri)
template_obj = _remove_comments_from_json(_urlretrieve(template_uri).decode('utf-8'), file_path=template_uri)
elif template_spec:
template_link = TemplateLink(id=template_spec, mode="Incremental")
template_obj = show_resource(cmd=cmd, resource_ids=[template_spec]).properties['template']
else:
template_content = read_file_content(template_file)
template_obj = _remove_comments_from_json(template_content, file_path=template_file)
if rollback_on_error == '':
on_error_deployment = OnErrorDeployment(type='LastSuccessful')
elif rollback_on_error:
on_error_deployment = OnErrorDeployment(type='SpecificDeployment', deployment_name=rollback_on_error)
template_param_defs = template_obj.get('parameters', {})
template_obj['resources'] = template_obj.get('resources', [])
parameters = _process_parameters(template_param_defs, parameters) or {}
parameters = _get_missing_parameters(parameters, template_obj, _prompt_for_parameters, no_prompt)
parameters = json.loads(json.dumps(parameters))
properties = DeploymentProperties(template=template_content, template_link=template_link,
parameters=parameters, mode=mode, on_error_deployment=on_error_deployment)
return properties
def _prepare_deployment_what_if_properties(cmd, template_file, template_uri, parameters,
mode, result_format, no_prompt, template_spec):
DeploymentWhatIfProperties, DeploymentWhatIfSettings = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
'DeploymentWhatIfProperties', 'DeploymentWhatIfSettings',
mod='models')
deployment_properties = _prepare_deployment_properties_unmodified(cmd=cmd, template_file=template_file, template_uri=template_uri,
parameters=parameters, mode=mode, no_prompt=no_prompt, template_spec=template_spec)
deployment_what_if_properties = DeploymentWhatIfProperties(template=deployment_properties.template, template_link=deployment_properties.template_link,
parameters=deployment_properties.parameters, mode=deployment_properties.mode,
what_if_settings=DeploymentWhatIfSettings(result_format=result_format))
return deployment_what_if_properties
def _get_deployment_management_client(cli_ctx, aux_subscriptions=None, aux_tenants=None, plug_pipeline=True):
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, aux_subscriptions=aux_subscriptions,
aux_tenants=aux_tenants)
deployment_client = smc.deployments # This handles multi-api resolution for you
if plug_pipeline:
# pylint: disable=protected-access
deployment_client._serialize = JSONSerializer(
deployment_client._serialize.dependencies
)
# Plug this as default HTTP pipeline
from msrest.pipeline import Pipeline
from msrest.pipeline.requests import (
RequestsCredentialsPolicy,
RequestsPatchSession,
PipelineRequestsHTTPSender
)
from msrest.universal_http.requests import RequestsHTTPSender
smc.config.pipeline = Pipeline(
policies=[
JsonCTemplatePolicy(),
smc.config.user_agent_policy,
RequestsPatchSession(),
smc.config.http_logger_policy,
RequestsCredentialsPolicy(smc.config.credentials)
],
sender=PipelineRequestsHTTPSender(RequestsHTTPSender(smc.config))
)
return deployment_client
def _list_resources_odata_filter_builder(resource_group_name=None, resource_provider_namespace=None,
resource_type=None, name=None, tag=None, location=None):
"""Build up OData filter string from parameters """
if tag is not None:
if resource_group_name:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--resource-group\''
'(If the default value for resource group is set, please use \'az configure --defaults group=""\' command to clear it first)')
if resource_provider_namespace:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--namespace\'')
if resource_type:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--resource-type\'')
if name:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--name\'')
if location:
raise IncorrectUsageError('you cannot use \'--tag\' with \'--location\''
'(If the default value for location is set, please use \'az configure --defaults location=""\' command to clear it first)')
filters = []
if resource_group_name:
filters.append("resourceGroup eq '{}'".format(resource_group_name))
if name:
filters.append("name eq '{}'".format(name))
if location:
filters.append("location eq '{}'".format(location))
if resource_type:
if resource_provider_namespace:
f = "'{}/{}'".format(resource_provider_namespace, resource_type)
else:
if not re.match('[^/]+/[^/]+', resource_type):
raise CLIError(
'Malformed resource-type: '
'--resource-type=<namespace>/<resource-type> expected.')
# assume resource_type is <namespace>/<type>; at worst we get a server error
f = "'{}'".format(resource_type)
filters.append("resourceType eq " + f)
else:
if resource_provider_namespace:
raise CLIError('--namespace also requires --resource-type')
if tag:
tag_name = list(tag.keys())[0] if isinstance(tag, dict) else tag
tag_value = tag[tag_name] if isinstance(tag, dict) else ''
if tag_name:
if tag_name[-1] == '*':
filters.append("startswith(tagname, '%s')" % tag_name[0:-1])
else:
filters.append("tagname eq '%s'" % tag_name)
if tag_value != '':
filters.append("tagvalue eq '%s'" % tag_value)
return ' and '.join(filters)
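# Example (illustrative): with resource_group_name='myRG' and location='westus' this
# builds "resourceGroup eq 'myRG' and location eq 'westus'"; with only tag={'env': 'prod'}
# it builds "tagname eq 'env' and tagvalue eq 'prod'".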
def _get_auth_provider_latest_api_version(cli_ctx):
rcf = _resource_client_factory(cli_ctx)
api_version = _ResourceUtils.resolve_api_version(rcf, 'Microsoft.Authorization', None, 'providerOperations')
return api_version
def _update_provider(cli_ctx, namespace, registering, wait):
import time
target_state = 'Registered' if registering else 'Unregistered'
rcf = _resource_client_factory(cli_ctx)
if registering:
r = rcf.providers.register(namespace)
else:
r = rcf.providers.unregister(namespace)
if r.registration_state == target_state:
return
if wait:
while True:
time.sleep(10)
rp_info = rcf.providers.get(namespace)
if rp_info.registration_state == target_state:
break
else:
action = 'Registering' if registering else 'Unregistering'
msg_template = '%s is still ongoing. You can monitor it using \'az provider show -n %s\''
logger.warning(msg_template, action, namespace)
def _build_policy_scope(subscription_id, resource_group_name, scope):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = "Resource group '{}' is redundant because 'scope' is supplied"
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
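# Example (illustrative): _build_policy_scope('0000-sub-id', 'myRG', None) returns
# '/subscriptions/0000-sub-id/resourceGroups/myRG'; an explicit scope is returned as-is,
# and supplying both a scope and a resource group raises CLIError.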
def _resolve_policy_id(cmd, policy, policy_set_definition, client):
policy_id = policy or policy_set_definition
if not is_valid_resource_id(policy_id):
if policy:
policy_def = _get_custom_or_builtin_policy(cmd, client, policy)
policy_id = policy_def.id
else:
policy_set_def = _get_custom_or_builtin_policy(cmd, client, policy_set_definition, None, None, True)
policy_id = policy_set_def.id
return policy_id
def _parse_management_group_reference(name):
if _is_management_group_scope(name):
parts = name.split('/')
if len(parts) >= 9:
return parts[4], parts[8]
return None, name
def _parse_management_group_id(scope):
if _is_management_group_scope(scope):
parts = scope.split('/')
if len(parts) >= 5:
return parts[4]
return None
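# Example (illustrative): for a management-group-scoped id such as
# '/providers/Microsoft.Management/managementGroups/myMG/providers/Microsoft.Authorization/policyDefinitions/myPolicy',
# _parse_management_group_reference returns ('myMG', 'myPolicy'), while a plain name gives
# (None, name); _parse_management_group_id('/providers/Microsoft.Management/managementGroups/myMG')
# returns 'myMG'.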
def _get_custom_or_builtin_policy(cmd, client, name, subscription=None, management_group=None, for_policy_set=False):
from msrest.exceptions import HttpOperationError
from msrestazure.azure_exceptions import CloudError
policy_operations = client.policy_set_definitions if for_policy_set else client.policy_definitions
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
client.config.subscription_id = subscription_id
try:
if cmd.supported_api_version(min_api='2018-03-01'):
if not management_group:
management_group, name = _parse_management_group_reference(name)
if management_group:
return policy_operations.get_at_management_group(name, management_group)
return policy_operations.get(name)
except (CloudError, HttpOperationError) as ex:
status_code = ex.status_code if isinstance(ex, CloudError) else ex.response.status_code
if status_code == 404:
try:
return policy_operations.get_built_in(name)
except CloudError as ex2:
# When the `--policy` parameter is neither a valid policy definition name nor a
# well-formed policy definition id, the service raises a misleading "AuthorizationFailed"
# error, so rewrite the exception message here to point at the actual problem.
if ex2.status_code == 403 and ex2.error and ex2.error.error == 'AuthorizationFailed':
raise IncorrectUsageError('\'--policy\' should be a valid name or id of the policy definition')
raise ex2
raise
def _load_file_string_or_uri(file_or_string_or_uri, name, required=True):
if file_or_string_or_uri is None:
if required:
raise CLIError('--{} is required'.format(name))
return None
url = urlparse(file_or_string_or_uri)
if url.scheme in ('http', 'https', 'file'):
response = urlopen(file_or_string_or_uri)
reader = codecs.getreader('utf-8')
result = json.load(reader(response))
response.close()
return result
if os.path.exists(file_or_string_or_uri):
return get_file_json(file_or_string_or_uri)
return shell_safe_json_parse(file_or_string_or_uri)
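# Example (illustrative): the argument may be an http(s)/file URL (downloaded and parsed
# as JSON), a path to a local JSON file, or an inline JSON string such as '{"key": "value"}';
# all three forms yield the same parsed JSON object.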
def _call_subscription_get(cmd, lock_client, *args):
if cmd.supported_api_version(max_api='2015-01-01'):
return lock_client.management_locks.get(*args)
return lock_client.management_locks.get_at_subscription_level(*args)
def _extract_lock_params(resource_group_name, resource_provider_namespace,
resource_type, resource_name):
if resource_group_name is None:
return (None, None, None, None)
if resource_name is None:
return (resource_group_name, None, None, None)
parts = resource_type.split('/', 2)
if resource_provider_namespace is None and len(parts) == 2:
resource_provider_namespace = parts[0]
resource_type = parts[1]
return (resource_group_name, resource_name, resource_provider_namespace, resource_type)
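# Example (illustrative): _extract_lock_params('myRG', None, 'Microsoft.Storage/storageAccounts', 'mysa')
# splits the namespace out of the type and returns ('myRG', 'mysa', 'Microsoft.Storage', 'storageAccounts');
# with only a resource group supplied it returns ('myRG', None, None, None).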
def _update_lock_parameters(parameters, level, notes):
if level is not None:
parameters.level = level
if notes is not None:
parameters.notes = notes
def _validate_resource_inputs(resource_group_name, resource_provider_namespace,
resource_type, resource_name):
if resource_group_name is None:
raise CLIError('--resource-group/-g is required.')
if resource_type is None:
raise CLIError('--resource-type is required')
if resource_name is None:
raise CLIError('--name/-n is required')
if resource_provider_namespace is None:
raise CLIError('--namespace is required')
# region Custom Commands
def list_resource_groups(cmd, tag=None): # pylint: disable=no-self-use
""" List resource groups, optionally filtered by a tag.
:param str tag: tag to filter by in 'key[=value]' format
"""
rcf = _resource_client_factory(cmd.cli_ctx)
filters = []
if tag:
key = list(tag.keys())[0]
filters.append("tagname eq '{}'".format(key))
filters.append("tagvalue eq '{}'".format(tag[key]))
filter_text = ' and '.join(filters) if filters else None
groups = rcf.resource_groups.list(filter=filter_text)
return list(groups)
def create_resource_group(cmd, rg_name, location, tags=None, managed_by=None):
""" Create a new resource group.
:param str rg_name: the desired resource group name
:param str location: the resource group location
:param str tags: tags in 'a=b c' format
"""
rcf = _resource_client_factory(cmd.cli_ctx)
ResourceGroup = cmd.get_models('ResourceGroup')
parameters = ResourceGroup(
location=location,
tags=tags
)
if cmd.supported_api_version(min_api='2016-09-01'):
parameters.managed_by = managed_by
return rcf.resource_groups.create_or_update(rg_name, parameters)
def update_resource_group(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def export_group_as_template(
cmd, resource_group_name, include_comments=False, include_parameter_default_value=False, resource_ids=None, skip_resource_name_params=False, skip_all_params=False):
"""Captures a resource group as a template.
:param str resource_group_name: the name of the resource group.
:param resource_ids: space-separated resource ids to filter the export by. To export all resources, do not specify this argument or supply "*".
:param bool include_comments: export template with comments.
:param bool include_parameter_default_value: export template parameter with default value.
:param bool skip_resource_name_params: export template and skip resource name parameterization.
:param bool skip_all_params: export template parameter and skip all parameterization.
"""
rcf = _resource_client_factory(cmd.cli_ctx)
export_options = []
if include_comments:
export_options.append('IncludeComments')
if include_parameter_default_value:
export_options.append('IncludeParameterDefaultValue')
if skip_resource_name_params:
export_options.append('SkipResourceNameParameterization')
if skip_all_params:
export_options.append('SkipAllParameterization')
resources = []
if resource_ids is None or resource_ids[0] == "*":
resources = ["*"]
else:
for i in resource_ids:
if is_valid_resource_id(i):
resources.append(i)
else:
raise CLIError('az resource: error: argument --resource-ids: invalid ResourceId value: \'%s\'' % i)
options = ','.join(export_options) if export_options else None
# Exporting a resource group as a template is async since API version 2019-08-01.
if cmd.supported_api_version(min_api='2019-08-01'):
result_poller = rcf.resource_groups.export_template(resource_group_name, resources, options=options)
result = LongRunningOperation(cmd.cli_ctx)(result_poller)
else:
result = rcf.resource_groups.export_template(resource_group_name, resources, options=options)
# pylint: disable=no-member
# On error, server still returns 200, with details in the error attribute
if result.error:
error = result.error
try:
logger.warning(error.message)
except AttributeError:
logger.warning(str(error))
for detail in getattr(error, 'details', None) or []:
logger.error(detail.message)
return result.template
def create_application(cmd, resource_group_name,
application_name, managedby_resource_group_id,
kind, managedapp_definition_id=None, location=None,
plan_name=None, plan_publisher=None, plan_product=None,
plan_version=None, tags=None, parameters=None):
""" Create a new managed application.
:param str resource_group_name: the desired resource group name
:param str application_name: the managed application name
:param str kind: the managed application kind. can be marketplace or servicecatalog
:param str plan_name: the managed application package plan name
:param str plan_publisher: the managed application package plan publisher
:param str plan_product: the managed application package plan product
:param str plan_version: the managed application package plan version
:param str tags: tags in 'a=b c' format
"""
from azure.mgmt.resource.managedapplications.models import Application, Plan
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
rcf = _resource_client_factory(cmd.cli_ctx)
if not location:
location = rcf.resource_groups.get(resource_group_name).location
application = Application(
location=location,
managed_resource_group_id=managedby_resource_group_id,
kind=kind,
tags=tags
)
if kind.lower() == 'servicecatalog':
if managedapp_definition_id:
application.application_definition_id = managedapp_definition_id
else:
raise CLIError('--managedapp-definition-id is required if kind is ServiceCatalog')
elif kind.lower() == 'marketplace':
if (plan_name is None and plan_product is None and
plan_publisher is None and plan_version is None):
raise CLIError('--plan-name, --plan-product, --plan-publisher and '
'--plan-version are all required if kind is MarketPlace')
application.plan = Plan(name=plan_name, publisher=plan_publisher, product=plan_product, version=plan_version)
applicationParameters = None
if parameters:
if os.path.exists(parameters):
applicationParameters = get_file_json(parameters)
else:
applicationParameters = shell_safe_json_parse(parameters)
application.parameters = applicationParameters
return racf.applications.create_or_update(resource_group_name, application_name, application)
def show_application(cmd, resource_group_name=None, application_name=None):
""" Gets a managed application.
:param str resource_group_name: the resource group name
:param str application_name: the managed application name
"""
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
return racf.applications.get(resource_group_name, application_name)
def show_applicationdefinition(cmd, resource_group_name=None, application_definition_name=None):
""" Gets a managed application definition.
:param str resource_group_name: the resource group name
:param str application_definition_name: the managed application definition name
"""
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
return racf.application_definitions.get(resource_group_name, application_definition_name)
def create_applicationdefinition(cmd, resource_group_name,
application_definition_name,
lock_level, authorizations,
description, display_name,
package_file_uri=None, create_ui_definition=None,
main_template=None, location=None, tags=None):
""" Create a new managed application definition.
:param str resource_group_name: the desired resource group name
:param str application_definition_name: the managed application definition name
:param str description: the managed application definition description
:param str display_name: the managed application definition display name
:param str package_file_uri: the managed application definition package file uri
:param str create_ui_definition: the managed application definition create ui definition
:param str main_template: the managed application definition main template
:param str tags: tags in 'a=b c' format
"""
from azure.mgmt.resource.managedapplications.models import ApplicationDefinition, ApplicationProviderAuthorization
if not package_file_uri and not create_ui_definition and not main_template:
raise CLIError('usage error: --package-file-uri <url> | --create-ui-definition --main-template')
if package_file_uri:
if create_ui_definition or main_template:
raise CLIError('usage error: must not specify --create-ui-definition or --main-template together with --package-file-uri')
if not package_file_uri:
if not create_ui_definition or not main_template:
raise CLIError('usage error: must specify both --create-ui-definition and --main-template')
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
rcf = _resource_client_factory(cmd.cli_ctx)
if not location:
location = rcf.resource_groups.get(resource_group_name).location
authorizations = authorizations or []
applicationAuthList = []
for name_value in authorizations:
# split at the first ':'; neither principalId nor roleDefinitionId should contain a ':'
principalId, roleDefinitionId = name_value.split(':', 1)
applicationAuth = ApplicationProviderAuthorization(
principal_id=principalId,
role_definition_id=roleDefinitionId)
applicationAuthList.append(applicationAuth)
applicationDef = ApplicationDefinition(lock_level=lock_level,
authorizations=applicationAuthList,
package_file_uri=package_file_uri)
applicationDef.display_name = display_name
applicationDef.description = description
applicationDef.location = location
applicationDef.package_file_uri = package_file_uri
applicationDef.create_ui_definition = create_ui_definition
applicationDef.main_template = main_template
applicationDef.tags = tags
return racf.application_definitions.create_or_update(resource_group_name,
application_definition_name, applicationDef)
def list_applications(cmd, resource_group_name=None):
racf = _resource_managedapps_client_factory(cmd.cli_ctx)
if resource_group_name:
applications = racf.applications.list_by_resource_group(resource_group_name)
else:
applications = racf.applications.list_by_subscription()
return list(applications)
def list_deployments_at_subscription_scope(cmd, filter_string=None):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.list_at_subscription_scope(filter=filter_string)
def list_deployments_at_resource_group(cmd, resource_group_name, filter_string=None):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.list_by_resource_group(resource_group_name, filter=filter_string)
def list_deployments_at_management_group(cmd, management_group_id, filter_string=None):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.list_at_management_group_scope(management_group_id, filter=filter_string)
def list_deployments_at_tenant_scope(cmd, filter_string=None):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.list_at_tenant_scope(filter=filter_string)
def get_deployment_at_subscription_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.get_at_subscription_scope(deployment_name)
def get_deployment_at_resource_group(cmd, resource_group_name, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.get(resource_group_name, deployment_name)
def get_deployment_at_management_group(cmd, management_group_id, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.get_at_management_group_scope(management_group_id, deployment_name)
def get_deployment_at_tenant_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.get_at_tenant_scope(deployment_name)
def delete_deployment_at_subscription_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.delete_at_subscription_scope(deployment_name)
def delete_deployment_at_resource_group(cmd, resource_group_name, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.delete(resource_group_name, deployment_name)
def delete_deployment_at_management_group(cmd, management_group_id, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.delete_at_management_group_scope(management_group_id, deployment_name)
def delete_deployment_at_tenant_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.delete_at_tenant_scope(deployment_name)
def cancel_deployment_at_subscription_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.cancel_at_subscription_scope(deployment_name)
def cancel_deployment_at_resource_group(cmd, resource_group_name, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.cancel(resource_group_name, deployment_name)
def cancel_deployment_at_management_group(cmd, management_group_id, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.cancel_at_management_group_scope(management_group_id, deployment_name)
def cancel_deployment_at_tenant_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployments.cancel_at_tenant_scope(deployment_name)
# pylint: disable=unused-argument
def deploy_arm_template(cmd, resource_group_name,
template_file=None, template_uri=None, deployment_name=None,
parameters=None, mode=None, rollback_on_error=None, no_wait=False,
handle_extended_json_format=None, aux_subscriptions=None, aux_tenants=None,
no_prompt=False):
return _deploy_arm_template_core_unmodified(cmd, resource_group_name=resource_group_name,
template_file=template_file, template_uri=template_uri,
deployment_name=deployment_name, parameters=parameters, mode=mode,
rollback_on_error=rollback_on_error, no_wait=no_wait,
aux_subscriptions=aux_subscriptions, aux_tenants=aux_tenants,
no_prompt=no_prompt)
# pylint: disable=unused-argument
def validate_arm_template(cmd, resource_group_name, template_file=None, template_uri=None,
parameters=None, mode=None, rollback_on_error=None, handle_extended_json_format=None,
no_prompt=False):
return _deploy_arm_template_core_unmodified(cmd, resource_group_name, template_file, template_uri,
'deployment_dry_run', parameters, mode, rollback_on_error,
validate_only=True, no_prompt=no_prompt)
def export_template_at_subscription_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
result = rcf.deployments.export_template_at_subscription_scope(deployment_name)
print(json.dumps(result.template, indent=2)) # pylint: disable=no-member
def export_template_at_resource_group(cmd, resource_group_name, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
result = rcf.deployments.export_template(resource_group_name, deployment_name)
print(json.dumps(result.template, indent=2)) # pylint: disable=no-member
def export_template_at_management_group(cmd, management_group_id, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
result = rcf.deployments.export_template_at_management_group_scope(management_group_id, deployment_name)
print(json.dumps(result.template, indent=2)) # pylint: disable=no-member
def export_template_at_tenant_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
result = rcf.deployments.export_template_at_tenant_scope(deployment_name)
print(json.dumps(result.template, indent=2)) # pylint: disable=no-member
def export_deployment_as_template(cmd, resource_group_name, deployment_name):
smc = _resource_client_factory(cmd.cli_ctx)
result = smc.deployments.export_template(resource_group_name, deployment_name)
print(json.dumps(result.template, indent=2)) # pylint: disable=no-member
def create_resource(cmd, properties,
resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None,
resource_id=None, api_version=None, location=None, is_full_object=False,
latest_include_preview=False):
res = _ResourceUtils(cmd.cli_ctx, resource_group_name, resource_provider_namespace,
parent_resource_path, resource_type, resource_name,
resource_id, api_version, latest_include_preview=latest_include_preview)
return res.create_resource(properties, location, is_full_object)
def _get_parsed_resource_ids(resource_ids):
"""
Returns a generator of parsed resource ids. Raises CLIError if any resource id is invalid.
"""
if not resource_ids:
return None
for rid in resource_ids:
if not is_valid_resource_id(rid):
raise CLIError('az resource: error: argument --ids: invalid ResourceId value: \'%s\'' % rid)
return ({'resource_id': rid} for rid in resource_ids)
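# Example (illustrative): two valid ids yield a generator of {'resource_id': <id>} dicts;
# any malformed id raises CLIError before the generator is returned.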
def _get_rsrc_util_from_parsed_id(cli_ctx, parsed_id, api_version, latest_include_preview=False):
return _ResourceUtils(cli_ctx,
parsed_id.get('resource_group', None),
parsed_id.get('resource_namespace', None),
parsed_id.get('resource_parent', None),
parsed_id.get('resource_type', None),
parsed_id.get('resource_name', None),
parsed_id.get('resource_id', None),
api_version,
latest_include_preview=latest_include_preview)
def _create_parsed_id(cli_ctx, resource_group_name=None, resource_provider_namespace=None, parent_resource_path=None,
resource_type=None, resource_name=None):
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cli_ctx)
return {
'resource_group': resource_group_name,
'resource_namespace': resource_provider_namespace,
'resource_parent': parent_resource_path,
'resource_type': resource_type,
'resource_name': resource_name,
'subscription': subscription
}
def _single_or_collection(obj, default=None):
if not obj:
return default
if isinstance(obj, list) and len(obj) == 1:
return obj[0]
return obj
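# Example (illustrative): _single_or_collection([obj]) unwraps to obj, a multi-element list
# is returned unchanged, and an empty or None input falls back to the default (None).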
# pylint: disable=unused-argument
def show_resource(cmd, resource_ids=None, resource_group_name=None,
resource_provider_namespace=None, parent_resource_path=None, resource_type=None,
resource_name=None, api_version=None, include_response_body=False, latest_include_preview=False):
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
return _single_or_collection(
[_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview).get_resource(
include_response_body) for id_dict in parsed_ids])
# pylint: disable=unused-argument
def delete_resource(cmd, resource_ids=None, resource_group_name=None,
resource_provider_namespace=None, parent_resource_path=None, resource_type=None,
resource_name=None, api_version=None, latest_include_preview=False):
"""
Deletes the given resource(s).
This function allows deletion of ids with dependencies on one another.
This is done with multiple passes through the given ids.
"""
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
to_be_deleted = [(_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview), id_dict)
for id_dict in parsed_ids]
results = []
from msrestazure.azure_exceptions import CloudError
while to_be_deleted:
logger.debug("Start new loop to delete resources.")
operations = []
failed_to_delete = []
for rsrc_utils, id_dict in to_be_deleted:
try:
operations.append(rsrc_utils.delete())
resource = _build_resource_id(**id_dict) or resource_name
logger.debug("deleting %s", resource)
except CloudError as e:
# request to delete failed, add parsed id dict back to queue
id_dict['exception'] = str(e)
failed_to_delete.append((rsrc_utils, id_dict))
to_be_deleted = failed_to_delete
# stop deleting if none deletable
if not operations:
break
# all operations return result before next pass
for operation in operations:
results.append(operation.result())
if to_be_deleted:
error_msg_builder = ['Some resources failed to be deleted (run with `--verbose` for more information):']
for _, id_dict in to_be_deleted:
logger.info(id_dict['exception'])
resource_id = _build_resource_id(**id_dict) or id_dict['resource_id']
error_msg_builder.append(resource_id)
raise CLIError(os.linesep.join(error_msg_builder))
return _single_or_collection(results)
# pylint: disable=unused-argument
def update_resource(cmd, parameters, resource_ids=None,
resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, api_version=None,
latest_include_preview=False):
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
return _single_or_collection(
[_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview).update(parameters)
for id_dict in parsed_ids])
# pylint: disable=unused-argument
def tag_resource(cmd, tags, resource_ids=None, resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, api_version=None,
is_incremental=None, latest_include_preview=False):
""" Updates the tags on an existing resource. To clear tags, specify the --tag option
without anything else. """
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
return _single_or_collection(
[_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview).tag(
tags, is_incremental) for id_dict in parsed_ids])
# pylint: disable=unused-argument
def invoke_resource_action(cmd, action, request_body=None, resource_ids=None,
resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None,
api_version=None, latest_include_preview=False):
""" Invokes the provided action on an existing resource."""
parsed_ids = _get_parsed_resource_ids(resource_ids) or [_create_parsed_id(cmd.cli_ctx,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name)]
return _single_or_collection(
[_get_rsrc_util_from_parsed_id(cmd.cli_ctx, id_dict, api_version, latest_include_preview).invoke_action(
action, request_body) for id_dict in parsed_ids])
def get_deployment_operations(client, resource_group_name, deployment_name, operation_ids):
"""get a deployment's operation."""
result = []
for op_id in operation_ids:
dep = client.get(resource_group_name, deployment_name, op_id)
result.append(dep)
return result
def get_deployment_operations_at_subscription_scope(client, deployment_name, operation_ids):
result = []
for op_id in operation_ids:
deployment = client.get_at_subscription_scope(deployment_name, op_id)
result.append(deployment)
return result
def get_deployment_operations_at_resource_group(client, resource_group_name, deployment_name, operation_ids):
result = []
for op_id in operation_ids:
dep = client.get(resource_group_name, deployment_name, op_id)
result.append(dep)
return result
def get_deployment_operations_at_management_group(client, management_group_id, deployment_name, operation_ids):
result = []
for op_id in operation_ids:
dep = client.get_at_management_group_scope(management_group_id, deployment_name, op_id)
result.append(dep)
return result
def get_deployment_operations_at_tenant_scope(client, deployment_name, operation_ids):
result = []
for op_id in operation_ids:
dep = client.get_at_tenant_scope(deployment_name, op_id)
result.append(dep)
return result
def list_deployment_scripts(cmd, resource_group_name=None):
rcf = _resource_deploymentscripts_client_factory(cmd.cli_ctx)
if resource_group_name is not None:
return rcf.deployment_scripts.list_by_resource_group(resource_group_name)
return rcf.deployment_scripts.list_by_subscription()
def get_deployment_script(cmd, resource_group_name, name):
rcf = _resource_deploymentscripts_client_factory(cmd.cli_ctx)
return rcf.deployment_scripts.get(resource_group_name, name)
def get_deployment_script_logs(cmd, resource_group_name, name):
rcf = _resource_deploymentscripts_client_factory(cmd.cli_ctx)
return rcf.deployment_scripts.get_logs(resource_group_name, name)
def delete_deployment_script(cmd, resource_group_name, name):
rcf = _resource_deploymentscripts_client_factory(cmd.cli_ctx)
rcf.deployment_scripts.delete(resource_group_name, name)
def get_template_spec(cmd, resource_group_name=None, name=None, version=None, template_spec=None):
if template_spec:
id_parts = parse_resource_id(template_spec)
resource_group_name = id_parts.get('resource_group')
name = id_parts.get('name')
version = id_parts.get('resource_name')
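# If the id has no '/versions/<version>' segment, parse_resource_id reports the template
# spec name itself as 'resource_name'; treat that case as "no version requested".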
if version == name:
version = None
rcf = _resource_templatespecs_client_factory(cmd.cli_ctx)
if version:
return rcf.template_spec_versions.get(resource_group_name, name, version)
return rcf.template_specs.get(resource_group_name, name)
def create_template_spec(cmd, resource_group_name, name, template_file=None, location=None, display_name=None,
description=None, version=None, version_description=None):
artifacts = None
input_template = None
if location is None:
rcf = _resource_client_factory(cmd.cli_ctx)
location = rcf.resource_groups.get(resource_group_name).location
rcf = _resource_templatespecs_client_factory(cmd.cli_ctx)
if version:
if template_file:
from azure.cli.command_modules.resource._packing_engine import (pack)
packed_template = pack(cmd, template_file)
input_template = getattr(packed_template, 'RootTemplate')
artifacts = getattr(packed_template, 'Artifacts')
try: # Check if parent template spec already exists.
rcf.template_specs.get(resource_group_name=resource_group_name, template_spec_name=name)
except Exception: # pylint: disable=broad-except
TemplateSpec = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_TEMPLATESPECS, 'TemplateSpec', mod='models')
template_spec_parent = TemplateSpec(location=location, description=description, display_name=display_name, tags=None)
rcf.template_specs.create_or_update(resource_group_name, name, template_spec_parent)
TemplateSpecVersion = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_TEMPLATESPECS, 'TemplateSpecVersion', mod='models')
template_spec_child = TemplateSpecVersion(location=location, artifacts=artifacts, description=version_description, template=input_template, tags=None)
return rcf.template_spec_versions.create_or_update(resource_group_name, name, version, template_spec_child)
TemplateSpec = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_TEMPLATESPECS, 'TemplateSpec', mod='models')
template_spec_parent = TemplateSpec(location=location, description=description, display_name=display_name, tags=None)
return rcf.template_specs.create_or_update(resource_group_name, name, template_spec_parent)
def update_template_spec(cmd, resource_group_name=None, name=None, template_spec=None, template_file=None, display_name=None,
description=None, version=None, version_description=None):
rcf = _resource_templatespecs_client_factory(cmd.cli_ctx)
if template_spec:
id_parts = parse_resource_id(template_spec)
resource_group_name = id_parts.get('resource_group')
name = id_parts.get('name')
version = id_parts.get('resource_name')
if version == name:
version = None
existing_template = None
artifacts = None
if template_file:
from azure.cli.command_modules.resource._packing_engine import (pack)
packed_template = pack(cmd, template_file)
input_template = getattr(packed_template, 'RootTemplate')
artifacts = getattr(packed_template, 'Artifacts')
if version:
existing_template = rcf.template_spec_versions.get(resource_group_name=resource_group_name, template_spec_name=name, template_spec_version=version)
location = getattr(existing_template, 'location')
version_tags = getattr(existing_template, 'tags')
if version_description is None:
version_description = getattr(existing_template, 'description')
if template_file is None:
input_template = getattr(existing_template, 'template')
TemplateSpecVersion = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_TEMPLATESPECS, 'TemplateSpecVersion', mod='models')
updated_template_spec = TemplateSpecVersion(location=location, artifacts=artifacts, description=version_description, template=input_template, tags=version_tags)
return rcf.template_spec_versions.create_or_update(resource_group_name, name, version, updated_template_spec)
existing_template = rcf.template_specs.get(resource_group_name=resource_group_name, template_spec_name=name)
location = getattr(existing_template, 'location')
tags = getattr(existing_template, 'tags')
if display_name is None:
display_name = getattr(existing_template, 'display_name')
if description is None:
description = getattr(existing_template, 'description')
TemplateSpec = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_TEMPLATESPECS, 'TemplateSpec', mod='models')
root_template = TemplateSpec(location=location, description=description, display_name=display_name, tags=tags)
return rcf.template_specs.create_or_update(resource_group_name, name, root_template)
def export_template_spec(cmd, output_folder, resource_group_name=None, name=None, version=None, template_spec=None):
rcf = _resource_templatespecs_client_factory(cmd.cli_ctx)
if template_spec:
id_parts = parse_resource_id(template_spec)
resource_group_name = id_parts.get('resource_group')
name = id_parts.get('name')
version = id_parts.get('resource_name')
if version == name:
version = None
exported_template = rcf.template_spec_versions.get(resource_group_name, name, version) if version else rcf.template_specs.get(resource_group_name, name)
from azure.cli.command_modules.resource._packing_engine import (unpack)
return unpack(cmd, exported_template, output_folder, (str(name) + '.JSON'))
def delete_template_spec(cmd, resource_group_name=None, name=None, version=None, template_spec=None):
rcf = _resource_templatespecs_client_factory(cmd.cli_ctx)
if template_spec:
id_parts = parse_resource_id(template_spec)
resource_group_name = id_parts.get('resource_group')
name = id_parts.get('name')
version = id_parts.get('resource_name')
if version == name:
version = None
if version:
return rcf.template_specs.delete(resource_group_name=resource_group_name, template_spec_name=name, template_spec_version=version)
return rcf.template_specs.delete(resource_group_name=resource_group_name, template_spec_name=name)
def list_template_specs(cmd, resource_group_name=None, name=None):
rcf = _resource_templatespecs_client_factory(cmd.cli_ctx)
if resource_group_name is not None:
if name is not None:
return rcf.template_spec_versions.list(resource_group_name=resource_group_name, template_spec_name=name)
return rcf.template_specs.list_by_resource_group(resource_group_name)
return rcf.template_specs.list_by_subscription()
def list_deployment_operations_at_subscription_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.list_at_subscription_scope(deployment_name)
def list_deployment_operations_at_resource_group(cmd, resource_group_name, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.list(resource_group_name, deployment_name)
def list_deployment_operations_at_management_group(cmd, management_group_id, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.list_at_management_group_scope(management_group_id, deployment_name)
def list_deployment_operations_at_tenant_scope(cmd, deployment_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.list_at_tenant_scope(deployment_name)
def get_deployment_operation_at_subscription_scope(cmd, deployment_name, op_id):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.get_at_subscription_scope(deployment_name, op_id)
def get_deployment_operation_at_resource_group(cmd, resource_group_name, deployment_name, op_id):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.get(resource_group_name, deployment_name, op_id)
def get_deployment_operation_at_management_group(cmd, management_group_id, deployment_name, op_id):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.get_at_management_group_scope(management_group_id, deployment_name, op_id)
def get_deployment_operation_at_tenant_scope(cmd, deployment_name, op_id):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.deployment_operations.get_at_tenant_scope(deployment_name, op_id)
def list_resources(cmd, resource_group_name=None,
resource_provider_namespace=None, resource_type=None, name=None, tag=None,
location=None):
rcf = _resource_client_factory(cmd.cli_ctx)
if resource_group_name is not None:
rcf.resource_groups.get(resource_group_name)
odata_filter = _list_resources_odata_filter_builder(resource_group_name,
resource_provider_namespace,
resource_type, name, tag, location)
expand = "createdTime,changedTime,provisioningState"
resources = rcf.resources.list(filter=odata_filter, expand=expand)
return list(resources)
def register_provider(cmd, resource_provider_namespace, wait=False):
_update_provider(cmd.cli_ctx, resource_provider_namespace, registering=True, wait=wait)
def unregister_provider(cmd, resource_provider_namespace, wait=False):
_update_provider(cmd.cli_ctx, resource_provider_namespace, registering=False, wait=wait)
def list_provider_operations(cmd):
auth_client = _authorization_management_client(cmd.cli_ctx)
return auth_client.provider_operations_metadata.list()
def show_provider_operations(cmd, resource_provider_namespace):
version = getattr(get_api_version(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION), 'provider_operations_metadata')
auth_client = _authorization_management_client(cmd.cli_ctx)
if version == '2015-07-01':
return auth_client.provider_operations_metadata.get(resource_provider_namespace, version)
return auth_client.provider_operations_metadata.get(resource_provider_namespace)
def move_resource(cmd, ids, destination_group, destination_subscription_id=None):
"""Moves resources from one resource group to another(can be under different subscription)
:param ids: the space-separated resource ids to be moved
:param destination_group: the destination resource group name
:param destination_subscription_id: the destination subscription identifier
"""
# verify all resource ids are valid and under the same group
resources = []
for i in ids:
if is_valid_resource_id(i):
resources.append(parse_resource_id(i))
else:
raise CLIError('Invalid id "{}", as it has no group or subscription field'.format(i))
if len({r['subscription'] for r in resources}) > 1:
raise CLIError('All resources should be under the same subscription')
if len({r['resource_group'] for r in resources}) > 1:
raise CLIError('All resources should be under the same group')
rcf = _resource_client_factory(cmd.cli_ctx)
target = _build_resource_id(subscription=(destination_subscription_id or rcf.config.subscription_id),
resource_group=destination_group)
return rcf.resources.move_resources(resources[0]['resource_group'], ids, target)
def list_features(client, resource_provider_namespace=None):
if resource_provider_namespace:
return client.list(resource_provider_namespace=resource_provider_namespace)
return client.list_all()
def register_feature(client, resource_provider_namespace, feature_name):
logger.warning("Once the feature '%s' is registered, invoking 'az provider register -n %s' is required "
"to get the change propagated", feature_name, resource_provider_namespace)
return client.register(resource_provider_namespace, feature_name)
def unregister_feature(client, resource_provider_namespace, feature_name):
logger.warning("Once the feature '%s' is unregistered, invoking 'az provider register -n %s' is required "
"to get the change propagated", feature_name, resource_provider_namespace)
return client.unregister(resource_provider_namespace, feature_name)
# pylint: disable=inconsistent-return-statements,too-many-locals
def create_policy_assignment(cmd, policy=None, policy_set_definition=None,
name=None, display_name=None, params=None,
resource_group_name=None, scope=None, sku=None,
not_scopes=None, location=None, assign_identity=None,
identity_scope=None, identity_role='Contributor', enforcement_mode='Default'):
"""Creates a policy assignment
:param not_scopes: Space-separated scopes where the policy assignment does not apply.
"""
if bool(policy) == bool(policy_set_definition):
raise CLIError('usage error: --policy NAME_OR_ID | '
'--policy-set-definition NAME_OR_ID')
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
scope = _build_policy_scope(policy_client.config.subscription_id,
resource_group_name, scope)
policy_id = _resolve_policy_id(cmd, policy, policy_set_definition, policy_client)
params = _load_file_string_or_uri(params, 'params', False)
PolicyAssignment = cmd.get_models('PolicyAssignment')
assignment = PolicyAssignment(display_name=display_name, policy_definition_id=policy_id, scope=scope, enforcement_mode=enforcement_mode)
assignment.parameters = params if params else None
if cmd.supported_api_version(min_api='2017-06-01-preview'):
if not_scopes:
kwargs_list = []
for id_arg in not_scopes.split(' '):
if parse_resource_id(id_arg):
kwargs_list.append(id_arg)
else:
logger.error('az policy assignment create error: argument --not-scopes: '
'invalid --not-scopes value: \'%s\'', id_arg)
return
assignment.not_scopes = kwargs_list
PolicySku = cmd.get_models('PolicySku')
policySku = PolicySku(name='A0', tier='Free')
if sku:
policySku = policySku if sku.lower() == 'free' else PolicySku(name='A1', tier='Standard')
assignment.sku = policySku
if cmd.supported_api_version(min_api='2018-05-01'):
if location:
assignment.location = location
identity = None
if assign_identity is not None:
identity = _build_identities_info(cmd, assign_identity)
assignment.identity = identity
if name is None:
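# No name supplied: generate a random, URL-safe 22-character assignment name
# (16 UUID bytes, base64url-encoded, with the trailing '==' padding stripped).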
name = (base64.urlsafe_b64encode(uuid.uuid4().bytes).decode())[:-2]
createdAssignment = policy_client.policy_assignments.create(scope, name, assignment)
# Create the identity's role assignment if requested
if assign_identity is not None and identity_scope:
from azure.cli.core.commands.arm import assign_identity as _assign_identity_helper
_assign_identity_helper(cmd.cli_ctx, lambda: createdAssignment, lambda resource: createdAssignment, identity_role, identity_scope)
return createdAssignment
def _build_identities_info(cmd, identities):
identities = identities or []
ResourceIdentityType = cmd.get_models('ResourceIdentityType')
identity_type = ResourceIdentityType.none
if not identities or MSI_LOCAL_ID in identities:
identity_type = ResourceIdentityType.system_assigned
ResourceIdentity = cmd.get_models('Identity')
return ResourceIdentity(type=identity_type)
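# Example (illustrative): passing None or [MSI_LOCAL_ID] produces a system-assigned
# identity; any other value falls through to ResourceIdentityType.none.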
def delete_policy_assignment(cmd, name, resource_group_name=None, scope=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
scope = _build_policy_scope(policy_client.config.subscription_id,
resource_group_name, scope)
policy_client.policy_assignments.delete(scope, name)
def show_policy_assignment(cmd, name, resource_group_name=None, scope=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
scope = _build_policy_scope(policy_client.config.subscription_id,
resource_group_name, scope)
return policy_client.policy_assignments.get(scope, name)
def list_policy_assignment(cmd, disable_scope_strict_match=None, resource_group_name=None, scope=None):
from azure.cli.core.commands.client_factory import get_subscription_id
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
_scope = _build_policy_scope(get_subscription_id(cmd.cli_ctx),
resource_group_name, scope)
id_parts = parse_resource_id(_scope)
subscription = id_parts.get('subscription')
resource_group = id_parts.get('resource_group')
resource_type = id_parts.get('child_type_1') or id_parts.get('type')
resource_name = id_parts.get('child_name_1') or id_parts.get('name')
management_group = _parse_management_group_id(scope)
if management_group:
result = policy_client.policy_assignments.list_for_management_group(management_group_id=management_group, filter='atScope()')
elif all([resource_type, resource_group, subscription]):
namespace = id_parts.get('namespace')
parent_resource_path = '' if not id_parts.get('child_name_1') else (id_parts['type'] + '/' + id_parts['name'])
result = policy_client.policy_assignments.list_for_resource(
resource_group, namespace,
parent_resource_path, resource_type, resource_name)
elif resource_group:
result = policy_client.policy_assignments.list_for_resource_group(resource_group)
elif subscription:
result = policy_client.policy_assignments.list()
elif scope:
raise CLIError('usage error `--scope`: must be a fully qualified ARM ID.')
else:
raise CLIError('usage error: --scope ARM_ID | --resource-group NAME')
if not disable_scope_strict_match:
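# Keep only assignments whose scope matches the requested scope exactly
# (case-insensitive, ignoring leading/trailing '/').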
result = [i for i in result if _scope.lower().strip('/') == i.scope.lower().strip('/')]
return result
def set_identity(cmd, name, scope=None, resource_group_name=None, identity_role='Contributor', identity_scope=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
scope = _build_policy_scope(policy_client.config.subscription_id, resource_group_name, scope)
def getter():
return policy_client.policy_assignments.get(scope, name)
def setter(policyAssignment):
policyAssignment.identity = _build_identities_info(cmd, [MSI_LOCAL_ID])
return policy_client.policy_assignments.create(scope, name, policyAssignment)
from azure.cli.core.commands.arm import assign_identity as _assign_identity_helper
updatedAssignment = _assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role, identity_scope)
return updatedAssignment.identity
def show_identity(cmd, name, scope=None, resource_group_name=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
scope = _build_policy_scope(policy_client.config.subscription_id, resource_group_name, scope)
return policy_client.policy_assignments.get(scope, name).identity
def remove_identity(cmd, name, scope=None, resource_group_name=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
scope = _build_policy_scope(policy_client.config.subscription_id, resource_group_name, scope)
policyAssignment = policy_client.policy_assignments.get(scope, name)
ResourceIdentityType = cmd.get_models('ResourceIdentityType')
ResourceIdentity = cmd.get_models('Identity')
policyAssignment.identity = ResourceIdentity(type=ResourceIdentityType.none)
policyAssignment = policy_client.policy_assignments.create(scope, name, policyAssignment)
return policyAssignment.identity
def enforce_mutually_exclusive(subscription, management_group):
if subscription and management_group:
raise IncorrectUsageError('cannot provide both --subscription and --management-group')
def create_policy_definition(cmd, name, rules=None, params=None, display_name=None, description=None, mode=None,
metadata=None, subscription=None, management_group=None):
rules = _load_file_string_or_uri(rules, 'rules')
params = _load_file_string_or_uri(params, 'params', False)
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
PolicyDefinition = cmd.get_models('PolicyDefinition')
parameters = PolicyDefinition(policy_rule=rules, parameters=params, description=description,
display_name=display_name)
if cmd.supported_api_version(min_api='2016-12-01'):
parameters.mode = mode
if cmd.supported_api_version(min_api='2017-06-01-preview'):
parameters.metadata = metadata
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_definitions.create_or_update_at_management_group(name, parameters, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_definitions.create_or_update(name, parameters)
def create_policy_setdefinition(cmd, name, definitions, params=None, display_name=None, description=None,
subscription=None, management_group=None, definition_groups=None, metadata=None):
definitions = _load_file_string_or_uri(definitions, 'definitions')
params = _load_file_string_or_uri(params, 'params', False)
definition_groups = _load_file_string_or_uri(definition_groups, 'definition_groups', False)
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
PolicySetDefinition = cmd.get_models('PolicySetDefinition')
parameters = PolicySetDefinition(policy_definitions=definitions, parameters=params, description=description,
display_name=display_name, policy_definition_groups=definition_groups)
if cmd.supported_api_version(min_api='2017-06-01-preview'):
parameters.metadata = metadata
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_set_definitions.create_or_update_at_management_group(name, parameters, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_set_definitions.create_or_update(name, parameters)
def get_policy_definition(cmd, policy_definition_name, subscription=None, management_group=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
return _get_custom_or_builtin_policy(cmd, policy_client, policy_definition_name, subscription, management_group)
def get_policy_setdefinition(cmd, policy_set_definition_name, subscription=None, management_group=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
return _get_custom_or_builtin_policy(cmd, policy_client, policy_set_definition_name, subscription, management_group, True)
def list_policy_definition(cmd, subscription=None, management_group=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_definitions.list_by_management_group(management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_definitions.list()
def list_policy_setdefinition(cmd, subscription=None, management_group=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_set_definitions.list_by_management_group(management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_set_definitions.list()
def delete_policy_definition(cmd, policy_definition_name, subscription=None, management_group=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_definitions.delete_at_management_group(policy_definition_name, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_definitions.delete(policy_definition_name)
def delete_policy_setdefinition(cmd, policy_set_definition_name, subscription=None, management_group=None):
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_set_definitions.delete_at_management_group(policy_set_definition_name, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_set_definitions.delete(policy_set_definition_name)
def update_policy_definition(cmd, policy_definition_name, rules=None, params=None,
display_name=None, description=None, metadata=None, mode=None,
subscription=None, management_group=None):
rules = _load_file_string_or_uri(rules, 'rules', False)
params = _load_file_string_or_uri(params, 'params', False)
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
definition = _get_custom_or_builtin_policy(cmd, policy_client, policy_definition_name, subscription, management_group)
# pylint: disable=line-too-long,no-member
PolicyDefinition = cmd.get_models('PolicyDefinition')
parameters = PolicyDefinition(
policy_rule=rules if rules is not None else definition.policy_rule,
parameters=params if params is not None else definition.parameters,
display_name=display_name if display_name is not None else definition.display_name,
description=description if description is not None else definition.description,
metadata=metadata if metadata is not None else definition.metadata)
if cmd.supported_api_version(min_api='2016-12-01'):
parameters.mode = mode
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_definitions.create_or_update_at_management_group(policy_definition_name, parameters, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_definitions.create_or_update(policy_definition_name, parameters)
def update_policy_setdefinition(cmd, policy_set_definition_name, definitions=None, params=None,
display_name=None, description=None,
subscription=None, management_group=None, definition_groups=None, metadata=None):
definitions = _load_file_string_or_uri(definitions, 'definitions', False)
params = _load_file_string_or_uri(params, 'params', False)
definition_groups = _load_file_string_or_uri(definition_groups, 'definition_groups', False)
policy_client = _resource_policy_client_factory(cmd.cli_ctx)
definition = _get_custom_or_builtin_policy(cmd, policy_client, policy_set_definition_name, subscription, management_group, True)
# pylint: disable=line-too-long,no-member
PolicySetDefinition = cmd.get_models('PolicySetDefinition')
parameters = PolicySetDefinition(
policy_definitions=definitions if definitions is not None else definition.policy_definitions,
description=description if description is not None else definition.description,
display_name=display_name if display_name is not None else definition.display_name,
parameters=params if params is not None else definition.parameters,
policy_definition_groups=definition_groups if definition_groups is not None else definition.policy_definition_groups,
metadata=metadata if metadata is not None else definition.metadata)
if cmd.supported_api_version(min_api='2018-03-01'):
enforce_mutually_exclusive(subscription, management_group)
if management_group:
return policy_client.policy_set_definitions.create_or_update_at_management_group(policy_set_definition_name, parameters, management_group)
if subscription:
subscription_id = _get_subscription_id_from_subscription(cmd.cli_ctx, subscription)
policy_client.config.subscription_id = subscription_id
return policy_client.policy_set_definitions.create_or_update(policy_set_definition_name, parameters)
def _register_rp(cli_ctx, subscription_id=None):
rp = "Microsoft.Management"
import time
rcf = get_mgmt_service_client(
cli_ctx,
ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id)
rcf.providers.register(rp)
while True:
time.sleep(10)
rp_info = rcf.providers.get(rp)
if rp_info.registration_state == 'Registered':
break
def _get_subscription_id_from_subscription(cli_ctx, subscription): # pylint: disable=inconsistent-return-statements
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cli_ctx)
subscriptions_list = profile.load_cached_subscriptions()
for sub in subscriptions_list:
if subscription in (sub['id'], sub['name']):
return sub['id']
raise CLIError("Subscription not found in the current context.")
def _get_parent_id_from_parent(parent):
if parent is None or _is_management_group_scope(parent):
return parent
return "/providers/Microsoft.Management/managementGroups/" + parent
def _is_management_group_scope(scope):
return scope is not None and scope.lower().startswith("/providers/microsoft.management/managementgroups")
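# Hedged example (editor's illustration, not part of the original module): the two helpers
# above normalize a management-group reference into a fully qualified ARM id.
#   _get_parent_id_from_parent('MyGroup')
#       -> '/providers/Microsoft.Management/managementGroups/MyGroup'
#   _get_parent_id_from_parent('/providers/Microsoft.Management/managementGroups/MyGroup')
#       -> returned unchanged, because _is_management_group_scope() already matches it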
def cli_managementgroups_group_list(cmd, client):
_register_rp(cmd.cli_ctx)
return client.list()
def cli_managementgroups_group_show(
cmd,
client,
group_name,
expand=False,
recurse=False):
_register_rp(cmd.cli_ctx)
if expand:
return client.get(group_name, "children", recurse)
return client.get(group_name)
def cli_managementgroups_group_create(
cmd,
client,
group_name,
display_name=None,
parent=None):
_register_rp(cmd.cli_ctx)
parent_id = _get_parent_id_from_parent(parent)
from azure.mgmt.managementgroups.models import (
CreateManagementGroupRequest, CreateManagementGroupDetails, CreateParentGroupInfo)
create_parent_grp_info = CreateParentGroupInfo(id=parent_id)
create_mgmt_grp_details = CreateManagementGroupDetails(parent=create_parent_grp_info)
create_mgmt_grp_request = CreateManagementGroupRequest(
name=group_name,
display_name=display_name,
details=create_mgmt_grp_details)
return client.create_or_update(group_name, create_mgmt_grp_request)
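# Hedged usage sketch (assumed CLI surface, shown for orientation only): this handler
# typically backs an invocation such as
#   az account management-group create --name MyGroup --display-name "My Group" --parent ParentGroup
# where --parent may be either a bare group name or a full
# /providers/Microsoft.Management/managementGroups/... id (see _get_parent_id_from_parent).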
def cli_managementgroups_group_update_custom_func(
instance,
display_name=None,
parent_id=None):
parent_id = _get_parent_id_from_parent(parent_id)
instance.display_name = display_name
instance.parent_id = parent_id
return instance
def cli_managementgroups_group_update_get():
from azure.mgmt.managementgroups.models import PatchManagementGroupRequest
update_parameters = PatchManagementGroupRequest(display_name=None, parent_id=None)
return update_parameters
def cli_managementgroups_group_update_set(
cmd, client, group_name, parameters=None):
return client.update(group_name, parameters)
def cli_managementgroups_group_delete(cmd, client, group_name):
_register_rp(cmd.cli_ctx)
return client.delete(group_name)
def cli_managementgroups_subscription_add(
cmd, client, group_name, subscription):
subscription_id = _get_subscription_id_from_subscription(
cmd.cli_ctx, subscription)
return client.create(group_name, subscription_id)
def cli_managementgroups_subscription_remove(
cmd, client, group_name, subscription):
subscription_id = _get_subscription_id_from_subscription(
cmd.cli_ctx, subscription)
return client.delete(group_name, subscription_id)
# region Locks
def _validate_lock_params_match_lock(
lock_client, name, resource_group, resource_provider_namespace, parent_resource_path,
resource_type, resource_name):
"""
Locks are scoped to subscription, resource group or resource.
However, the az list command returns all locks for the current scopes
and all lower scopes (e.g. resource group level also includes resource locks).
This can lead to a confusing user experience where the user specifies a lock
name and assumes that it will work, even if they haven't given the right
scope. This function attempts to validate the parameters and help the
user find the right scope, by first finding the lock and then inferring
what its parameters should be.
"""
locks = lock_client.management_locks.list_at_subscription_level()
found_count = 0 # locks at different levels can have the same name
lock_resource_id = None
for lock in locks:
if lock.name == name:
found_count = found_count + 1
lock_resource_id = lock.id
if found_count == 1:
# If we only found one lock, let's validate that the parameters are correct,
# if we found more than one, we'll assume the user knows what they're doing
# TODO: Add validation for that case too?
resource = parse_resource_id(lock_resource_id)
_resource_group = resource.get('resource_group', None)
_resource_namespace = resource.get('namespace', None)
if _resource_group is None:
return
if resource_group != _resource_group:
raise CLIError(
'Unexpected --resource-group for lock {}, expected {}'.format(
name, _resource_group))
if _resource_namespace is None or _resource_namespace == 'Microsoft.Authorization':
return
if resource_provider_namespace != _resource_namespace:
raise CLIError(
'Unexpected --namespace for lock {}, expected {}'.format(name, _resource_namespace))
if resource.get('child_type_2', None) is None:
_resource_type = resource.get('type', None)
_resource_name = resource.get('name', None)
else:
if resource.get('child_type_3', None) is None:
_resource_type = resource.get('child_type_1', None)
_resource_name = resource.get('child_name_1', None)
parent = (resource['type'] + '/' + resource['name'])
else:
_resource_type = resource.get('child_type_2', None)
_resource_name = resource.get('child_name_2', None)
parent = (resource['type'] + '/' + resource['name'] + '/' +
resource['child_type_1'] + '/' + resource['child_name_1'])
if parent != parent_resource_path:
raise CLIError(
'Unexpected --parent for lock {}, expected {}'.format(
name, parent))
if resource_type != _resource_type:
raise CLIError('Unexpected --resource-type for lock {}, expected {}'.format(
name, _resource_type))
if resource_name != _resource_name:
raise CLIError('Unexpected --resource-name for lock {}, expected {}'.format(
name, _resource_name))
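# Hedged example (editor's illustration): for a resource-level lock, parse_resource_id()
# yields roughly the pieces this validator compares against. An id like
#   /subscriptions/<sub>/resourceGroups/rg1/providers/Microsoft.Web/sites/site1/
#       providers/Microsoft.Authorization/locks/lock1
# decomposes into resource_group='rg1', namespace='Microsoft.Web', type='sites',
# name='site1', with the lock itself appearing as the child resource.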
def list_locks(cmd, resource_group=None,
resource_provider_namespace=None, parent_resource_path=None, resource_type=None,
resource_name=None, filter_string=None):
"""
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
:param filter_string: A query filter to use to restrict the results.
:type filter_string: str
"""
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
if resource_group is None:
return lock_client.management_locks.list_at_subscription_level(filter=filter_string)
if resource_name is None:
return lock_client.management_locks.list_at_resource_group_level(
resource_group, filter=filter_string)
return lock_client.management_locks.list_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, filter=filter_string)
# pylint: disable=inconsistent-return-statements
def get_lock(cmd, lock_name=None, resource_group=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, ids=None):
"""
:param lock_name: The name of the lock.
:type lock_name: str
"""
if ids:
kwargs_list = []
for id_arg in ids:
try:
kwargs_list.append(_parse_lock_id(id_arg))
except AttributeError:
logger.error('az lock show: error: argument --ids: invalid ResourceId value: \'%s\'', id_arg)
return
results = [get_lock(cmd, **kwargs) for kwargs in kwargs_list]
return results[0] if len(results) == 1 else results
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
_validate_lock_params_match_lock(lock_client, lock_name, resource_group,
resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
if resource_group is None:
return _call_subscription_get(cmd, lock_client, lock_name)
if resource_name is None:
return lock_client.management_locks.get_at_resource_group_level(resource_group, lock_name)
if cmd.supported_api_version(max_api='2015-01-01'):
lock_list = list_locks(cmd, resource_group, resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
return next((lock for lock in lock_list if lock.name == lock_name), None)
return lock_client.management_locks.get_at_resource_level(
resource_group, resource_provider_namespace,
parent_resource_path or '', resource_type, resource_name, lock_name)
# pylint: disable=inconsistent-return-statements
def delete_lock(cmd, lock_name=None, resource_group=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None, ids=None):
"""
:param lock_name: The name of the lock.
:type lock_name: str
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
"""
if ids:
kwargs_list = []
for id_arg in ids:
try:
kwargs_list.append(_parse_lock_id(id_arg))
except AttributeError:
logger.error('az lock delete: error: argument --ids: invalid ResourceId value: \'%s\'', id_arg)
return
results = [delete_lock(cmd, **kwargs) for kwargs in kwargs_list]
return results[0] if len(results) == 1 else results
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
_validate_lock_params_match_lock(lock_client, lock_name, resource_group,
resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
if resource_group is None:
return lock_client.management_locks.delete_at_subscription_level(lock_name)
if resource_name is None:
return lock_client.management_locks.delete_at_resource_group_level(
resource_group, lock_name)
return lock_client.management_locks.delete_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name)
def create_lock(cmd, lock_name, level,
resource_group=None, resource_provider_namespace=None, notes=None,
parent_resource_path=None, resource_type=None, resource_name=None):
"""
:param lock_name: The name of the lock.
:type lock_name: str
:param resource_provider_namespace: Name of a resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: Path to a parent resource
:type parent_resource_path: str
:param resource_type: The type for the resource with the lock.
:type resource_type: str
:param resource_name: Name of a resource that has a lock.
:type resource_name: str
:param notes: Notes about this lock.
:type notes: str
"""
ManagementLockObject = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_LOCKS, 'ManagementLockObject', mod='models')
parameters = ManagementLockObject(level=level, notes=notes, name=lock_name)
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
if resource_group is None:
return lock_client.management_locks.create_or_update_at_subscription_level(lock_name, parameters)
if resource_name is None:
return lock_client.management_locks.create_or_update_at_resource_group_level(
resource_group, lock_name, parameters)
return lock_client.management_locks.create_or_update_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name, parameters)
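# Hedged usage sketch (assumed CLI surface): the three scopes handled above correspond to
# invocations such as
#   az lock create --name lck --lock-type CanNotDelete                          (subscription)
#   az lock create --name lck --lock-type ReadOnly --resource-group rg1         (resource group)
#   az lock create --name lck --lock-type CanNotDelete --resource-group rg1 \
#       --resource-name site1 --resource-type Microsoft.Web/sites               (resource)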
# pylint: disable=inconsistent-return-statements
def update_lock(cmd, lock_name=None, resource_group=None, resource_provider_namespace=None, notes=None,
parent_resource_path=None, resource_type=None, resource_name=None, level=None, ids=None):
"""
Allows updates to the lock-type(level) and the notes of the lock
"""
if ids:
kwargs_list = []
for id_arg in ids:
try:
kwargs_list.append(_parse_lock_id(id_arg))
except AttributeError:
logger.error('az lock update: error: argument --ids: invalid ResourceId value: \'%s\'', id_arg)
return
results = [update_lock(cmd, level=level, notes=notes, **kwargs) for kwargs in kwargs_list]
return results[0] if len(results) == 1 else results
lock_client = _resource_lock_client_factory(cmd.cli_ctx)
lock_resource = _extract_lock_params(resource_group, resource_provider_namespace,
resource_type, resource_name)
resource_group = lock_resource[0]
resource_name = lock_resource[1]
resource_provider_namespace = lock_resource[2]
resource_type = lock_resource[3]
_validate_lock_params_match_lock(lock_client, lock_name, resource_group, resource_provider_namespace,
parent_resource_path, resource_type, resource_name)
if resource_group is None:
params = _call_subscription_get(cmd, lock_client, lock_name)
_update_lock_parameters(params, level, notes)
return lock_client.management_locks.create_or_update_at_subscription_level(lock_name, params)
if resource_name is None:
params = lock_client.management_locks.get_at_resource_group_level(resource_group, lock_name)
_update_lock_parameters(params, level, notes)
return lock_client.management_locks.create_or_update_at_resource_group_level(
resource_group, lock_name, params)
if cmd.supported_api_version(max_api='2015-01-01'):
lock_list = list_locks(cmd, resource_group, resource_provider_namespace, parent_resource_path,
resource_type, resource_name)
return next((lock for lock in lock_list if lock.name == lock_name), None)
params = lock_client.management_locks.get_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name)
_update_lock_parameters(params, level, notes)
return lock_client.management_locks.create_or_update_at_resource_level(
resource_group, resource_provider_namespace, parent_resource_path or '', resource_type,
resource_name, lock_name, params)
# endregion
# region ResourceLinks
def create_resource_link(cmd, link_id, target_id, notes=None):
links_client = _resource_links_client_factory(cmd.cli_ctx).resource_links
ResourceLinkProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_LINKS,
'ResourceLinkProperties', mod='models')
properties = ResourceLinkProperties(target_id=target_id, notes=notes)
links_client.create_or_update(link_id, properties)
def update_resource_link(cmd, link_id, target_id=None, notes=None):
links_client = _resource_links_client_factory(cmd.cli_ctx).resource_links
params = links_client.get(link_id)
ResourceLinkProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_LINKS,
'ResourceLinkProperties', mod='models')
properties = ResourceLinkProperties(
target_id=target_id if target_id is not None else params.properties.target_id,
# pylint: disable=no-member
notes=notes if notes is not None else params.properties.notes) # pylint: disable=no-member
links_client.create_or_update(link_id, properties)
def list_resource_links(cmd, scope=None, filter_string=None):
links_client = _resource_links_client_factory(cmd.cli_ctx).resource_links
if scope is not None:
return links_client.list_at_source_scope(scope, filter=filter_string)
return links_client.list_at_subscription(filter=filter_string)
# endregion
# region tags
def get_tag_at_scope(cmd, resource_id=None):
rcf = _resource_client_factory(cmd.cli_ctx)
if resource_id is not None:
return rcf.tags.get_at_scope(scope=resource_id)
return rcf.tags.list()
def create_or_update_tag_at_scope(cmd, resource_id=None, tags=None, tag_name=None):
rcf = _resource_client_factory(cmd.cli_ctx)
if resource_id is not None:
if not tags:
raise IncorrectUsageError("Tags could not be empty.")
Tags = cmd.get_models('Tags')
tag_obj = Tags(tags=tags)
return rcf.tags.create_or_update_at_scope(scope=resource_id, properties=tag_obj)
return rcf.tags.create_or_update(tag_name=tag_name)
def delete_tag_at_scope(cmd, resource_id=None, tag_name=None):
rcf = _resource_client_factory(cmd.cli_ctx)
if resource_id is not None:
return rcf.tags.delete_at_scope(scope=resource_id)
return rcf.tags.delete(tag_name=tag_name)
def update_tag_at_scope(cmd, resource_id, tags, operation):
rcf = _resource_client_factory(cmd.cli_ctx)
if not tags:
raise IncorrectUsageError("Tags could not be empty.")
Tags = cmd.get_models('Tags')
tag_obj = Tags(tags=tags)
return rcf.tags.update_at_scope(scope=resource_id, properties=tag_obj, operation=operation)
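# Hedged usage sketch (assumed SDK/CLI surface): the 'operation' argument selects how the
# supplied tags are applied at the scope, typically one of 'Merge', 'Replace' or 'Delete',
# e.g. at the CLI level
#   az tag update --resource-id <resource-id> --operation Merge --tags env=prod team=infra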
# endregion
class _ResourceUtils: # pylint: disable=too-many-instance-attributes
def __init__(self, cli_ctx,
resource_group_name=None, resource_provider_namespace=None,
parent_resource_path=None, resource_type=None, resource_name=None,
resource_id=None, api_version=None, rcf=None, latest_include_preview=False):
# if the resource_type is in the format 'namespace/type', split it.
# (we don't have to do this, but commands like 'vm show' return such values)
if resource_type and not resource_provider_namespace and not parent_resource_path:
parts = resource_type.split('/')
if len(parts) > 1:
resource_provider_namespace = parts[0]
resource_type = parts[1]
self.rcf = rcf or _resource_client_factory(cli_ctx)
if api_version is None:
if resource_id:
api_version = _ResourceUtils._resolve_api_version_by_id(self.rcf, resource_id,
latest_include_preview=latest_include_preview)
else:
_validate_resource_inputs(resource_group_name, resource_provider_namespace,
resource_type, resource_name)
api_version = _ResourceUtils.resolve_api_version(self.rcf,
resource_provider_namespace,
parent_resource_path,
resource_type,
latest_include_preview=latest_include_preview)
self.resource_group_name = resource_group_name
self.resource_provider_namespace = resource_provider_namespace
self.parent_resource_path = parent_resource_path if parent_resource_path else ''
self.resource_type = resource_type
self.resource_name = resource_name
self.resource_id = resource_id
self.api_version = api_version
def create_resource(self, properties, location, is_full_object):
try:
res = json.loads(properties)
except json.decoder.JSONDecodeError as ex:
raise CLIError('Error parsing JSON.\n{}\n{}'.format(properties, ex))
if not is_full_object:
if not location:
if self.resource_id:
rg_name = parse_resource_id(self.resource_id)['resource_group']
else:
rg_name = self.resource_group_name
location = self.rcf.resource_groups.get(rg_name).location
res = GenericResource(location=location, properties=res)
elif res.get('location', None) is None:
raise IncorrectUsageError("location of the resource is required")
if self.resource_id:
resource = self.rcf.resources.create_or_update_by_id(self.resource_id,
self.api_version,
res)
else:
resource = self.rcf.resources.create_or_update(self.resource_group_name,
self.resource_provider_namespace,
self.parent_resource_path,
self.resource_type,
self.resource_name,
self.api_version,
res)
return resource
def get_resource(self, include_response_body=False):
if self.resource_id:
resource = self.rcf.resources.get_by_id(self.resource_id, self.api_version, raw=include_response_body)
else:
resource = self.rcf.resources.get(self.resource_group_name,
self.resource_provider_namespace,
self.parent_resource_path,
self.resource_type,
self.resource_name,
self.api_version,
raw=include_response_body)
if include_response_body:
temp = resource.output
setattr(temp, 'response_body', json.loads(resource.response.content.decode()))
resource = temp
return resource
def delete(self):
if self.resource_id:
return self.rcf.resources.delete_by_id(self.resource_id, self.api_version)
return self.rcf.resources.delete(self.resource_group_name,
self.resource_provider_namespace,
self.parent_resource_path,
self.resource_type,
self.resource_name,
self.api_version)
def update(self, parameters):
if self.resource_id:
return self.rcf.resources.create_or_update_by_id(self.resource_id,
self.api_version,
parameters)
return self.rcf.resources.create_or_update(self.resource_group_name,
self.resource_provider_namespace,
self.parent_resource_path,
self.resource_type,
self.resource_name,
self.api_version,
parameters)
def tag(self, tags, is_incremental=False):
resource = self.get_resource()
if is_incremental is True:
if not tags:
raise CLIError("When modifying tag incrementally, the parameters of tag must have specific values.")
if resource.tags:
resource.tags.update(tags)
tags = resource.tags
# please add the service type that needs to be requested with PATCH type here
# for example: the properties of RecoveryServices/vaults must be filled, and a PUT request that passes back
# to properties will fail due to the lack of properties, so the PATCH type should be used
need_patch_service = ['Microsoft.RecoveryServices/vaults', 'Microsoft.Resources/resourceGroups',
'Microsoft.ContainerRegistry/registries/webhooks',
'Microsoft.ContainerInstance/containerGroups']
if resource is not None and resource.type in need_patch_service:
parameters = GenericResource(tags=tags)
if self.resource_id:
return self.rcf.resources.update_by_id(self.resource_id, self.api_version, parameters)
return self.rcf.resources.update(self.resource_group_name,
self.resource_provider_namespace,
self.parent_resource_path,
self.resource_type,
self.resource_name,
self.api_version,
parameters)
# pylint: disable=no-member
parameters = GenericResource(
location=resource.location,
tags=tags,
plan=resource.plan,
properties=resource.properties,
kind=resource.kind,
managed_by=resource.managed_by,
sku=resource.sku,
identity=resource.identity)
if self.resource_id:
return self.rcf.resources.create_or_update_by_id(self.resource_id, self.api_version,
parameters)
return self.rcf.resources.create_or_update(self.resource_group_name,
self.resource_provider_namespace,
self.parent_resource_path,
self.resource_type,
self.resource_name,
self.api_version,
parameters)
def invoke_action(self, action, request_body):
"""
Formats Url if none provided and sends the POST request with the url and request-body.
"""
from msrestazure.azure_operation import AzureOperationPoller
query_parameters = {}
serialize = self.rcf.resources._serialize # pylint: disable=protected-access
client = self.rcf.resources._client # pylint: disable=protected-access
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/' \
'{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/{action}'
if self.resource_id:
url = client.format_url(
'{resource_id}/{action}',
resource_id=self.resource_id,
action=serialize.url("action", action, 'str'))
else:
url = client.format_url(
url,
resourceGroupName=serialize.url(
"resource_group_name", self.resource_group_name, 'str',
max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
resourceProviderNamespace=serialize.url(
"resource_provider_namespace", self.resource_provider_namespace, 'str'),
parentResourcePath=serialize.url(
"parent_resource_path", self.parent_resource_path, 'str', skip_quote=True),
resourceType=serialize.url("resource_type", self.resource_type, 'str', skip_quote=True),
resourceName=serialize.url("resource_name", self.resource_name, 'str'),
subscriptionId=serialize.url(
"self.config.subscription_id", self.rcf.resources.config.subscription_id, 'str'),
action=serialize.url("action", action, 'str'))
# Construct parameters
query_parameters['api-version'] = serialize.query("api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.rcf.resources.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid4())
if self.rcf.resources.config.accept_language is not None:
header_parameters['accept-language'] = serialize.header(
"self.config.accept_language", self.rcf.resources.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = client.post(url, query_parameters)
return client.send(
request, header_parameters, json.loads(request_body) if request_body else None)
def get_long_running_status(status_link, headers=None):
request = client.get(status_link)
if headers:
request.headers.update(headers)
return client.send(request, header_parameters)
def get_long_running_output(response):
from msrestazure.azure_exceptions import CloudError
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response.text
return AzureOperationPoller(long_running_send, get_long_running_output, get_long_running_status,
self.rcf.resources.config.long_running_operation_timeout)
@staticmethod
def resolve_api_version(rcf, resource_provider_namespace, parent_resource_path, resource_type,
latest_include_preview=False):
provider = rcf.providers.get(resource_provider_namespace)
# If available, we will use parent resource's api-version
resource_type_str = (parent_resource_path.split('/')[0] if parent_resource_path else resource_type)
rt = [t for t in provider.resource_types
if t.resource_type.lower() == resource_type_str.lower()]
if not rt:
raise IncorrectUsageError('Resource type {} not found.'.format(resource_type_str))
if len(rt) == 1 and rt[0].api_versions:
# If latest_include_preview is true,
# the newest api-version (the first one listed) is taken regardless of whether it is a preview version
if latest_include_preview:
return rt[0].api_versions[0]
# Take the latest stable version first.
# if there is no stable version, the latest preview version will be taken.
npv = [v for v in rt[0].api_versions if 'preview' not in v.lower()]
return npv[0] if npv else rt[0].api_versions[0]
raise IncorrectUsageError(
'API version is required and could not be resolved for resource {}'
.format(resource_type))
@staticmethod
def _resolve_api_version_by_id(rcf, resource_id, latest_include_preview=False):
parts = parse_resource_id(resource_id)
if len(parts) == 2 and parts['subscription'] is not None and parts['resource_group'] is not None:
return AZURE_API_PROFILES['latest'][ResourceType.MGMT_RESOURCE_RESOURCES]
if 'namespace' not in parts:
raise CLIError('The type of value entered by --ids parameter is not supported.')
namespace = parts.get('child_namespace_1', parts['namespace'])
if parts.get('child_type_2'):
parent = (parts['type'] + '/' + parts['name'] + '/' +
parts['child_type_1'] + '/' + parts['child_name_1'])
resource_type = parts['child_type_2']
elif parts.get('child_type_1'):
# if the child resource has a provider namespace it is independent of the
# parent, so set the parent to empty
if parts.get('child_namespace_1') is not None:
parent = ''
else:
parent = parts['type'] + '/' + parts['name']
resource_type = parts['child_type_1']
else:
parent = None
resource_type = parts['type']
return _ResourceUtils.resolve_api_version(rcf, namespace, parent, resource_type,
latest_include_preview=latest_include_preview)
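# Hedged example (editor's illustration) of the api-version preference implemented in
# resolve_api_version(): given a provider reporting (newest first)
#   api_versions = ['2021-04-01-preview', '2021-01-01', '2020-06-01']
# the default path returns '2021-01-01' (newest stable), while latest_include_preview=True
# returns '2021-04-01-preview' (the first entry, regardless of flavor).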
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
return HttpResponse('{"response": "Synth is running!"}')
def test(request):
return HttpResponse('ANOTHER RESPONSE YO')
|
import py
import sys, shutil, os
from rpython.tool.udir import udir
from pypy.interpreter.gateway import interp2app
from pypy.module._cffi_backend.newtype import _clean_cache
if sys.platform == 'win32':
WIN32 = True
else:
WIN32 = False
class AppTestRecompilerPython:
spaceconfig = dict(usemodules=['_cffi_backend'])
def setup_class(cls):
try:
from cffi import FFI # <== the system one, which
from cffi import recompiler # needs to be at least cffi 1.0.0
from cffi import ffiplatform
except ImportError:
py.test.skip("system cffi module not found or older than 1.0.0")
space = cls.space
SRC = """
#define FOOBAR (-42)
static const int FOOBAZ = -43;
#define BIGPOS 420000000000L
#define BIGNEG -420000000000L
int add42(int x) { return x + 42; }
int globalvar42 = 1234;
const int globalconst42 = 4321;
const char *const globalconsthello = "hello";
struct foo_s;
typedef struct bar_s { int x; signed char a[]; } bar_t;
enum foo_e { AA, BB, CC };
void init_test_re_python(void) { } /* windows hack */
void PyInit__test_re_python(void) { } /* windows hack */
"""
tmpdir = udir.join('test_re_python')
tmpdir.ensure(dir=1)
c_file = tmpdir.join('_test_re_python.c')
c_file.write(SRC)
ext = ffiplatform.get_extension(str(c_file), '_test_re_python',
export_symbols=['add42', 'globalvar42',
'globalconst42', 'globalconsthello'])
outputfilename = ffiplatform.compile(str(tmpdir), ext)
cls.w_extmod = space.wrap(outputfilename)
if WIN32:
unicode_name = u'load\u03betest.dll'
else:
unicode_name = u'load_caf\xe9' + os.path.splitext(outputfilename)[1]
try:
unicode_name.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
unicode_name = None # skip test_dlopen_unicode
if unicode_name is not None:
outputfileUname = os.path.join(unicode(udir), unicode_name)
shutil.copyfile(outputfilename, outputfileUname)
cls.w_extmodU = space.wrap(outputfileUname)
#mod.tmpdir = tmpdir
#
ffi = FFI()
ffi.cdef("""
#define FOOBAR -42
static const int FOOBAZ = -43;
#define BIGPOS 420000000000L
#define BIGNEG -420000000000L
int add42(int);
int globalvar42;
const int globalconst42;
const char *const globalconsthello = "hello";
int no_such_function(int);
int no_such_globalvar;
struct foo_s;
typedef struct bar_s { int x; signed char a[]; } bar_t;
enum foo_e { AA, BB, CC };
typedef struct selfref { struct selfref *next; } *selfref_ptr_t;
void *dlopen(const char *filename, int flags);
int dlclose(void *handle);
""")
ffi.set_source('re_python_pysrc', None)
ffi.emit_python_code(str(tmpdir.join('re_python_pysrc.py')))
#
sub_ffi = FFI()
sub_ffi.cdef("static const int k2 = 121212;")
sub_ffi.include(ffi)
assert 'macro FOOBAR' in ffi._parser._declarations
assert 'macro FOOBAZ' in ffi._parser._declarations
sub_ffi.set_source('re_py_subsrc', None)
sub_ffi.emit_python_code(str(tmpdir.join('re_py_subsrc.py')))
#
cls.w_fix_path = space.appexec([space.wrap(str(tmpdir))], """(path):
def fix_path(ignored=None):
import _cffi_backend # force it to be initialized
import sys
if path not in sys.path:
sys.path.insert(0, path)
return fix_path
""")
cls.w_dl_libpath = space.w_None
if sys.platform != 'win32':
import ctypes.util
cls.w_dl_libpath = space.wrap(ctypes.util.find_library('dl'))
def teardown_method(self, meth):
self.space.appexec([], """():
import sys
for name in ['re_py_subsrc', 're_python_pysrc']:
if name in sys.modules:
del sys.modules[name]
""")
_clean_cache(self.space)
def test_constant_1(self):
self.fix_path()
from re_python_pysrc import ffi
assert ffi.integer_const('FOOBAR') == -42
assert ffi.integer_const('FOOBAZ') == -43
def test_large_constant(self):
self.fix_path()
from re_python_pysrc import ffi
assert ffi.integer_const('BIGPOS') == 420000000000
assert ffi.integer_const('BIGNEG') == -420000000000
def test_function(self):
import _cffi_backend
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
assert lib.add42(-10) == 32
assert type(lib.add42) is _cffi_backend.FFI.CData
def test_dlopen_unicode(self):
if not getattr(self, 'extmodU', None):
skip("no unicode file name")
import _cffi_backend
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmodU)
assert lib.add42(-10) == 32
def test_dlclose(self):
import _cffi_backend
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
ffi.dlclose(lib)
e = raises(ffi.error, getattr, lib, 'add42')
assert str(e.value) == (
"library '%s' has been closed" % (self.extmod,))
ffi.dlclose(lib) # does not raise
def test_constant_via_lib(self):
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
assert lib.FOOBAR == -42
assert lib.FOOBAZ == -43
def test_opaque_struct(self):
self.fix_path()
from re_python_pysrc import ffi
ffi.cast("struct foo_s *", 0)
raises(TypeError, ffi.new, "struct foo_s *")
def test_nonopaque_struct(self):
self.fix_path()
from re_python_pysrc import ffi
for p in [ffi.new("struct bar_s *", [5, b"foobar"]),
ffi.new("bar_t *", [5, b"foobar"])]:
assert p.x == 5
assert p.a[0] == ord('f')
assert p.a[5] == ord('r')
def test_enum(self):
self.fix_path()
from re_python_pysrc import ffi
assert ffi.integer_const("BB") == 1
e = ffi.cast("enum foo_e", 2)
assert ffi.string(e) == "CC"
def test_include_1(self):
self.fix_path()
from re_py_subsrc import ffi
assert ffi.integer_const('FOOBAR') == -42
assert ffi.integer_const('FOOBAZ') == -43
assert ffi.integer_const('k2') == 121212
lib = ffi.dlopen(self.extmod) # <- a random unrelated library would be fine
assert lib.FOOBAR == -42
assert lib.FOOBAZ == -43
assert lib.k2 == 121212
#
p = ffi.new("bar_t *", [5, b"foobar"])
assert p.a[4] == ord('a')
def test_global_var(self):
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
assert lib.globalvar42 == 1234
p = ffi.addressof(lib, 'globalvar42')
lib.globalvar42 += 5
assert p[0] == 1239
p[0] -= 1
assert lib.globalvar42 == 1238
def test_global_const_int(self):
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
assert lib.globalconst42 == 4321
raises(AttributeError, ffi.addressof, lib, 'globalconst42')
def test_global_const_nonint(self):
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
assert ffi.string(lib.globalconsthello, 8) == "hello"
raises(AttributeError, ffi.addressof, lib, 'globalconsthello')
def test_rtld_constants(self):
self.fix_path()
from re_python_pysrc import ffi
ffi.RTLD_NOW # check that we have the attributes
ffi.RTLD_LAZY
ffi.RTLD_GLOBAL
def test_no_such_function_or_global_var(self):
self.fix_path()
from re_python_pysrc import ffi
lib = ffi.dlopen(self.extmod)
e = raises(ffi.error, getattr, lib, 'no_such_function')
assert str(e.value).startswith(
"symbol 'no_such_function' not found in library '")
e = raises(ffi.error, getattr, lib, 'no_such_globalvar')
assert str(e.value).startswith(
"symbol 'no_such_globalvar' not found in library '")
def test_check_version(self):
import _cffi_backend
e = raises(ImportError, _cffi_backend.FFI,
"foobar", _version=0x2594)
assert str(e.value).startswith(
"cffi out-of-line Python module 'foobar' has unknown version")
def test_selfref(self):
# based on cffi issue #429
self.fix_path()
from re_python_pysrc import ffi
ffi.new("selfref_ptr_t")
def test_dlopen_handle(self):
import _cffi_backend, sys
self.fix_path()
from re_python_pysrc import ffi
if self.dl_libpath is None:
py.test.skip("uses 'dl' explicitly")
lib1 = ffi.dlopen(self.dl_libpath)
handle = lib1.dlopen(self.extmod.encode(sys.getfilesystemencoding()),
_cffi_backend.RTLD_LAZY)
assert ffi.typeof(handle) == ffi.typeof("void *")
assert handle
lib = ffi.dlopen(handle)
assert lib.add42(-10) == 32
assert type(lib.add42) is _cffi_backend.FFI.CData
err = lib1.dlclose(handle)
assert err == 0
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h26q@cw!pa#7*jjx$sda&0*c0&u&alf4^a)hwoh4j+6)j5y*&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
#!/usr/bin/python3
"""
Defines a class TestFileStorage.
"""
from models.engine.file_storage import FileStorage
import unittest
import models
import os
class TestFileStorage(unittest.TestCase):
"""Represent a TestFileStorage."""
def setUp(self):
"""SetUp method"""
self.file_storage = FileStorage()
def tearDown(self):
"""tearDown method (called by unittest after each test)."""
del self.file_storage
def test_docstring(self):
"""Test docstring for the module and the class"""
self.assertIsNotNone(
models.engine.file_storage.__doc__,
"No docstring in the module"
)
self.assertIsNotNone(FileStorage.__doc__, "No docstring in the class")
def test_permissions_file(self):
"""Test File file_storage.py permissions"""
test_file = os.access("models/engine/file_storage.py", os.R_OK)
self.assertTrue(test_file, "Read permissions")
test_file = os.access("models/engine/file_storage.py", os.W_OK)
self.assertTrue(test_file, "Write Permissions")
test_file = os.access("models/engine/file_storage.py", os.X_OK)
self.assertTrue(test_file, "Execute permissions")
def test_type_object(self):
"""Test type object of FileStorage"""
self.assertEqual(
str(type(self.file_storage)),
"<class 'models.engine.file_storage.FileStorage'>")
self.assertIsInstance(self.file_storage, FileStorage)
|
# Rework of model.py
# https://github.com/ddddwee1/sul
# This wrap-up is targeted for better touching low-level implementations
import layers2 as L
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
tf.enable_eager_execution(config=config)
import numpy as np
import os
import random
import time
PARAM_RELU = 0
PARAM_LRELU = 1
PARAM_ELU = 2
PARAM_TANH = 3
PARAM_MFM = 4
PARAM_MFM_FC = 5
PARAM_SIGMOID = 6
######## util functions ###########
def accuracy(pred,y,name='acc', one_hot=True):
with tf.variable_scope(name):
if one_hot:
correct = tf.equal(tf.cast(tf.argmax(pred,-1),tf.int64),tf.cast(tf.argmax(y,-1),tf.int64))
else:
correct = tf.equal(tf.cast(tf.argmax(pred,-1),tf.int64),tf.cast(y,tf.int64))
acc = tf.reduce_mean(tf.cast(correct,tf.float32))
return acc
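# Hedged usage sketch (editor's illustration; assumes the eager mode enabled above):
#   preds  = tf.constant([[0.1, 0.9], [0.8, 0.2]])
#   labels = tf.constant([1, 0])
#   accuracy(preds, labels, one_hot=False)   # -> 1.0, both argmax predictions match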
##########################
# ETA class. I want to see the ETA. It's too boring to wait here.
class ETA():
def __init__(self,max_value):
self.start_time = time.time()
self.max_value = max_value
self.current = 0
def start(self):
self.start_time = time.time()
self.current = 0
def sec2hms(self,sec):
hm = sec//60
s = sec%60
h = hm//60
m = hm%60
return h,m,s
def get_ETA(self,current,is_string=True):
self.current = current
time_div = time.time() - self.start_time
time_remain = time_div * float(self.max_value - self.current) / float(self.current + 1)
h,m,s = self.sec2hms(int(time_remain))
if is_string:
return '%d:%d:%d'%(h,m,s)
else:
return h,m,s
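# Hedged usage sketch (editor's illustration): typical pattern inside a training loop.
#   eta = ETA(max_iter)
#   for it in range(max_iter):
#       ...                      # one training step
#       if it % 100 == 0:
#           print('ETA:', eta.get_ETA(it))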
########### universal model class ##########
class Model(tf.contrib.checkpoint.Checkpointable):
def __init__(self,*args,**kwargs):
self.initialized = False
self.variables = []
self.initialize(*args,**kwargs)
def initialize(self,*args,**kwargs):
pass
def _gather_variables(self):
self.variables = []
atrs = dir(self)
for i in atrs:
if i[0] == '_':
continue
obj = getattr(self, i)
self.variables += self._gather_variables_recursive(obj)
def _gather_variables_recursive(self, obj):
result = []
if isinstance(obj, list) or isinstance(obj, tuple):
for sub_obj in obj:
result += self._gather_variables_recursive(sub_obj)
elif isinstance(obj, Model) or isinstance(obj, L.Layer):
result += obj.variables
return result
def get_variables(self, layers=None):
if layers is None:
return self.variables
else:
res = []
for l in layers:
res += l.variables
return res
def set_bn_training(self, is_training):
atrs = dir(self)
# print(atrs)
for i in atrs:
if i[0] == '_':
continue
obj = getattr(self, i)
self._set_bn_training_recursive(obj, is_training)
def _set_bn_training_recursive(self, obj, is_training):
if isinstance(obj, list):
for sub_obj in obj:
self._set_bn_training_recursive(sub_obj, is_training)
if isinstance(obj, Model) and obj!=self:
obj.set_bn_training(is_training)
if isinstance(obj, L.batch_norm):
obj.is_training = is_training
def set_bn_epsilon(self, epsilon):
atrs = dir(self)
# print(atrs)
for i in atrs:
if i[0] == '_':
continue
obj = getattr(self, i)
self._set_bn_epsilon_recursive(obj, epsilon)
def _set_bn_epsilon_recursive(self, obj, epsilon):
if isinstance(obj, list):
for sub_obj in obj:
self._set_bn_epsilon_recursive(sub_obj, epsilon)
if isinstance(obj, Model) and obj!=self:
obj.set_bn_epsilon(epsilon)
if isinstance(obj, L.batch_norm):
obj.epsilon = epsilon
def __call__(self, x, *args, **kwargs):
x = tf.convert_to_tensor(x, preferred_dtype=tf.float32)
res = self.forward(x, *args, **kwargs)
if not self.initialized:
self._gather_variables()
self.initialized = True
return res
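# Hedged usage sketch (editor's illustration): subclasses override initialize() and
# forward(); variables are gathered lazily on the first __call__.
#   class TinyNet(Model):
#       def initialize(self):
#           self.c1 = ConvLayer(3, 16, activation=PARAM_RELU, batch_norm=True)
#           self.fc = Dense(10)
#       def forward(self, x):
#           x = self.c1(x)
#           x = flatten(x)
#           return self.fc(x)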
########### universal layer classes ##########
class ConvLayer(Model):
def initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):
self.conv = L.conv2D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self,x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
class ConvLayer1D(Model):
def initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):
self.conv = L.conv1D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self,x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
class ConvLayer3D(Model):
def initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):
self.conv = L.conv3D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self,x):
x = self.conv(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
class DeconvLayer(Model):
def initialize(self, size, outchn, activation=-1, stride=1, usebias=True, pad='SAME', batch_norm=False):
self.deconv = L.deconv2D(size,outchn,stride=stride,usebias=usebias,pad=pad, name=None)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self,x):
x = self.deconv(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
class DeconvLayer3D(Model):
def initialize(self, size, outchn, activation=-1, stride=1, usebias=True, pad='SAME', batch_norm=False):
self.deconv = L.deconv3D(size,outchn,stride=stride,usebias=usebias,pad=pad, name=None)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self,x):
x = self.deconv(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
class Dense(Model):
def initialize(self, outsize, usebias=True, batch_norm=False, activation=-1):
self.fclayer = L.fcLayer(outsize,usebias=usebias)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self,x):
x = self.fclayer(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
class GraphConvLayer(Model):
def initialize(self, outsize, adj_mtx=None, adj_fn=None, usebias=True, activation=-1, batch_norm=False):
self.GCL = L.graphConvLayer(outsize, adj_mtx=adj_mtx, adj_fn=adj_fn, usebias=usebias)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if activation!=-1:
self.activation = L.activation(activation)
def forward(self, x):
x = self.GCL(x)
if self.batch_norm:
x = self.bn(x)
if self.activation_!=-1:
x = self.activation(x)
return x
flatten = L.flatten()
maxPool = L.maxpoolLayer
avgPool = L.avgpoolLayer
########### higher wrapped block ##########
class ResBlock(Model):
def initialize(self, outchn, stride=1, ratio=4, activation=PARAM_RELU):
self.outchn = outchn
# self.stride = stride
self.activ = L.activation(activation)
self.bn = L.batch_norm()
self.l1 = ConvLayer(1, outchn//ratio, activation=PARAM_RELU, batch_norm=True)
self.l2 = ConvLayer(3, outchn//ratio, activation=PARAM_RELU, batch_norm=True, stride=stride)
self.l3 = ConvLayer(1, outchn)
self.shortcut_conv = ConvLayer(1, outchn, activation=PARAM_RELU, stride=stride)
self.shortcut_pool = L.maxpoolLayer(stride)
def forward(self, x):
inshape = x.get_shape().as_list()[-1]
if inshape==self.outchn:
short = self.shortcut_pool(x)
else:
short = self.shortcut_conv(x)
branch = self.bn(x)
branch = self.activ(branch)
branch = self.l1(branch)
branch = self.l2(branch)
branch = self.l3(branch)
return branch + short
class Sequential(Model):
def initialize(self, modules):
self.modules = modules
def forward(self, x):
for m in self.modules:
x = m(x)
return x
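# Hedged usage sketch (editor's illustration): the same kind of stack expressed with
# Sequential, which simply applies the given modules in order.
#   net = Sequential([ConvLayer(3, 16, activation=PARAM_RELU, batch_norm=True),
#                     flatten,
#                     Dense(10)])
#   logits = net(batch_of_images)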
########### saver ##########
class Saver():
def __init__(self, model, optim=None):
self.mod = model
self.obj = tf.contrib.checkpoint.Checkpointable()
self.obj.m = self.mod
self.optim = optim
if optim is None:
self.ckpt = tf.train.Checkpoint(model=self.obj, optimizer_step=tf.train.get_or_create_global_step())
else:
self.ckpt = tf.train.Checkpoint(optimizer=optim, model=self.obj, optimizer_step=tf.train.get_or_create_global_step())
def save(self, path):
print('Saving model to path:',path)
head, tail = os.path.split(path)
if not os.path.exists(head):
os.makedirs(head)
self.ckpt.save(path)
print('Model saved to path:',path)
def restore(self, path, ptype='folder'):
print('Load from:', path)
try:
if ptype=='folder':
last_ckpt = tf.train.latest_checkpoint(path)
print('Checkpoint:', last_ckpt)
if last_ckpt is None:
print('No model found in checkpoint.')
print('Model will auto-initialize after first iteration.')
self.ckpt.restore(last_ckpt)
else:
self.ckpt.restore(path)
print('Finish loading.')
except Exception as e:
print('Model restore failed, Exception:',e)
print('Model will auto-initialize after first iteration.')
######### Gradient accumulator #########
class GradAccumulator():
def __init__(self):
self.steps = 0
self.grads = []
def accumulate(self, grads):
if len(self.grads) == 0:
self.grads = grads
else:
for old_g, new_g in zip(self.grads, grads):
old_g.assign_add(new_g)
self.steps += 1
def get_gradient(self):
res = [i/self.steps for i in self.grads]
self.grads = []
self.steps = 0
return res
def get_step(self):
return self.steps
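# Hedged usage sketch (editor's illustration; names are placeholders): gradients from
# several micro-batches are summed and averaged before a single optimizer step. Note that
# accumulate() calls assign_add() on the stored gradients, so the first accumulated list
# must hold objects that support in-place assignment (e.g. tf.Variable wrappers).
#   acc = GradAccumulator()
#   for micro_batch in micro_batches:
#       grads = compute_gradients(micro_batch)   # hypothetical helper
#       acc.accumulate(grads)
#   optimizer.apply_gradients(zip_grad(acc.get_gradient(), model.get_variables()))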
######### Data Reader Template (serial) ##########
class DataReaderSerial():
def __init__(self, one_hot=None):
self.data_pos = 0
self.val_pos = 0
self.data = []
self.val = []
self.one_hot = False
if one_hot is not None:
self.one_hot = True
self.eye = np.eye(one_hot)
self.load_data()
def get_next_batch(self,BSIZE):
if self.data_pos + BSIZE > len(self.data):
random.shuffle(self.data)
self.data_pos = 0
batch = self.data[self.data_pos : self.data_pos+BSIZE]
x = [i[0] for i in batch]
y = [i[1] for i in batch]
if self.one_hot:
y = self.eye[np.array(y)]
self.data_pos += BSIZE
return x,y
def get_val_next_batch(self, BSIZE):
if self.val_pos + BSIZE >= len(self.val):
batch = self.val[self.val_pos:]
random.shuffle(self.val)
self.val_pos = 0
is_end = True
else:
batch = self.val[self.val_pos : self.val_pos+BSIZE]
is_end = False
x = [i[0] for i in batch]
y = [i[1] for i in batch]
if self.one_hot:
y = self.eye[np.array(y)]
self.val_pos += BSIZE
return x,y, is_end
def get_train_iter(self, BSIZE):
return len(self.data)//BSIZE
def get_val_iter(self, BSIZE):
return len(self.val)//BSIZE + 1
class ListReader():
def __init__(self, one_hot=None):
self.data_pos = 0
self.val_pos = 0
self.data = []
self.val = []
self.one_hot = False
if one_hot is not None:
self.one_hot = True
self.eye = np.eye(one_hot)
self.load_data()
def get_next_batch(self,BSIZE):
if self.data_pos + BSIZE > len(self.data):
random.shuffle(self.data)
self.data_pos = 0
batch = self.data[self.data_pos : self.data_pos+BSIZE]
x = [i[0] for i in batch]
y = [i[1] for i in batch]
if self.one_hot:
y = self.eye[np.array(y)]
self.data_pos += BSIZE
x = [self.process_img(i) for i in x]
return x,y
def get_val_next_batch(self, BSIZE):
if self.val_pos + BSIZE >= len(self.val):
batch = self.val[self.val_pos:]
random.shuffle(self.val)
self.val_pos = 0
is_end = True
else:
batch = self.val[self.val_pos : self.val_pos+BSIZE]
is_end = False
x = [i[0] for i in batch]
y = [i[1] for i in batch]
if self.one_hot:
y = self.eye[np.array(y)]
self.val_pos += BSIZE
x = [self.process_img(i) for i in x]
return x,y, is_end
def get_train_iter(self, BSIZE):
return len(self.data)//BSIZE
def get_val_iter(self, BSIZE):
return len(self.val)//BSIZE + 1
######### Data Reader Template (parallel) ##########
# multi-process to read data
class DataReader():
def __init__(self, data, fn, batch_size, shuffle=False, random_sample=False, processes=2, post_fn=None):
from multiprocessing import Pool
self.pool = Pool(processes)
print('Starting parallel data loader...')
self.process_fn = fn
self.data = data
self.batch_size = batch_size
self.position = batch_size
self.post_fn = post_fn
self.random_sample = random_sample
self.shuffle = shuffle
if shuffle:
random.shuffle(self.data)
self._start_p(self.data[:batch_size])
def _start_p(self, data):
self.ps = []
for i in data:
self.ps.append(self.pool.apply_async(self.process_fn, [i]))
def get_next_batch(self):
# print('call')
# fetch data
res = [i.get() for i in self.ps]
# start new pre-fetch
if self.random_sample:
batch = random.sample(self.data, self.batch_size)
else:
if self.position + self.batch_size > len(self.data):
self.position = 0
if self.shuffle:
random.shuffle(self.data)
batch = self.data[self.position:self.position+self.batch_size]
self.position += self.batch_size
self._start_p(batch)
# post_process the data
if self.post_fn is not None:
res = self.post_fn(res)
return res
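# Hedged usage sketch (editor's illustration; loader_fn/collate_fn are placeholders): fn
# must be a picklable top-level function, since it is dispatched through multiprocessing.Pool.
#   reader = DataReader(sample_list, fn=loader_fn, batch_size=32,
#                       shuffle=True, processes=4, post_fn=collate_fn)
#   for _ in range(iterations):
#       batch = reader.get_next_batch()   # next batch is pre-fetched in worker processes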
######### short-cut functions #########
gradient_reverse = L.gradient_reverse
def pad(x, pad):
if isinstance(pad, list):
x = tf.pad(x, [[0,0],[pad[0],pad[1]], [pad[2],pad[3]], [0,0]])
else:
x = tf.pad(x, [[0,0],[pad,pad],[pad,pad],[0,0]])
return x
def pad3D(x, pad):
if isinstance(pad, list):
x = tf.pad(x, [[0,0],[pad[0],pad[1]], [pad[2],pad[3]], [pad[4], pad[5]], [0,0]])
else:
x = tf.pad(x, [[0,0],[pad,pad],[pad,pad],[pad,pad],[0,0]])
return x
def image_transform(x, H, out_shape=None, interpolation='NEAREST'):
# Will produce an error if 'output_shape' is not specified in eager mode
shape = x.get_shape().as_list()
if out_shape is None:
if len(shape)==4:
out_shape = shape[1:3]
else:
out_shape = shape[:2]
return tf.contrib.image.transform(x, H, interpolation=interpolation, output_shape=out_shape)
def zip_grad(grads, vars):
assert len(grads)==len(vars)
grads_1 = []
vars_1 = []
for i in range(len(grads)):
if grads[i] is not None:
grads_1.append(grads[i])
vars_1.append(vars[i])
assert len(grads_1)!=0
return zip(grads_1, vars_1)
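# Hedged usage sketch (editor's illustration): zip_grad() drops None gradients so the pairs
# can be fed straight to an optimizer, e.g.
#   grads = tape.gradient(loss, variables)
#   optimizer.apply_gradients(zip_grad(grads, variables))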
|
"""
Tests continuous link flap behavior in SONiC.
Parameters:
--orch_cpu_threshold (int): Maximum orchagent CPU utilization (in percent) allowed
as the pass criterion before and after the link flaps.
"""
import logging
import time
import pytest
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common import port_toggle
from tests.platform_tests.link_flap.link_flap_utils import build_test_candidates, toggle_one_link, check_orch_cpu_utilization, check_bgp_routes, check_portchannel_status
from tests.common.utilities import wait_until
from tests.common.devices.eos import EosHost
from tests.common.devices.sonic import SonicHost
pytestmark = [
pytest.mark.disable_loganalyzer,
pytest.mark.topology('any')
]
class TestContLinkFlap(object):
"""
TestContLinkFlap class for continuous link flap
"""
def test_cont_link_flap(self, request, duthosts, nbrhosts, enum_rand_one_per_hwsku_frontend_hostname, fanouthosts, bring_up_dut_interfaces, tbinfo):
"""
Validates that continuous link flap works as expected
Test steps:
1.) Flap all interfaces one by one in 1-3 iteration
to cause BGP Flaps.
2.) Flap all interfaces on peer (FanOutLeaf) one by one 1-3 iteration
to cause BGP Flaps.
3.) Watch for memory (show system-memory) ,orchagent CPU Utilization
and Redis_memory.
Pass Criteria: All routes must be re-learned with < 5% increase in Redis memory, and
orchagent CPU consumption must drop below the threshold within 3 minutes after the flaps stop.
"""
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
orch_cpu_threshold = request.config.getoption("--orch_cpu_threshold")
# Record memory status at start
memory_output = duthost.shell("show system-memory")["stdout"]
logging.info("Memory Status at start: %s", memory_output)
# Record Redis Memory at start
start_time_redis_memory = duthost.shell("redis-cli info memory | grep used_memory_human | sed -e 's/.*:\(.*\)M/\\1/'")["stdout"]
logging.info("Redis Memory: %s M", start_time_redis_memory)
# Record ipv4 route counts at start
sumv4, sumv6 = duthost.get_ip_route_summary()
totalsv4 = sumv4.get('Totals', {})
totalsv6 = sumv6.get('Totals', {})
start_time_ipv4_route_counts = totalsv4.get('routes', 0)
start_time_ipv6_route_counts = totalsv6.get('routes', 0)
logging.info("IPv4 routes: start {}, summary {}".format(start_time_ipv4_route_counts, sumv4))
logging.info("IPv6 routes: start {}, summary {}".format(start_time_ipv6_route_counts, sumv6))
# Make sure orchagent CPU utilization is below orch_cpu_threshold before starting the test.
logging.info("Make sure orchagent CPU utilization is less than %d before link flap", orch_cpu_threshold)
pytest_assert(wait_until(100, 2, 0, check_orch_cpu_utilization, duthost, orch_cpu_threshold),
"Orch CPU utilization {} > orch cpu threshold {} before link flap"
.format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold))
# Flap all interfaces one by one on DUT
for iteration in range(3):
logging.info("%d Iteration flap all interfaces one by one on DUT", iteration + 1)
port_toggle(duthost, tbinfo, watch=True)
# Flap all interfaces one by one on Peer Device
for iteration in range(3):
logging.info("%d Iteration flap all interfaces one by one on Peer Device", iteration + 1)
candidates = build_test_candidates(duthost, fanouthosts, 'all_ports')
pytest_require(candidates, "Didn't find any port that is admin up and present in the connection graph")
for dut_port, fanout, fanout_port in candidates:
toggle_one_link(duthost, dut_port, fanout, fanout_port, watch=True)
config_facts = duthost.get_running_config_facts()
for portchannel in config_facts['PORTCHANNEL'].keys():
pytest_assert(check_portchannel_status(duthost, portchannel, "up", verbose=True),
"Fail: dut interface {}: link operational down".format(portchannel))
# Make Sure all ipv4/ipv6 routes are relearned with jitter of ~5
if not wait_until(120, 2, 0, check_bgp_routes, duthost, start_time_ipv4_route_counts, start_time_ipv6_route_counts):
endv4, endv6 = duthost.get_ip_route_summary()
failmsg = []
failmsg.append(
"IP routes are not equal after link flap: before ipv4 {} ipv6 {}, after ipv4 {} ipv6 {}".format(
sumv4, sumv6, endv4, endv6))
nei_meta = config_facts.get('DEVICE_NEIGHBOR_METADATA', {})
for k in nei_meta.keys():
nbrhost = nbrhosts[k]['host']
if isinstance(nbrhost, EosHost):
res = nbrhost.eos_command(commands=['show ip bgp sum'])
elif isinstance(nbrhost, SonicHost):
res = nbrhost.command('vtysh -c "show ip bgp sum"')
else:
continue
failmsg.append(res['stdout'])
pytest.fail(str(failmsg))
# Record memory status at end
memory_output = duthost.shell("show system-memory")["stdout"]
logging.info("Memory Status at end: %s", memory_output)
# Record orchagent CPU utilization at end
orch_cpu = duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"]
logging.info("Orchagent CPU Util at end: %s", orch_cpu)
# Record Redis Memory at end
end_time_redis_memory = duthost.shell("redis-cli info memory | grep used_memory_human | sed -e 's/.*:\(.*\)M/\\1/'")["stdout"]
logging.info("Redis Memory at start: %s M", start_time_redis_memory)
logging.info("Redis Memory at end: %s M", end_time_redis_memory)
# Calculate diff in Redis memory
incr_redis_memory = float(end_time_redis_memory) - float(start_time_redis_memory)
logging.info("Redis absolute difference: %d", incr_redis_memory)
# Check redis memory only if it is increased else default to pass
if incr_redis_memory > 0.0:
percent_incr_redis_memory = (incr_redis_memory / float(start_time_redis_memory)) * 100
logging.info("Redis Memory percentage Increase: %d", percent_incr_redis_memory)
pytest_assert(percent_incr_redis_memory < 5, "Redis Memory Increase more than expected: {}".format(percent_incr_redis_memory))
# Orchagent CPU should consume < orch_cpu_threshold at last.
logging.info("watch orchagent CPU utilization when it goes below %d", orch_cpu_threshold)
pytest_assert(wait_until(45, 2, 0, check_orch_cpu_utilization, duthost, orch_cpu_threshold),
"Orch CPU utilization {} > orch cpu threshold {} before link flap"
.format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold))
|
# -*- coding: utf-8 -*-
import os
import sys
from news_crawler.spiders import BaseSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from datetime import datetime
sys.path.insert(0, os.path.join(os.getcwd(), "..",))
from news_crawler.items import NewsCrawlerItem
from news_crawler.utils import remove_empty_paragraphs
class Sputniknews(BaseSpider):
"""Spider for Sputniknews"""
name = 'sputniknews'
rotate_user_agent = True
allowed_domains = ['snanews.de']
start_urls = ['https://snanews.de']
# Exclude pages without relevant articles
rules = (
Rule(
LinkExtractor(
allow=(r'snanews\.de\/\d+\/\w.*\.html$'),
deny=(r'snanews\.de\/category\_multimedia\/',
r'snanews\.de\/location\_oesterreich\/',
r'snanews\.de\/\?modal\=feedback',
r'snanews\.de\/docs\/impressum\.html',
r'snanews\.de\/docs\/cookie\.html',
r'snanews\.de\/docs\/nutzungsrichtlinien\.html',
r'snanews\.de\/docs\/ueber\_uns\.html',
r'snanews\.de\/docs\/privacy\_policy\.html'
)
),
callback='parse_item',
follow=True
),
)
def parse_item(self, response):
"""
Checks article validity. If valid, it parses it.
"""
# Check date validity
creation_date = response.xpath('//div[@itemprop="datePublished"]/text()').get()
if not creation_date:
return
creation_date = datetime.fromisoformat(creation_date.split('T')[0])
if self.is_out_of_date(creation_date):
return
# Extract the article's paragraphs
paragraphs = [node.xpath('string()').get().strip() for node in response.xpath('//div[@class="article__text"] | //div[@class="article__quote-text"]')]
paragraphs = remove_empty_paragraphs(paragraphs)
text = ' '.join(paragraphs)
# Check article's length validity
if not self.has_min_length(text):
return
# Check keywords validity
if not self.has_valid_keywords(text):
return
# Parse the valid article
item = NewsCrawlerItem()
item['news_outlet'] = 'sputniknews'
item['provenance'] = response.url
item['query_keywords'] = self.get_query_keywords()
# Get creation, modification, and crawling dates
item['creation_date'] = creation_date.strftime('%d.%m.%Y')
last_modified = response.xpath('//div[@itemprop="dateModified"]/text()').get()
item['last_modified'] = datetime.fromisoformat(last_modified.split('T')[0]).strftime('%d.%m.%Y')
item['crawl_date'] = datetime.now().strftime('%d.%m.%Y')
# Get authors
authors = response.xpath('//div[@itemprop="creator"]/div[@itemprop="name"]/text()').getall()
item['author_person'] = authors if authors else list()
item['author_organization'] = list()
# Extract keywords, if available
news_keywords = response.xpath('//meta[@name="keywords"]/@content').get()
item['news_keywords'] = news_keywords.split(', ') if news_keywords else list()
# Get title, description, and body of article
title = response.xpath('//meta[@property="og:title"]/@content').get()
description = response.xpath('//meta[@property="og:description"]/@content').get()
# Body as dictionary: key = headline (if available, otherwise empty string), values = list of corresponding paragraphs
body = dict()
if response.xpath('//h3[@class="article__h2"] | //h2[@class="article__h2"]'):
# Extract headlines
headlines = [h2.xpath('string()').get().strip() for h2 in response.xpath('//h3[@class="article__h2"] | //h2[@class="article__h2"]')]
# Extract the paragraphs and headlines together
text = [node.xpath('string()').get().strip() for node in response.xpath('//div[@class="article__text"] | //div[@class="article__quote-text"] | //h3[@class="article__h2"] | //h2[@class="article__h2"]')]
# Extract paragraphs between the abstract and the first headline
body[''] = remove_empty_paragraphs(text[:text.index(headlines[0])])
# Extract paragraphs corresponding to each headline, except the last one
for i in range(len(headlines)-1):
body[headlines[i]] = remove_empty_paragraphs(text[text.index(headlines[i])+1:text.index(headlines[i+1])])
# Extract the paragraphs belonging to the last headline
body[headlines[-1]] = remove_empty_paragraphs(text[text.index(headlines[-1])+1:])
else:
# The article has no headlines, just paragraphs
body[''] = paragraphs
item['content'] = {'title': title, 'description': description, 'body':body}
# Recommendations to articles from the same news outlet are not extracted; store an empty list
item['recommendations'] = list()
item['response_body'] = response.body
yield item
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
def midlgeom(a):
if len(a) != 0:
res = 0
for i in range(len(a)):
res += 1/a[i]
return len(a) / res
else:
return None
raw = input('Enter a sequence of numbers separated by spaces: ')
mas = [int(i) for i in raw.split(' ') if i.isdigit()]
print(midlgeom(mas))
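# Note: despite the name, the implemented formula len(a) / sum(1 / a_i) is the harmonic mean.
# Quick illustrative check (assumed example): the harmonic mean of [1, 2, 4] is 3 / (1 + 1/2 + 1/4) = 12/7.
assert abs(midlgeom([1, 2, 4]) - 12 / 7) < 1e-9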
|
import sys
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
print("Failed to load fast LibYAML bindings. You should install them to speed up kluctl.", file=sys.stderr)
from yaml import SafeLoader, SafeDumper
def construct_value(load, node):
if not isinstance(node, yaml.ScalarNode):
raise yaml.constructor.ConstructorError(
"while constructing a value",
node.start_mark,
"expected a scalar, but found %s" % node.id, node.start_mark
)
yield str(node.value)
# See https://github.com/yaml/pyyaml/issues/89
SafeLoader.add_constructor(u'tag:yaml.org,2002:value', construct_value)
def multiline_str_representer(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
class MultilineStrDumper(SafeDumper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_representer(str, multiline_str_representer)
def yaml_load(s):
return yaml.load(s, Loader=SafeLoader)
def yaml_load_all(s):
return list(yaml.load_all(s, Loader=SafeLoader))
def yaml_load_file(path, all=False):
with open(path) as f:
if all:
y = yaml_load_all(f)
else:
y = yaml_load(f)
return y
def yaml_dump(y, stream=None):
return yaml.dump(y, stream=stream, Dumper=MultilineStrDumper, sort_keys=False)
def yaml_dump_all(y, stream=None):
return yaml.dump_all(y, stream=stream, Dumper=MultilineStrDumper, sort_keys=False)
def yaml_save_file(y, path):
with open(path, mode='w') as f:
yaml_dump(y, f)
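# Usage sketch (illustrative, not part of the original module): multiline strings
# round-trip through MultilineStrDumper in literal block (|) style.
#
#   doc = yaml_load("msg: |-\n  first line\n  second line\n")
#   print(yaml_dump(doc))  # re-emits the value of "msg" with the |- block style preserved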
|
"""
Plotting data points
--------------------
GMT shines when it comes to plotting data on a map. We can use some sample data that is
packaged with GMT to try this out. PyGMT provides access to these datasets through the
:mod:`pygmt.datasets` package. If you don't have the data files already, they are
automatically downloaded and saved to a cache directory the first time you use them
(usually ``~/.gmt/cache``).
"""
import pygmt
########################################################################################
# For example, let's load the sample dataset of tsunami generating earthquakes around
# Japan (:func:`pygmt.datasets.load_japan_quakes`). The data is loaded as a
# :class:`pandas.DataFrame`.
data = pygmt.datasets.load_japan_quakes()
# Set the region for the plot to be slightly larger than the data bounds.
region = [
data.longitude.min() - 1,
data.longitude.max() + 1,
data.latitude.min() - 1,
data.latitude.max() + 1,
]
print(region)
print(data.head())
########################################################################################
# We'll use :meth:`pygmt.Figure.plot` method to plot circles on the locations of the
# hypocenters of the earthquakes.
fig = pygmt.Figure()
fig.basemap(region=region, projection="M8i", frame=True)
fig.coast(land="black", water="skyblue")
fig.plot(x=data.longitude, y=data.latitude, style="c0.3c", color="white", pen="black")
fig.show()
########################################################################################
# We used the style ``c0.3c`` which means "circles of 0.3 centimeter size". The ``pen``
# attribute controls the outline of the symbols and the ``color`` controls the fill.
#
# We can map the size of the circles to the earthquake magnitude by passing an array to
# the ``sizes`` argument. Because the magnitude is on a logarithmic scale, it helps to
# show the differences by scaling the values using a power law.
fig = pygmt.Figure()
fig.basemap(region=region, projection="M8i", frame=True)
fig.coast(land="black", water="skyblue")
fig.plot(
x=data.longitude,
y=data.latitude,
sizes=0.02 * (2 ** data.magnitude),
style="cc",
color="white",
pen="black",
)
fig.show()
########################################################################################
# Notice that we didn't include the size in the ``style`` argument this time, just the
# symbol ``c`` (circles) and the unit ``c`` (centimeter). So in this case, the sizes
# will be interpreted as being in centimeters.
#
# We can also map the colors of the markers to the depths by passing an array to the
# ``color`` argument and providing a colormap name (``cmap``). We can even use the new
# matplotlib colormap "viridis". Here, we first create a continuous colormap
# ranging from the minimum depth to the maximum depth of the earthquakes
# using :func:`pygmt.makecpt`, then set ``cmap=True`` in :func:`pygmt.Figure.plot`
# to use the colormap. At the end of the plot, we also plot a colorbar showing
# the colormap used in the plot.
#
fig = pygmt.Figure()
fig.basemap(region=region, projection="M8i", frame=True)
fig.coast(land="black", water="skyblue")
pygmt.makecpt(cmap="viridis", series=[data.depth_km.min(), data.depth_km.max()])
fig.plot(
x=data.longitude,
y=data.latitude,
sizes=0.02 * 2 ** data.magnitude,
color=data.depth_km,
cmap=True,
style="cc",
pen="black",
)
fig.colorbar(frame='af+l"Depth (km)"')
fig.show()
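########################################################################################
# If you want to keep the result, the figure can also be written to a file with
# :meth:`pygmt.Figure.savefig` (the file name below is only an example).
# fig.savefig("japan_quakes.png")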
|
#!/usr/bin/env python
"""Copyright (c) 2005-2019, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Script to run gcov on source files after a Coverage build has been done,
# and summarise the results.
# The script takes arguments:
# <output_dir> The directory in which to generate summary files and
# an index page.
# <build_type> The build type used; defaults to Coverage.
import itertools
import glob
import os
import sys
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path[0:0] = [parent_path]
import BuildTypes
# Arguments to gcov
# -l Create long file names for included source files.
# -p Preserve complete path information in the names of generated .gcov files.
gcov_flags = ' -lp '
# Get output dir and build type object
if len(sys.argv) < 2:
print "Syntax error."
print "Usage:", sys.argv[0], "<test output dir> [<build type> [<project> ...]]"
sys.exit(1)
output_dir = sys.argv[1]
if len(sys.argv) > 2:
build_type = sys.argv[2]
projects = sys.argv[3:]
else:
build_type = 'Coverage'
projects = []
build = BuildTypes.GetBuildType(build_type)
# Remove any old output files/test results from output_dir
for filename in os.listdir(output_dir):
os.remove(os.path.join(output_dir, filename))
# Find .gcda files to determine which source files to run gcov on
# First, find appropriate build directories
build_dirs = glob.glob('*/build/' + build.build_dir)
build_dirs.extend(map(lambda p: os.path.join(p, 'build', build.build_dir), projects))
# Now find .gcda files within there
gcda_files = []
for build_dir in build_dirs:
for dirpath, dirnames, filenames in os.walk(build_dir):
for filename in filenames:
if filename[-5:] == '.gcda':
gcda_files.append({'dir': dirpath, 'file': filename})
# Run gcov on all the .cpp files which have .gcda files.
for gcda_file in gcda_files:
# For added interest, the source file to process is in different locations
# depending on whether it is a test or not.
if gcda_file['file'][:4] == 'Test' or \
gcda_file['dir'][-5:] == '/test':
#gcda_file['dir'].find('/test/') != -1:
# .cpp file is in the same folder
os.system('gcov -o ' + gcda_file['dir'] + gcov_flags +
os.path.join(gcda_file['dir'], gcda_file['file'][:-4] + 'cpp'))
else:
# .cpp file is contained within the Chaste source tree
# gcda_file['dir'] should look something like mesh/build/coverage/src/reader
# We then want to look in mesh/src/reader
try:
toplevel, rest = gcda_file['dir'].split('build')
except:
print gcda_file
raise
# Get rid of slashes (or system equivalent)
toplevel = os.path.dirname(toplevel)
# Drop the '/coverage/'
rest = rest.split(os.path.sep, 2)[-1]
# Run gcov
os.system('gcov -o ' + gcda_file['dir'] + gcov_flags +
os.path.join(toplevel, rest, gcda_file['file'][:-4] + 'cpp'))
# Now find all our source files
src_dirs = glob.glob('*/src')
src_dirs.remove('apps/src')
src_dirs.extend(map(lambda p: os.path.join(p, 'src'), projects))
src_files = []
for src_dir in src_dirs:
for dirpath, dirnames, filenames in os.walk(src_dir):
for filename in filenames:
if filename[-4:] in ['.cpp', '.hpp']:
src_files.append({'dir': dirpath, 'file': filename})
def coverage_ignore(src_file):
"""Whether to ignore the fact that a source file is not used.
If a file contains only typedefs, for example, this is not an error.
For .hpp files we check this by looking for the presence of either
'template' or 'class' at the start of a line. If neither are found,
we assume the file contains no real code.
This will only work if header files don't contain non-template function
definitions, which should be the case if we're being good programmers.
Unfortunately the boost serialization tweaking file "SerializationExportWrapper.hpp"
has some templated definitions which are not code, for this reason we only
scrape the file for "template" or "class" definitions that are not surrounded
by COVERAGE_IGNORE.
"""
ignore = False
if src_file['dir'].endswith('fortests'):
# 'Source' code that is only used for tests, and hence coverage doesn't
# matter.
ignore = True
elif src_file['file'] == 'triangle.cpp' or src_file['file'] == 'tetgen.cpp' or src_file['file'] == 'predicates.cpp':
# We don't try to cover other people's code
ignore = True
elif src_file['file'] in ['HeartRegionCodes.cpp', 'Version.hpp']:
# Special cases
ignore = True
elif src_file['file'][-4:] == '.hpp':
ignore = True
fp = open(os.path.join(src_file['dir'], src_file['file']))
code = True
for line in fp:
if line.find('// LCOV_EXCL_START') != -1:
code = False
elif line.find('// LCOV_EXCL_STOP') != -1:
code = True
if code and (line.startswith('template') or line.startswith('class ')):
ignore = False
break
fp.close()
return ignore
for src_file in src_files:
# Mangle the name like gcov does
mangled_dir = src_file['dir'].replace(os.path.sep, '#')
# Find .gcov files relating to this source file
gcov_files = glob.glob('*' + mangled_dir + '#' + src_file['file'] + '.gcov')
# Open all the files, and an output file
gcov_fps = [open(gcov_file) for gcov_file in gcov_files]
out_file_name = os.path.join(output_dir, mangled_dir + '#' + src_file['file'])
out_file_name = out_file_name.replace('#', '-')
out_file = open(out_file_name, 'w')
# Now go through them line by line in lock-step,
# aggregating line execution counts
covered_line_count, missed_line_count, warn, ignore = 0, 0, True, False
for lines in itertools.izip(*gcov_fps):
aggregated_count = 0
maybe_not_code, really_uncovered = False, False
for line in lines:
count, line_no, src_line = line.split(':', 2)
count, line_no = count.strip(), line_no.strip()
if src_line.find('// LCOV_EXCL_START') != -1:
ignore = True
out_file.write("%9s:%5s:%s" % ('ignored', line_no, src_line))
break
elif src_line.find('// LCOV_EXCL_STOP') != -1:
ignore = False
out_file.write("%9s:%5s:%s" % ('ignored', line_no, src_line))
break
if line_no == 0:
# This is a gcov header line; what it is doesn't matter
out_file.write(line)
break
if count == '-':
# This line "isn't code". This may be because it's blank, a comment, or
# similar. Or it may be because it's within a templated method that hasn't
# been instantiated in this particular execution, but it might be in another.
maybe_not_code = True
elif count == '#####' or count == '=====':
# The line was really uncovered here, so it must be code.
# From gcov documentation, # indicates reachable by non-exceptional paths;
# = only by an exceptional path (e.g. catch block).
really_uncovered = True
else:
aggregated_count += int(count)
else:
if aggregated_count == 0:
if maybe_not_code and not really_uncovered:
# This really wasn't a code line (or the template is *never* instantiated).
# Would be nice to differentiate these cases, but doing so is decidedly
# non-trivial.
aggregated_count = '-'
else:
src_line_stripped = src_line.strip()
# gcov is buggy: it claims some non-code lines are uncovered.
# There are some other cases it gets wrong for better reasons too.
if not (ignore or src_line_stripped in ['{', '}', 'NEVER_REACHED;'] or
(src_line_stripped.startswith('return') and
src_line_stripped[6] in [';', ' ']) or
src_line_stripped.startswith('TERMINATE(') or
src_line_stripped.startswith('assert(DIM') or
src_line_stripped.startswith('assert(ELEM_DIM') or
src_line_stripped.startswith('assert(SPACE_DIM') or
src_line_stripped.startswith('assert(ELEMENT_DIM') or
src_line_stripped.startswith('EXCEPT_IF_NOT(ELEMENT_DIM') or
src_line_stripped.startswith('#') or
src_line_stripped.startswith('EXPORT_TEMPLATE') or
src_line_stripped.startswith('template class ') or
(src_line_stripped.startswith('virtual ') and src_line_stripped.endswith('(')) or
(src_line_stripped.startswith('catch ') and #Line is catch (...)
src_line_stripped[-1] == ')') or
src_line_stripped.startswith('class ') or
#Method definition (possibly). Currently overlaps with previous 'catch' ignore
(len(src_line_stripped) > 0 and
(src_line_stripped[-1] == ')' or src_line_stripped.endswith(') const')))
):
warn = False
aggregated_count = '#####'
#print 'Full details of coverage: ', src_line_stripped,'\t',src_file,'\t',aggregated_count,'\t', line_no,'\t', src_line
else:
aggregated_count = 'ignored'
missed_line_count += 1
else:
covered_line_count += 1
out_file.write("%9s:%5s:%s" % (aggregated_count, line_no, src_line))
# Output a summary
if not gcov_files:
# No gcov files found for this source file.
# This may not be an error, if the source file in question is an .hpp file with
# an associated .cpp file containing all the code for the class.
##print src_file
if src_file['file'][-4:] == '.hpp' and \
os.path.exists(os.path.join(src_file['dir'], src_file['file'][:-3]+'cpp')):
status = '' # So output file will be deleted
else:
out_file.write("This source file wasn't used at all!\n\nFailed 1 of 1 test\n")
status = "1_1"
elif not ignore and missed_line_count == 0:
out_file.write('\nOK!\n\n')
status = 'OK'
else:
counts = (missed_line_count, missed_line_count+covered_line_count)
out_file.write('\nFailed %d of %d tests\n\n' % counts)
status = "%d_%d" % counts
if warn:
status = 'warn_' + status
if ignore:
status = 'ignore_' + status
if coverage_ignore(src_file):
# All special case ignorable files (not just ones with partial coverage)
status = ''
# Close all files
[fp.close() for fp in gcov_fps]
out_file.close()
# Alter file name to indicate summary
if status:
os.rename(out_file_name, out_file_name + '.' + status + '.0')
else:
os.remove(out_file_name)
# Now remove .gcov files from the Chaste root directory
for filename in os.listdir('.'):
if filename[-5:] == '.gcov':
os.remove(filename)
# And generate a summary page
os.system('python python/DisplayTests.py '+output_dir+' '+build_type)
|
#!/usr/bin/env python3
""" Health authority back end REST and static content server """
__copyright__ = """
Copyright 2020 Diomidis Spinellis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import argparse
from dp3t.protocols.server_database import ServerDatabase
from epidose.common.daemon import Daemon
from flask import Flask, abort, jsonify, request, send_from_directory
import logging
from os.path import basename, dirname
API_VERSION = "1"
app = Flask("ha-server")
db = None
FILTER_LOCATION = "/var/lib/epidose/filter.bin"
DATABASE_LOCATION = "/var/lib/epidose/server-database.db"
UPDATE_LOCATION = "/var/lib/epidose/update.sh"
def shutdown_server():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
@app.before_request
def before_request():
global db
if not db:
db = ServerDatabase(DATABASE_LOCATION)
db.connect(reuse_if_open=True)
@app.after_request
def after_request(response):
global db
if not app.config["TESTING"]:
db.close()
return response
@app.route("/filter", methods=["GET"])
def filter():
"""Send the Cuckoo filter as a static file.
In a production deployment this should be handled by the front-end server,
such as nginx.
"""
return send_from_directory(dirname(FILTER_LOCATION), basename(FILTER_LOCATION))
@app.route("/update", methods=["GET"])
def update():
"""Send the update shell script as a static file."""
return send_from_directory(dirname(UPDATE_LOCATION), basename(UPDATE_LOCATION))
@app.route("/shutdown")
def shutdown():
if app.debug:
shutdown_server()
return "Server shutting down..."
else:
abort(405)
@app.route("/version", methods=["GET"])
def version():
return jsonify({"version": API_VERSION})
@app.route("/add_contagious", methods=["POST"])
def add_contagious():
content = request.json
with db.atomic():
logger.debug(f"Add new data with authorization {content['authorization']}")
# TODO: Check authorization
for rec in content["data"]:
epoch = rec["epoch"]
seed = bytes.fromhex(rec["seed"])
db.add_epoch_seed(epoch, seed)
logger.debug(f"Add {epoch} {seed.hex()}")
# TODO: Delete authorization
return "OK"
def initialize(args):
"""Initialize the server's database and logger. """
global daemon
daemon = Daemon("ha_server", args)
# Setup logging
global logger
logger = daemon.get_logger()
# Connect to the database
global db
db = ServerDatabase(args.database)
def main():
parser = argparse.ArgumentParser(
description="Health authority back end REST and static content server "
)
parser.add_argument(
"-d", "--debug", help="Run in debug mode logging to stderr", action="store_true"
)
global DATABASE_LOCATION
parser.add_argument(
"-D",
"--database",
help="Specify the database location",
default=DATABASE_LOCATION,
)
global FILTER_LOCATION
parser.add_argument(
"-f",
"--filter",
help="Specify the location of the Cuckoo filter",
default=FILTER_LOCATION,
)
parser.add_argument(
"-s",
"--server-name",
help="Specify the server name (0.0.0.0 for externally visible)",
default="127.0.0.1",
)
parser.add_argument("-p", "--port", help="Set TCP port to listen", type=int)
parser.add_argument(
"-v", "--verbose", help="Set verbose logging", action="store_true"
)
args = parser.parse_args()
initialize(args)
FILTER_LOCATION = args.filter
DATABASE_LOCATION = args.database
# Daemonize with gunicorn or other means, because the daemonize
# module has trouble dealing with the lock files when the app
# reloads itself.
app.run(debug=args.debug, host=args.server_name, port=args.port)
if __name__ == "__main__":
main()
else:
global logger
logger = logging.getLogger("gunicorn.error")
|
import numpy as np
from sklearn.metrics import roc_auc_score
from numba import jit
def array2str(tmp_array, sep = " "):
str_list = ["{:.3f}".format(tmp_item) for tmp_item in tmp_array]
return sep.join(str_list)
def generate_sorted_groups(pred, y, a):
a_idx = np.where(a == 0)
b_idx = np.where(a == 1)
b_score = pred[b_idx].reshape(-1)
b_index = np.argsort(-b_score)
b_score_sort = b_score[b_index]
b_label = y[b_idx]
b_label_sort = b_label[b_index]
a_score = pred[a_idx].reshape(-1)
a_index = np.argsort(-a_score)
a_score_sort = a_score[a_index]
a_label = y[a_idx]
a_label_sort = a_label[a_index]
return a_score_sort,b_score_sort,a_label_sort,b_label_sort
def cal_fairness_metric_by_groups(a_score, b_score, a_label, b_label, metric = "xauc"):
if metric == "xauc":
metric_ab, metric_ba, _ = xAUC_fast(a_score, b_score, a_label, b_label)
else:
metric_ab, metric_ba = pairwise_fast(a_score, b_score, a_label, b_label)
return abs(metric_ab - metric_ba),metric_ab,metric_ba
def cal_fairness_metric(pred, y, a, metric = "xauc"):
a_idx, b_idx = np.where(a == 0), np.where(a == 1)
a_score, b_score = pred[a_idx].reshape(-1), pred[b_idx].reshape(-1)
a_label, b_label = y[a_idx].reshape(-1), y[b_idx].reshape(-1)
if metric == "xauc":
metric_ab, metric_ba, _ = xAUC_fast(a_score, b_score, a_label, b_label)
else:
metric_ab, metric_ba = pairwise_fast(a_score, b_score, a_label, b_label)
return abs(metric_ab - metric_ba),metric_ab,metric_ba
def AUC(score, label):
# Brute-force AUC: count positive/negative pairs where the positive example scores higher
sum_ = 0
num = len(label)
for i in range(num):
for j in range(num):
if label[i]==1 and label[j]==0:
if score[i]>score[j]:
sum_ += 1
return sum_/(np.sum(label)*(num-np.sum(label))), sum_
def xAUC(a_score, b_score, a_label, b_label):
sum_ab = 0
sum_ba = 0
numa = len(a_label)
numb = len(b_label)
a_num1 = np.sum(a_label)
a_num0 = len(a_label) - a_num1
b_num1 = np.sum(b_label)
b_num0 = len(b_label) - b_num1
for i in range(numa):
for j in range(numb):
if a_label[i] ==1 and b_label[j] ==0:
if a_score[i]>b_score[j]:
sum_ab+=1
elif a_label[i]==0 and b_label[j]==1:
if b_score[j]>a_score[i]:
sum_ba+=1
return sum_ab/(a_num1*b_num0), sum_ba/(b_num1*a_num0), sum_ab+sum_ba
def xAUC_fast(a_score, b_score, a_label, b_label):
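# xAUC(a|b): probability that a positive example from group a is ranked above a negative
# example from group b (and symmetrically xAUC(b|a)); each term is a standard ROC-AUC
# computed over the corresponding cross-group pool of scores.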
a_num1 = np.sum(a_label)
a_num0 = len(a_label) - a_num1
b_num1 = np.sum(b_label)
b_num0 = len(b_label) - b_num1
a_score1,a_score0 = a_score[a_label == 1],a_score[a_label == 0]
b_score1,b_score0 = b_score[b_label == 1],b_score[b_label == 0]
ab_label = np.concatenate((np.ones(int(a_num1)),np.zeros(int(b_num0))))
ab_score = np.concatenate((a_score1,b_score0))
xauc_ab = roc_auc_score(ab_label,ab_score)
ba_label = np.concatenate((np.ones(int(b_num1)),np.zeros(int(a_num0))))
ba_score = np.concatenate((b_score1,a_score0))
xauc_ba = roc_auc_score(ba_label,ba_score)
return xauc_ab, xauc_ba, xauc_ab * a_num1 * b_num0 + xauc_ba * b_num1 * a_num0
def post_score(train_score, train_score_post, test_score):
tep_id = 0
bins = [[] for i in range(len(train_score)+1)]
for i in range(len(test_score)):
s = test_score[i]
if s>train_score[0]:
bins[0].append(s)
elif s<=train_score[-1]:
bins[-1].append(s)
else:
for j in range(tep_id,len(train_score)):
if train_score[j-1]>=s and train_score[j]<s:
bins[j].append(s)
tep_id = j
break
changed_b_score = []
for bin_ in range(len(bins)):
for item in range(len(bins[bin_])):
num = (len(bins[bin_]))
if bin_==0:
changed_b_score.append((item)*train_score_post[bin_]/num+(num-item)/num)
elif bin_==len(train_score_post):
changed_b_score.append((num -item)*train_score_post[bin_-1]/num)
else:
changed_b_score.append((item)*train_score_post[bin_]/num + (num-item)*train_score_post[bin_-1]/num)
return np.array(changed_b_score)
@jit(nopython=True)
def maxAUC(a_label, b_label):
M = len(a_label)-1
N = len(b_label)-1
a_1 = np.sum(a_label)
b_1 = np.sum(b_label)
path = np.zeros((M+1, N+1,2,2))
cost = np.zeros((M+1, N+1))
for i in range(1,M+1):
if a_label[i]==1:
cost[i,0] = N-b_1 + cost[i-1, 0]
else:
cost[i,0] = cost[i-1,0]
path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
for i in range(1,N+1):
if b_label[i]==1:
cost[0, i] = cost[0,i-1]+ M - a_1
else:
cost[0, i] = cost[0,i-1]
path[0,i,:,:] = np.array([[0, i-1],[0, i]])
for i in range(2, M+1+N+1):
for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
if i-j+1>N or a_label[j]==0:
tep_b = 0
else:
tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
if j+1>M or b_label[i-j]==0:
tep_a = 0
else:
tep_a = M - j -np.sum(a_label[j+1:])
if cost[j-1, i-j] + tep_b > cost[j, i-j-1] + tep_a:
cost[j, i-j] = cost[j-1, i-j] + tep_b
path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
else:
cost[j, i-j] = cost[j, i-j-1] + tep_a
path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
return cost[M,N], path
@jit(nopython=True)
def xAUC_post(a_label, b_label, lamb):
M = len(a_label)-1
N = len(b_label)-1
a_1 = np.sum(a_label)
b_1 = np.sum(b_label)
a_1_b_0 = a_1*(N-b_1)
b_1_a_0 = b_1*(M - a_1)
path = np.zeros((M+1, N+1,2,2))
cost_unfair = np.zeros((M+1, N+1))
cost = np.zeros((M+1, N+1))
for i in range(1,M+1):
if a_label[i]==1:
cost_unfair[i, 0] = (N-b_1)/a_1_b_0*lamb + cost_unfair[i-1,0]
cost[i,0] = N-b_1 + cost[i-1, 0]
else:
cost_unfair[i, 0] = cost_unfair[i-1,0]
cost[i,0] = cost[i-1,0]
path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
for i in range(1,N+1):
if b_label[i]==1:
cost_unfair[0,i] = -(M-a_1)/b_1_a_0*lamb + cost_unfair[0, i-1]
cost[0, i] = cost[0,i-1] + M - a_1
else:
cost[0, i] = cost[0,i-1]
cost_unfair[0, i] = cost_unfair[0,i-1]
path[0,i,:,:] = np.array([[0, i-1],[0, i]])
for i in range(2, M+1+N+1):
for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
if i-j+1>N or a_label[j]==0:
tep_b = 0
tep_unfair_b = 0
else:
tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
tep_unfair_b = tep_b/a_1_b_0*lamb
if j+1>M or b_label[i-j]==0:
tep_a = 0
tep_unfair_a = 0
else:
tep_a = M - j -np.sum(a_label[j+1:])
tep_unfair_a = -tep_a/b_1_a_0*lamb
if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):
cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]
cost[j, i-j] = cost[j-1, i-j] + tep_b
path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
else:
cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]
cost[j, i-j] = cost[j, i-j-1] + tep_a
path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
return cost, path, cost_unfair
@jit(nopython=True)
def xAUC_post_(a_label, b_label, lamb):
M = len(a_label)-1
N = len(b_label)-1
a_1 = np.sum(a_label)
b_1 = np.sum(b_label)
a_1_b_0 = a_1*(N-b_1)
b_1_a_0 = b_1*(M - a_1)
path = np.zeros((M+1, N+1,2,2))
cost_unfair = np.zeros((M+1, N+1))
cost = np.zeros((M+1, N+1))
for i in range(1,M+1):
if a_label[i]==1:
cost_unfair[i, 0] = (N-b_1)/a_1_b_0 * lamb + cost_unfair[i-1,0]
cost[i,0] = N-b_1 + cost[i-1, 0]
else:
cost_unfair[i, 0] = cost_unfair[i-1,0]
cost[i,0] = cost[i-1,0]
path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
for i in range(1,N+1):
if b_label[i]==1:
cost_unfair[0,i] = -(M - a_1) / b_1_a_0 * lamb + cost_unfair[0, i-1]
cost[0, i] = cost[0,i-1] + M - a_1
else:
cost[0, i] = cost[0,i-1]
cost_unfair[0, i] = cost_unfair[0,i-1]
path[0,i,:,:] = np.array([[0, i-1],[0, i]])
for i in range(2, M+1+N+1):
# print(i)
for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
if a_label[j]==0:
tep_b = 0
tep_unfair_b = 0
else:
tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
tep_unfair_b = tep_b/a_1_b_0*lamb
if b_label[i-j]==0:
tep_a = 0
tep_unfair_a = 0
else:
tep_a = M - j -np.sum(a_label[j+1:])
tep_unfair_a = -tep_a/b_1_a_0*lamb
if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):
cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]
cost[j, i-j] = cost[j-1, i-j] + tep_b
path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
else:
cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]
cost[j, i-j] = cost[j, i-j-1] + tep_a
path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
return cost, path, cost_unfair
@jit(nopython=True)
def pairwise_post(a_label, b_label, lamb):
###a, b has been sorted decreasing sort.
M = len(a_label)-1
N = len(b_label)-1
a_1 = np.sum(a_label)
b_1 = np.sum(b_label)
a_1_0 = a_1*((N-b_1)+(M - a_1))
b_1_0 = b_1*((M - a_1)+(N-b_1))
path = np.zeros((M+1, N+1,2,2))
cost_unfair = np.zeros((M+1, N+1))
cost = np.zeros((M+1, N+1))
zeros_mat = np.zeros((M+1, N+1))
zeros_mat[0,0] = ((N-b_1)+(M - a_1))
for i in range(1,N+1):
if b_label[i]==1:
zeros_mat[0,i] = zeros_mat[0,i-1]
else:
zeros_mat[0,i] = zeros_mat[0,i-1]-1
for i in range(1,M+1):
if a_label[i]==0:
zeros_mat[i,0] = zeros_mat[i-1,0]-1
else:
zeros_mat[i,0] = zeros_mat[i-1,0]
for j in range(1,N+1):
if b_label[j]==0:
zeros_mat[i,j] = zeros_mat[i,j-1]-1
else:
zeros_mat[i,j] = zeros_mat[i,j-1]
for i in range(1,M+1):
if a_label[i]==1:
cost_unfair[i, 0] = zeros_mat[i,0]/a_1_0*lamb + cost_unfair[i-1,0]
cost[i,0] = N-b_1 + cost[i-1, 0]
else:
cost_unfair[i, 0] = cost_unfair[i-1,0]
cost[i,0] = cost[i-1,0]
path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
for i in range(1,N+1):
if b_label[i]==1:
cost_unfair[0,i] = -zeros_mat[0,i]/b_1_0*lamb + cost_unfair[0, i-1]
cost[0, i] = cost[0,i-1] + M - a_1
else:
cost[0, i] = cost[0,i-1]
cost_unfair[0, i] = cost_unfair[0, i-1]
path[0,i,:,:] = np.array([[0, i-1],[0, i]])
for i in range(2, M+1+N+1):
for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
if a_label[j]==0:
tep_b = 0
tep_unfair_b = 0
else:
tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
tep_unfair_b = zeros_mat[j,i-j]/a_1_0*lamb
if b_label[i-j]==0:
tep_a = 0
tep_unfair_a = 0
else:
tep_a = M - j -np.sum(a_label[j+1:])
tep_unfair_a = -zeros_mat[j,i-j]/b_1_0*lamb
if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):
cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]
cost[j, i-j] = cost[j-1, i-j] + tep_b
path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
else:
cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]
cost[j, i-j] = cost[j, i-j-1] + tep_a
path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
return cost, path, cost_unfair
def post_b_score(a_score, b_score, a_label, b_label, lamb = 0, _type="xauc"): ## score has to be decreasing.
M = len(a_score)
N = len(b_score)
if _type == "xauc":
cost, path_ , cost_unfair = xAUC_post(a_label, b_label, lamb = lamb)
elif _type=="AUC":
cost, path_ = maxAUC(a_label, b_label)
elif _type=="prf":
cost, path_ , cost_unfair = pairwise_post(a_label, b_label, lamb = lamb)
else:
print("Unknown type")
exit()
@jit(nopython=True)
def pathTrace(path):
trace = []
tep = path[M,N,:,:]
trace.append(tep[-1,:])
trace.append(tep[0,:])
for i in range(M+N-1):
tep = path[int(tep[0][0]), int(tep[0][1]), :,:]
trace.append(tep[0,:])
trace.reverse()
return trace
path = pathTrace(path_)
gap_a = [[] for i in range(M+1)]
for i in range(1,len(path)):
if int(path[i][0])==int(path[i-1][0]):
gap_a[int(path[i][0])].append(int(path[i][1]))
changed_b_score = []
for bin_ in range(len(gap_a)):
for item in range(len(gap_a[bin_])):
num = (len(gap_a[bin_])+1)
if bin_==0:
changed_b_score.append((item+1)*a_score[bin_]/num+(num-item-1)/num)
elif bin_==len(a_score):
changed_b_score.append((num -item-1)*a_score[bin_-1]/num)
else:
changed_b_score.append((item+1)*a_score[bin_]/num + (num-item-1)*a_score[bin_-1]/num)
if _type=="AUC":
return np.array(changed_b_score), 0
else:
return np.array(changed_b_score), cost_unfair[-1, -1]
def pairwise(a_score, b_score, a_label, b_label):
sum_ab = 0
sum_ba = 0
numa = len(a_label)
numb = len(b_label)
a_num1 = np.sum(a_label)
a_num0 = len(a_label) - a_num1
b_num1 = np.sum(b_label)
b_num0 = len(b_label) - b_num1
i_AUCa = roc_auc_score(a_label, a_score)
i_AUCb = roc_auc_score(b_label, b_score)
for i in range(numa):
for j in range(numb):
if a_label[i] ==1 and b_label[j] ==0:
if a_score[i]>b_score[j]:
sum_ab+=1
elif a_label[i]==0 and b_label[j]==1:
if b_score[j]>a_score[i]:
sum_ba+=1
return (sum_ab+i_AUCa*a_num0*a_num1)/(a_num1*(b_num0+a_num0)), (sum_ba+i_AUCb*b_num0*b_num1)/(b_num1*(a_num0+b_num0))
def pairwise_fast(a_score, b_score, a_label, b_label):
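# Pairwise metric: AUC of group-a positives against the pooled negatives of both groups,
# and likewise for group-b positives, computed with roc_auc_score on concatenated arrays.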
a_num1 = np.sum(a_label)
a_num0 = len(a_label) - a_num1
b_num1 = np.sum(b_label)
b_num0 = len(b_label) - b_num1
a_score1,a_score0 = a_score[a_label == 1],a_score[a_label == 0]
b_score1,b_score0 = b_score[b_label == 1],b_score[b_label == 0]
ab_label = np.concatenate((np.ones(int(a_num1)),np.zeros(int(b_num0+a_num0))))
ab_score = np.concatenate((a_score1,a_score0,b_score0))
pair_ab = roc_auc_score(ab_label,ab_score) #[a=1, 0]
ba_label = np.concatenate((np.ones(int(b_num1)),np.zeros(int(a_num0+b_num0))))
ba_score = np.concatenate((b_score1,b_score0, a_score0))
pair_ba = roc_auc_score(ba_label,ba_score) #[b=1, 0]
return pair_ab, pair_ba
def zeros_mat(a, b):
a_label = [0] + a
b_label = [0] + b
M = len(a_label)-1
N = len(b_label)-1
a_1 = np.sum(a)
b_1 = np.sum(b)
zeros_mat = np.zeros((M+1, N+1))
zeros_mat[0,0] = ((N-b_1)+(M - a_1))
for i in range(1,N+1):
if b_label[i]==1:
zeros_mat[0,i] = zeros_mat[0,i-1]
else:
zeros_mat[0,i] = zeros_mat[0,i-1]-1
for i in range(1,M+1):
if a_label[i]==0:
zeros_mat[i,0] = zeros_mat[i-1,0]-1
else:
zeros_mat[i,0] = zeros_mat[i-1,0]
for j in range(1,N+1):
if b_label[j]==0:
zeros_mat[i,j] = zeros_mat[i,j-1]-1
else:
zeros_mat[i,j] = zeros_mat[i,j-1]
return zeros_mat
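# Usage sketch (illustrative only; the data below is made up):
#   pred = np.random.rand(200)            # model scores
#   y = np.random.randint(0, 2, 200)      # binary labels
#   a = np.random.randint(0, 2, 200)      # group membership (0 = group a, 1 = group b)
#   gap, xauc_ab, xauc_ba = cal_fairness_metric(pred, y, a, metric="xauc")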
|
from model.contact import Contact
from random import randrange
def test_contacts_on_homepage(app, db):
contacts_from_homepage = sorted(app.contact.get_contact_list(), key = Contact.id_or_max)
contacts_from_db = sorted(db.get_contact_list(), key = Contact.id_or_max)
assert len(contacts_from_homepage) == len(contacts_from_db)
for i in range(len(contacts_from_homepage)):
assert contacts_from_homepage[i].firstname == contacts_from_db[i].firstname
assert contacts_from_homepage[i].lastname == contacts_from_db[i].lastname
assert contacts_from_homepage[i].address == contacts_from_db[i].address
assert contacts_from_homepage[i].all_phones_from_home_page == contacts_from_db[i].all_phones_from_home_page
assert contacts_from_homepage[i].all_emails_from_home_page == contacts_from_db[i].all_emails_from_home_page
# def test_phones_on_contact_view_page(app):
# contact_from_viewpage = app.contact.get_contact_info_from_view_page(0)  # contact from the contact's view page
# contact_from_editpage = app.contact.get_contact_info_from_edit_page(0)  # contact from the edit form
# assert contact_from_viewpage.phone_home == contact_from_editpage.phone_home
# assert contact_from_viewpage.phone_mobile == contact_from_editpage.phone_mobile
# assert contact_from_viewpage.phone_work == contact_from_editpage.phone_work
# assert contact_from_viewpage.phone2 == contact_from_editpage.phone2
|