def correct_ini_file(config_file):
with open(config_file, mode='r') as raw_open:
raw_open.seek(0)
temp_api_details = raw_open.readlines()
# print(type(temp_api_details[0]))
with open(config_file, mode='w') as rewrite_config:
if temp_api_details[0] != '[TELEGRAM]\n':
rewrite_config.write('[TELEGRAM]\n')
for i in temp_api_details:
rewrite_config.write(i)
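# Usage sketch (the filename below is a hypothetical example, not part of the
# original script): guarantees the file begins with the [TELEGRAM] section
# header that configparser expects before the key/value pairs.
# correct_ini_file('telegram_api.ini')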
|
import json
import unittest2
from normalize import from_json
from normalize import JsonProperty
from normalize import JsonRecord
from normalize import Property
from normalize import Record
from normalize import to_json
from unique.encoding import JSONRecordIO
from testclasses import MultiLevelKeyValue
from testclasses import SimpleKeyValue
def jdump(obj):
return json.dumps(
obj,
indent=4,
separators=(',', ': '),
sort_keys=True,
)
class CustomMarshalled(JsonRecord):
key = Property(json_name="id")
value = Property()
def json_data(self, **args):
jd = super(CustomMarshalled, self).json_data(**args)
jd['oid'] = "1234567"
return jd
@classmethod
def json_to_initkwargs(cls, json_data, kwargs):
return super(CustomMarshalled, cls).json_to_initkwargs(
dict((k, v) for k, v in json_data.items() if k != 'oid'),
kwargs,
)
class SanityTest(unittest2.TestCase):
def test_simple_key(self):
sk = SimpleKeyValue(key="Bob", value="bill")
encoded = JSONRecordIO.encode_str(sk)
self.assertEqual(
encoded, '{\n "key": "Bob",\n "value": "bill"\n}',
)
decoded = JSONRecordIO.decode_str(SimpleKeyValue, encoded)[0]
self.assertEqual(sk, decoded)
def test_multi_level_key(self):
mlkv = MultiLevelKeyValue(
key="Casper",
items=[{"key": "toast", "value": "Charlie_Brown"},
{"key": "ham", "value": "Lucy"},
{"key": "spam", "value": "Franklin"}],
custom_val="Minotaur",
)
# IO using regular normalize
default_json = jdump(to_json(mlkv))
default_decoded = from_json(MultiLevelKeyValue, json.loads(default_json))
self.assertEqual(mlkv, default_decoded)
encoded = JSONRecordIO.encode_str(mlkv)
decoded = JSONRecordIO.decode_str(MultiLevelKeyValue, encoded)[0]
# FIXME: visitor should either respect all JsonRecord hints or none.
decoded.custom_val = 'Minotaur'
self.assertEqual(mlkv, decoded)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""sc2autosave is a utility for reorganizing and renaming Starcraft II files.
Overview
==============
sc2autosave provides a simple mechanism for renaming replay files as they are
copied or moved from a source directory to a destination directory. In between
runs the state is stored in the sc2autosave.dat file saved to the destination
folder. In this way, multiple destination folders with different organizations
and formats can be maintained independently.
General Operation
-------------------
When first run for a given destination directory, sc2autosave scans for all
files since the epoch. Each subsequent run scans only for new files created
since the previous scan time. This behavior can be modified on a run-by-run
basis with the --since DATETIME option. By default the source directory
is scanned recursively. The --depth DEPTH option can limit or eliminate
this recursion.
Files identified as new are then copied to the destination directory. The
--action MOVE option can override this behavior. The default is a good idea
because it ensures that there is a backup copy and allows several different
file structures to be constructed with different sc2autosave configurations for
easy replay navigation. You might keep your replay files redundantly stored
sorted by format, by map, and by matchup for easy lookup later on.
While normally run as a batch process, the --period SECONDS option can be used
to run sc2autosave as a background process, scanning the directory for changes
every SECONDS seconds. This is useful for creating background processes on
operating system start up.
Renaming Replays
--------------------
The --rename option allows you to specify a renaming format string. The string
is constructed in the pythonic (3.0) way, with {:field} indicating the substitution
of a field. The forward slash (/) is a special character here which terminates
a folder name and allows for organization into subdirectories. All other string
characters form the template into which the fields are inserted.
Fields related to dates and times (:date, :datetime, :length fields) can be
formatted through their respective directives (--date, --datetime, --length)
according to python date formatting conventions. Additionally, the player
display format can be refined with the --player-format FORMAT directive which
is interpreted similarly to the --rename FORMAT directive detailed above.
Once content has been defined to your tastes you may wish to get specific about
the ordering of the teams and players on those teams in the replay name. The
--team-order-by and --player-order-by directives can be used for this purpose.
A common preference is to favor specific players (like yourself and friends)
and their teams in the ordering by placing them first in the listing. The
--favored PLAYER1 [PLAYER2 ...] directive supports this preference.
Filtering Replays
---------------------
Once a replay has been scanned and parsed you have an opportunity to filter it
for inclusion in the destination directory. This is useful when constructing
different types of replay packs for distribution and review. Replays
are small and Battle.net has a terrible filesystem-based replay locator; why
not make your life easier with a little duplication?
--filter-player PLAYER [PLAYER ...]
--filter-matchup MATCHUP [MATCHUP ...]
--filter-map NAME [NAME ...]
--filter-length LOW HIGH
--filter-date START END
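For example, a replay pack containing only games with specific friends could be
built with a configuration like the one below (a sketch; of the filters listed
above, only --filter-rule and --filter-player are currently wired into the
argument parser):
sc2autosave \
--source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
--dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/WithFriends \
--filter-rule ALLOW \
--filter-player ShadesofGray Remedy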
Example Configurations
------------------------
This first basic configuration sets up a background process to copy new replays
without renaming to a 'Saved' subdirectory every 10 seconds. The depth 0 option
keeps the script from looking into the 'Saved' subdirectory.
sc2autosave \
--source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
--dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
--period 10 \
--depth 0
This next configuration runs in batch mode using the default renaming format.
sc2autosave \
--source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
--dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
--rename
(ZvP) Lost Temple: ShadesofGray(Z) vs Trisfall(P).SC2Replay
(ZZvPP) Shattered Temple: ShadesofGray(Z), Remedy(Z) vs ProfProbe(P), Trisfall(P).SC2Replay
Here is a heavily customized format that organizes replays into subdirectories
by replay format and favors ShadesofGray in the player and team orderings.
sc2autosave \
--source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
--dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
--rename "{:format}/{:matchup} on {:map}: {:teams}" \
--player-format "{:name}({:play_race})" \
--team-order-by number \
--player-order-by name \
--favored ShadesofGray
1v1/ZvP on Lost Temple: ShadesofGray(Z) vs Trisfall(P).SC2Replay
2v2/ZZvPP on Shattered Temple: ShadesofGray(Z), Remedy(Z) vs ProfProbe(P), Trisfall(P).SC2Replay
Next is another customized format which organizes replays by matchup. It uses
strict player and team ordering by number with no exceptions and formats game
length to show both minutes and seconds.
sc2autosave \
--source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
--dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
--rename "{:matchup}/({:length}) {:map}: {:teams}" \
--player-format "{:name}({:play_race})" \
--team-order-by number \
--player-order-by number \
--length-format "%M:%S"
PvZ/(20:14) Lost Temple: Trisfall(P) vs ShadesofGray(Z).SC2Replay
ZZvPP/(35:40) Shattered Temple: Remedy(Z), ShadesofGray(Z) vs Trisfall(P), ProfProbe(P).SC2Replay
Complete Reference Guide
---------------------------
--source SOURCE_FOLDER
The source folder to scan for replays. Uses recursive scan by default.
--dest DESTINATION_FOLDER
The destination folder to place replays into.
--depth DEPTH
Allows recursion to be limited and/or disabled (with DEPTH=0).
--period SECONDS
Puts sc2autosave into continuous mode, scanning the directory for new
files every SECONDS seconds.
--rename FORMAT
:map - Inserts the map name.
:date - Inserts a string formatted datetime object using --date-format.
:length - Inserts a string formatted time object using --length-format.
:teams - Inserts a comma separated player list. Teams are separated
with a ' vs ' string. Format the player with --player-format.
:format - Inserts the map format (1v1, 2v2, 3v3, etc)
:matchup - Inserts the matchup (ZvZ, PTvTZ, etc). The matchup is
in team order with races ordered alphabetically; not by player!
This makes matchups more consistent and useful for sorting.
--length-format FORMAT
--player-format FORMAT
--date-format FORMAT
--team-order-by FIELD
--player-order-by FIELD
--favored NAME [NAME,...]
TODO: Decide between post-parse and pre-parse filtering, and work out how
post-parse filtering should be implemented.
"""
import argparse
import cPickle
import os
import shutil
import sys
import time
import sc2reader
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def run(args):
# Reset wipes the destination clean so we can start over.
if args.reset:
reset(args)
# Set up validates the destination and source directories.
# It also loads the previous state or creates one as necessary.
state = setup(args)
# We break out of this loop in batch mode and on KeyboardInterrupt
while True:
# The file scan uses the arguments and the state to filter down to
# only new (since the last sync time) files.
for path in scan(args, state):
try:
# Read the file and expose useful aspects for renaming/filtering
replay = sc2reader.load_replay(path, load_level=2)
except KeyboardInterrupt:
raise
except:
# Failure to parse
file_name = os.path.basename(path)
directory = make_directory(args, ("parse_error",))
new_path = os.path.join(directory, file_name)
source_path = path[len(args.source) :]
args.log.write("Error parsing replay: {0}".format(source_path))
if not args.dryrun:
args.action.run(path, new_path)
# Skip to the next replay
continue
aspects = generate_aspects(args, replay)
# Use the filter args to select files based on replay attributes
if filter_out_replay(args, replay):
continue
# Apply the aspects to the rename formatting.
#'/' is a special character for creation of subdirectories.
# TODO: Handle duplicate replay names, its possible..
path_parts = args.rename.format(**aspects).split("/")
filename = path_parts.pop() + ".SC2Replay"
# Construct the directory and file paths; create needed directories
directory = make_directory(args, path_parts)
new_path = os.path.join(directory, filename)
# Find the source relative to the source directory for reporting
dest_path = new_path[len(args.dest) :]
source_path = path[len(args.source) :]
# Log the action and run it if we are live
msg = "{0}:\n\tSource: {1}\n\tDest: {2}\n"
args.log.write(msg.format(args.action.type, source_path, dest_path))
if not args.dryrun:
args.action.run(path, new_path)
# After every batch completes, save the state and flush the log
# TODO: modify the state to include a list of remaining files
args.log.flush()
save_state(state, args)
# We only run once in batch mode!
if args.mode == "BATCH":
break
# Since new replays come in fairly infrequently, reduce system load
# by sleeping for an acceptable response time before the next scan.
time.sleep(args.period)
args.log.write("Batch Completed")
def filter_out_replay(args, replay):
player_names = set([player.name for player in replay.players])
filter_out_player = not set(args.filter_player) & player_names
if args.filter_rule == "ALLOW":
return filter_out_player
else:
return not filter_out_player
# We need to create these compare functions at runtime because the ordering
# hinges on the --favored PLAYER options passed in from the command line.
def create_compare_funcs(args):
favored_set = set(name.lower() for name in args.favored)
def player_compare(player1, player2):
# Normalize the player names and generate our key metrics
player1_name = player1.name.lower()
player2_name = player2.name.lower()
player1_favored = player1_name in favored_set
player2_favored = player2_name in favored_set
# The favored player always comes first in the ordering
if player1_favored and not player2_favored:
return -1
elif player2_favored and not player1_favored:
return 1
# The most favored person will always be listed first
elif player1_favored and player2_favored:
player1_index = args.favored.index(player1_name)
player2_index = args.favored.index(player2_name)
return player1_index - player2_index
# If neither is favored, we'll order by number for now
# TODO: Allow command line specification of other orderings (maybe?)
else:
return player1.pid - player2.pid
def team_compare(team1, team2):
# Normalize the team name lists and generate our key metrics
team1_names = set(p.name.lower() for p in team1.players)
team2_names = set(p.name.lower() for p in team2.players)
team1_favored = team1_names & favored_set
team2_favored = team2_names & favored_set
# The team with the favored players will always be listed first
if team1_favored and not team2_favored:
return -1
elif team2_favored and not team1_favored:
return 1
# The team with the most favored person will always come first
elif team1_favored and team2_favored:
team1_best = sorted(args.favored.index(n) for n in team1_favored)
team2_best = sorted(args.favored.index(n) for n in team2_favored)
return team1_best[-1] - team2_best[-1]
# If neither is favored, we'll order by number for now
# TODO: Allow command line specification of other orderings (maybe?)
else:
return team1.number - team2.number
return team_compare, player_compare
def generate_aspects(args, replay):
teams = sorted(replay.teams, args.team_compare)
matchups, team_strings = list(), list()
for team in teams:
team.players = sorted(team.players, args.player_compare)
composition = sorted(p.play_race[0].upper() for p in team.players)
matchups.append("".join(composition))
string = ", ".join(p.format(args.player_format) for p in team.players)
team_strings.append(string)
return sc2reader.utils.AttributeDict(
result=teams[0].result,
length=replay.length,
map=replay.map,
type=replay.type,
date=replay.date.strftime(args.date_format),
matchup="v".join(matchups),
teams=" vs ".join(team_strings),
)
def make_directory(args, path_parts):
directory = args.dest
for part in path_parts:
directory = os.path.join(directory, part)
if not os.path.exists(directory):
args.log.write("Creating subfolder: {0}\n".format(directory))
if not args.dryrun:
os.mkdir(directory)
elif not os.path.isdir(directory):
exit("Cannot create subfolder. Path is occupied: {0}", directory)
return directory
def scan(args, state):
args.log.write("SCANNING: {0}\n".format(args.source))
files = sc2reader.utils.get_files(
path=args.source,
regex=args.exclude_files,
allow=False,
exclude=args.exclude_dirs,
depth=args.depth,
followlinks=args.follow_links,
)
return filter(lambda f: os.path.getctime(f) > state.last_sync, files)
def exit(msg, *args, **kwargs):
sys.exit(msg.format(*args, **kwargs) + "\n\nScript Aborted.")
def reset(args):
if not os.path.exists(args.dest):
exit("Cannot reset, destination does not exist: {0}", args.dest)
elif not os.path.isdir(args.dest):
exit("Cannot reset, destination must be directory: {0}", args.dest)
print(
"About to reset directory: {0}\nAll files and subdirectories will be removed.".format(
args.dest
)
)
choice = raw_input("Proceed anyway? (y/n) ")
if choice.lower() == "y":
args.log.write("Removing old directory: {0}\n".format(args.dest))
if not args.dryrun:
print(args.dest)
shutil.rmtree(args.dest)
else:
sys.exit("Script Aborted")
def setup(args):
args.team_compare, args.player_compare = create_compare_funcs(args)
args.action = sc2reader.utils.AttributeDict(
type=args.action, run=shutil.copy if args.action == "COPY" else shutil.move
)
if not os.path.exists(args.source):
msg = "Source does not exist: {0}.\n\nScript Aborted."
sys.exit(msg.format(args.source))
elif not os.path.isdir(args.source):
msg = "Source is not a directory: {0}.\n\nScript Aborted."
sys.exit(msg.format(args.source))
if not os.path.exists(args.dest):
if not args.dryrun:
os.mkdir(args.dest)
else:
args.log.write("Creating destination: {0}\n".format(args.dest))
elif not os.path.isdir(args.dest):
sys.exit("Destination must be a directory.\n\nScript Aborted")
data_file = os.path.join(args.dest, "sc2autosave.dat")
args.log.write("Loading state from file: {0}\n".format(data_file))
if os.path.isfile(data_file) and not args.reset:
with open(data_file) as file:
return cPickle.load(file)
else:
return sc2reader.utils.AttributeDict(last_sync=0)
def save_state(state, args):
state.last_sync = time.time()
data_file = os.path.join(args.dest, "sc2autosave.dat")
if not args.dryrun:
with open(data_file, "w") as file:
cPickle.dump(state, file)
else:
args.log.write("Writing state to file: {0}\n".format(data_file))
def main():
parser = argparse.ArgumentParser(
description="Automatically copy new replays to directory",
fromfile_prefix_chars="@",
formatter_class=sc2reader.scripts.utils.Formatter.new(max_help_position=35),
epilog="And that's all folks",
)
required = parser.add_argument_group("Required Arguments")
required.add_argument("source", type=str, help="The source directory to poll")
required.add_argument("dest", type=str, help="The destination directory to copy to")
general = parser.add_argument_group("General Options")
general.add_argument(
"--mode",
dest="mode",
type=str,
choices=["BATCH", "CYCLE"],
default="BATCH",
help="The operating mode for the organizer",
)
general.add_argument(
"--action",
dest="action",
choices=["COPY", "MOVE"],
default="COPY",
type=str,
help="Have the organizer move your files instead of copying",
)
general.add_argument(
"--period",
dest="period",
type=int,
default=0,
help="The period of time to wait between scans.",
)
general.add_argument(
"--log",
dest="log",
metavar="LOGFILE",
type=argparse.FileType("w"),
default=sys.stdout,
help="Destination file for log information",
)
general.add_argument(
"--dryrun",
dest="dryrun",
action="store_true",
help="Don't do anything. Only simulate the output",
)
general.add_argument(
"--reset",
dest="reset",
action="store_true",
default=False,
help="Wipe the destination directory clean and start over.",
)
fileargs = parser.add_argument_group("File Options")
fileargs.add_argument(
"--depth",
dest="depth",
type=int,
default=-1,
help="Maximum recussion depth. -1 (default) is unlimited.",
)
fileargs.add_argument(
"--exclude-dirs",
dest="exclude_dirs",
type=str,
metavar="NAME",
nargs="+",
default=[],
help="A list of directory names to exclude during recursion",
)
fileargs.add_argument(
"--exclude-files",
dest="exclude_files",
type=str,
metavar="REGEX",
default="",
help="An expression to match excluded files",
)
fileargs.add_argument(
"--follow-links",
dest="follow_links",
action="store_true",
default=False,
help="Enable following of symbolic links while scanning",
)
renaming = parser.add_argument_group("Renaming Options")
renaming.add_argument(
"--rename",
dest="rename",
type=str,
metavar="FORMAT",
nargs="?",
default="{length} {type} on {map}",
help="""\
The renaming format string can have the following values:
* {length} - The length of the replay ([H:]MM:SS)
* {type} - The type of the replay (1v1,2v2,4v4,etc)
* {map} - The map that was played on.
* {matchup} - Race matchup in team order, alphabetically by race.
* {date} - The date the replay was played on
* {teams} - The player line up
""",
)
renaming.add_argument(
"--length-format",
dest="length_format",
type=str,
metavar="FORMAT",
default="%M.%S",
help="The length format string. See the python time module for details",
)
renaming.add_argument(
"--player-format",
dest="player_format",
type=str,
metavar="FORMAT",
default="{name} ({play_race})",
help="The player format string used to render the :teams content item.",
)
renaming.add_argument(
"--date-format",
dest="date_format",
type=str,
metavar="FORMAT",
default="%m-%d-%Y",
help="The date format string used to render the :date content item.",
)
"""
renaming.add_argument('--team-order-by',
dest='team_order', type=str, metavar='FIELD', default='NUMBER',
help='The field by which teams are ordered.')
renaming.add_argument('--player-order-by',
dest='player_order', type=str, metavar='FIELD', default='NAME',
help='The field by which players are ordered on teams.')
"""
renaming.add_argument(
"--favored",
dest="favored",
type=str,
default=[],
metavar="NAME",
nargs="+",
help="A list of the players to favor in ordering teams and players",
)
filterargs = parser.add_argument_group("Filtering Options")
filterargs.add_argument(
"--filter-rule",
dest="filter_rule",
choices=["ALLOW", "DENY"],
help="The filters can either be used as a white list or a black list",
)
filterargs.add_argument(
"--filter-player",
metavar="NAME",
dest="filter_player",
nargs="+",
type=str,
default=[],
help="A list of players to filter on",
)
try:
run(parser.parse_args())
except KeyboardInterrupt:
print("\n\nScript Interrupted. Process Aborting")
if __name__ == "__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import math
import warnings
import weakref
import numpy as np
from scipy import optimize as scipyoptimize
import nevergrad.common.typing as tp
from nevergrad.parametrization import parameter as p
from nevergrad.common import errors
from . import base
from .base import IntOrParameter
from . import recaster
class _NonObjectMinimizeBase(recaster.SequentialRecastOptimizer):
def __init__(
self,
parametrization: IntOrParameter,
budget: tp.Optional[int] = None,
num_workers: int = 1,
*,
method: str = "Nelder-Mead",
random_restart: bool = False,
) -> None:
super().__init__(parametrization, budget=budget, num_workers=num_workers)
self.multirun = 1 # work in progress
self._normalizer: tp.Any = None
self.initial_guess: tp.Optional[tp.ArrayLike] = None
# configuration
assert (
method
in [
"CmaFmin2",
"Nelder-Mead",
"COBYLA",
"SLSQP",
"Powell",
]
or "NLOPT" in method
), f"Unknown method '{method}'"
self.method = method
self.random_restart = random_restart
# The following line rescales to [0, 1] if fully bounded.
if method == "CmaFmin2" or "NLOPT" in method:
normalizer = p.helpers.Normalizer(self.parametrization)
if normalizer.fully_bounded:
self._normalizer = normalizer
def _internal_tell_not_asked(self, candidate: p.Parameter, loss: tp.Loss) -> None:
"""Called whenever calling "tell" on a candidate that was not "asked".
Defaults to the standard tell pipeline.
""" # We do not do anything; this just updates the current best.
def get_optimization_function(self) -> tp.Callable[[tp.Callable[[tp.ArrayLike], float]], tp.ArrayLike]:
return functools.partial(self._optimization_function, weakref.proxy(self))
@staticmethod
def _optimization_function(
weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]
) -> tp.ArrayLike:
# pylint:disable=unused-argument
budget = np.inf if weakself.budget is None else weakself.budget
best_res = np.inf
best_x: np.ndarray = weakself.current_bests["average"].x # np.zeros(self.dimension)
if weakself.initial_guess is not None:
best_x = np.array(weakself.initial_guess, copy=True) # copy, just to make sure it is not modified
remaining: float = budget - weakself._num_ask
while remaining > 0: # try to restart if budget is not elapsed
options: tp.Dict[str, tp.Any] = {} if weakself.budget is None else {"maxiter": remaining}
# options: tp.Dict[str, tp.Any] = {} if self.budget is None else {"maxiter": remaining}
if weakself.method[:5] == "NLOPT":
# This is NLOPT, used as in the PCSE simulator notebook.
# ( https://github.com/ajwdewit/pcse_notebooks ).
import nlopt
def nlopt_objective_function(*args):
data = np.asarray([arg for arg in args])[0]
assert len(data) == weakself.dimension, (
str(data) + " does not have length " + str(weakself.dimension)
)
if weakself._normalizer is not None:
data = weakself._normalizer.backward(np.asarray(data, dtype=np.float32))
return objective_function(data)
# Sbplx (based on Subplex) is used by default.
nlopt_param = (
getattr(nlopt, weakself.method[6:]) if len(weakself.method) > 5 else nlopt.LN_SBPLX
)
opt = nlopt.opt(nlopt_param, weakself.dimension)
# Assign the objective function calculator
opt.set_min_objective(nlopt_objective_function)
# Set the bounds.
opt.set_lower_bounds(np.zeros(weakself.dimension))
opt.set_upper_bounds(np.ones(weakself.dimension))
# opt.set_initial_step([0.05, 0.05])
opt.set_maxeval(budget)
# Start the optimization with the first guess
firstguess = 0.5 * np.ones(weakself.dimension)
best_x = opt.optimize(firstguess)
# print("\noptimum at TDWI: %s, SPAN: %s" % (x[0], x[1]))
# print("minimum value = ", opt.last_optimum_value())
# print("result code = ", opt.last_optimize_result())
# print("With %i function calls" % objfunc_calculator.n_calls)
if weakself._normalizer is not None:
best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32))
elif weakself.method == "CmaFmin2":
import cma # import inline in order to avoid matplotlib initialization warning
def cma_objective_function(data):
# Hopefully the line below does nothing if unbounded and rescales from [0, 1] if bounded.
if weakself._normalizer is not None:
data = weakself._normalizer.backward(np.asarray(data, dtype=np.float32))
return objective_function(data)
# cma.fmin2(objective_function, [0.0] * self.dimension, [1.0] * self.dimension, remaining)
x0 = 0.5 * np.ones(weakself.dimension)
num_calls = 0
while budget - num_calls > 0:
options = {"maxfevals": budget - num_calls, "verbose": -9}
if weakself._normalizer is not None:
# Tell CMA to work in [0, 1].
options["bounds"] = [0.0, 1.0]
res = cma.fmin(
cma_objective_function,
x0=x0,
sigma0=0.2,
options=options,
restarts=9,
)
x0 = 0.5 + np.random.uniform() * np.random.uniform(
low=-0.5, high=0.5, size=weakself.dimension
)
if res[1] < best_res:
best_res = res[1]
best_x = res[0]
if weakself._normalizer is not None:
best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32))
num_calls += res[2]
else:
res = scipyoptimize.minimize(
objective_function,
best_x
if not weakself.random_restart
else weakself._rng.normal(0.0, 1.0, weakself.dimension),
method=weakself.method,
options=options,
tol=0,
)
if res.fun < best_res:
best_res = res.fun
best_x = res.x
remaining = budget - weakself._num_ask
return best_x
class NonObjectOptimizer(base.ConfiguredOptimizer):
"""Wrapper over Scipy optimizer implementations, in standard ask and tell format.
This is actually an import from scipy.optimize, including Sequential Quadratic Programming.
Parameters
----------
method: str
Name of the method to use among:
- Nelder-Mead
- COBYLA
- SQP (or SLSQP): very powerful e.g. in continuous noisy optimization. It is based on
approximating the objective function by quadratic models.
- Powell
- NLOPT* (https://nlopt.readthedocs.io/en/latest/; by default, uses Sbplx, based on Subplex);
can be NLOPT,
NLOPT_LN_SBPLX,
NLOPT_LN_PRAXIS,
NLOPT_GN_DIRECT,
NLOPT_GN_DIRECT_L,
NLOPT_GN_CRS2_LM,
NLOPT_GN_AGS,
NLOPT_GN_ISRES,
NLOPT_GN_ESCH,
NLOPT_LN_COBYLA,
NLOPT_LN_BOBYQA,
NLOPT_LN_NEWUOA_BOUND,
NLOPT_LN_NELDERMEAD.
random_restart: bool
whether to restart at a random point if the optimizer converged but the budget is not entirely
spent yet (otherwise, restarts from best point)
Note
----
These optimizers do not support asking several candidates in a row
"""
recast = True
no_parallelization = True
# pylint: disable=unused-argument
def __init__(self, *, method: str = "Nelder-Mead", random_restart: bool = False) -> None:
super().__init__(_NonObjectMinimizeBase, locals())
NelderMead = NonObjectOptimizer(method="Nelder-Mead").set_name("NelderMead", register=True)
CmaFmin2 = NonObjectOptimizer(method="CmaFmin2").set_name("CmaFmin2", register=True)
NLOPT = NonObjectOptimizer(method="NLOPT").set_name("NLOPT", register=True)
Powell = NonObjectOptimizer(method="Powell").set_name("Powell", register=True)
RPowell = NonObjectOptimizer(method="Powell", random_restart=True).set_name("RPowell", register=True)
Cobyla = NonObjectOptimizer(method="COBYLA").set_name("Cobyla", register=True)
RCobyla = NonObjectOptimizer(method="COBYLA", random_restart=True).set_name("RCobyla", register=True)
SQP = NonObjectOptimizer(method="SLSQP").set_name("SQP", register=True)
SLSQP = SQP # Just so that people who are familiar with SLSQP naming are not lost.
RSQP = NonObjectOptimizer(method="SLSQP", random_restart=True).set_name("RSQP", register=True)
RSLSQP = RSQP # Just so that people who are familiar with SLSQP naming are not lost.
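# Hedged usage sketch, not part of this module: the optimizers registered above
# follow nevergrad's standard configured-optimizer interface, so usage looks
# roughly like the following (assuming nevergrad is installed and imported as ng):
#
#     import nevergrad as ng
#     opt = ng.optimizers.registry["NelderMead"](parametrization=2, budget=100)
#     recommendation = opt.minimize(lambda x: sum((x - 0.5) ** 2))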
class _PymooMinimizeBase(recaster.SequentialRecastOptimizer):
def __init__(
self,
parametrization: IntOrParameter,
budget: tp.Optional[int] = None,
num_workers: int = 1,
*,
algorithm: str,
) -> None:
super().__init__(parametrization, budget=budget, num_workers=num_workers)
# configuration
self.algorithm = algorithm
self._no_hypervolume = True
self._initial_seed = -1
def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], tp.Optional[tp.ArrayLike]]:
if self._initial_seed == -1:
self._initial_seed = self._rng.randint(2**30)
return functools.partial(self._optimization_function, weakref.proxy(self))
# pylint:disable=useless-return
@staticmethod
def _optimization_function(
weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]
) -> tp.Optional[tp.ArrayLike]:
# pylint:disable=unused-argument, import-outside-toplevel
from pymoo import optimize as pymoooptimize
from pymoo.factory import get_algorithm as get_pymoo_algorithm
# from pymoo.factory import get_reference_directions
# reference direction code for when we want to use the other MOO optimizers in Pymoo
# if self.algorithm in [
# "rnsga2",
# "nsga3",
# "unsga3",
# "rnsga3",
# "moead",
# "ctaea",
# ]: # algorithms that require reference points or reference directions
# the appropriate n_partitions must be looked into
# ref_dirs = get_reference_directions("das-dennis", self.num_objectives, n_partitions=12)
# algorithm = get_pymoo_algorithm(self.algorithm, ref_dirs)
# else:
algorithm = get_pymoo_algorithm(weakself.algorithm)
problem = _create_pymoo_problem(weakself, objective_function)
pymoooptimize.minimize(problem, algorithm, seed=weakself._initial_seed)
return None
def _internal_ask_candidate(self) -> p.Parameter:
"""
Special version to make sure that num_objectives has been set before
the proper _internal_ask_candidate, in our parent class, is called.
"""
if self.num_objectives == 0:
# dummy ask i.e. not activating pymoo until num_objectives is set
warnings.warn(
"with this optimizer, it is more efficient to set num_objectives before the optimization begins",
errors.NevergradRuntimeWarning,
)
# We need to get a datapoint that is a random point in parameter space,
# and waste an evaluation on it.
return self.parametrization.spawn_child()
return super()._internal_ask_candidate()
def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:
"""
Special version to make sure that the extra initial evaluation, which
we may have done in order to get num_objectives, is discarded.
Note that this discarding means that the extra point will not make it into
replay_archive_tell. Correspondingly, because num_objectives will make it into
the pickle, __setstate__ will never need a dummy ask.
"""
if self._messaging_thread is None:
return # dummy tell i.e. not activating pymoo until num_objectives is set
super()._internal_tell_candidate(candidate, loss)
def _post_loss(self, candidate: p.Parameter, loss: float) -> tp.Loss:
# pylint: disable=unused-argument
"""
Multi-Objective override for this function.
"""
return candidate.losses
class Pymoo(base.ConfiguredOptimizer):
"""Wrapper over Pymoo optimizer implementations, in standard ask and tell format.
This is actually an import from Pymoo Optimize.
Parameters
----------
algorithm: str
Use "algorithm-name" with following names to access algorithm classes:
Single-Objective
-"de"
-'ga'
-"brkga"
-"nelder-mead"
-"pattern-search"
-"cmaes"
Multi-Objective
-"nsga2"
Multi-Objective requiring reference directions, points or lines
-"rnsga2"
-"nsga3"
-"unsga3"
-"rnsga3"
-"moead"
-"ctaea"
Note
----
These optimizers do not support asking several candidates in a row
"""
recast = True
no_parallelization = True
# pylint: disable=unused-argument
def __init__(self, *, algorithm: str) -> None:
super().__init__(_PymooMinimizeBase, locals())
class _PymooBatchMinimizeBase(recaster.BatchRecastOptimizer):
# pylint: disable=abstract-method
def __init__(
self,
parametrization: IntOrParameter,
budget: tp.Optional[int] = None,
num_workers: int = 1,
*,
algorithm: str,
) -> None:
super().__init__(parametrization, budget=budget, num_workers=num_workers)
# configuration
self.algorithm = algorithm
self._no_hypervolume = True
self._initial_seed = -1
def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], tp.Optional[tp.ArrayLike]]:
if self._initial_seed == -1:
self._initial_seed = self._rng.randint(2**30)
return functools.partial(self._optimization_function, weakref.proxy(self))
# pylint:disable=useless-return
@staticmethod
def _optimization_function(
weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]
) -> tp.Optional[tp.ArrayLike]:
# pylint:disable=unused-argument, import-outside-toplevel
from pymoo import optimize as pymoooptimize
from pymoo.factory import get_algorithm as get_pymoo_algorithm
# from pymoo.factory import get_reference_directions
# reference direction code for when we want to use the other MOO optimizers in Pymoo
# if self.algorithm in [
# "rnsga2",
# "nsga3",
# "unsga3",
# "rnsga3",
# "moead",
# "ctaea",
# ]: # algorithms that require reference points or reference directions
# the appropriate n_partitions must be looked into
# ref_dirs = get_reference_directions("das-dennis", self.num_objectives, n_partitions=12)
# algorithm = get_pymoo_algorithm(self.algorithm, ref_dirs)
# else:
algorithm = get_pymoo_algorithm(weakself.algorithm)
problem = _create_pymoo_problem(weakself, objective_function, False)
pymoooptimize.minimize(problem, algorithm, seed=weakself._initial_seed)
return None
def _internal_ask_candidate(self) -> p.Parameter:
"""Reads messages from the thread in which the underlying optimization function is running
New messages are sent as "ask".
"""
# get a datapoint that is a random point in parameter space
if self.num_objectives == 0: # dummy ask i.e. not activating pymoo until num_objectives is set
warnings.warn(
"with this optimizer, it is more efficient to set num_objectives before the optimization begins",
errors.NevergradRuntimeWarning,
)
return self.parametrization.spawn_child()
return super()._internal_ask_candidate()
def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:
"""Returns value for a point which was "asked"
(none asked point cannot be "tell")
"""
if self._messaging_thread is None:
return # dummy tell i.e. not activating pymoo until num_objectives is set
super()._internal_tell_candidate(candidate, loss)
def _post_loss(self, candidate: p.Parameter, loss: float) -> tp.Loss:
# pylint: disable=unused-argument
"""
Multi-Objective override for this function.
"""
return candidate.losses
class PymooBatch(base.ConfiguredOptimizer):
"""Wrapper over Pymoo optimizer implementations, in standard ask and tell format.
This is actually an import from Pymoo Optimize.
Parameters
----------
algorithm: str
Use "algorithm-name" with following names to access algorithm classes:
Single-Objective
-"de"
-'ga'
-"brkga"
-"nelder-mead"
-"pattern-search"
-"cmaes"
Multi-Objective
-"nsga2"
Multi-Objective requiring reference directions, points or lines
-"rnsga2"
-"nsga3"
-"unsga3"
-"rnsga3"
-"moead"
-"ctaea"
Note
----
These optimizers do not support asking several candidates in a row
"""
recast = True
# pylint: disable=unused-argument
def __init__(self, *, algorithm: str) -> None:
super().__init__(_PymooBatchMinimizeBase, locals())
def _create_pymoo_problem(
optimizer: base.Optimizer,
objective_function: tp.Callable[[tp.ArrayLike], float],
elementwise: bool = True,
):
kwargs = {}
try:
# pylint:disable=import-outside-toplevel
from pymoo.core.problem import ElementwiseProblem, Problem # type: ignore
Base = ElementwiseProblem if elementwise else Problem
except ImportError:
# Used if pymoo < 0.5.0
# pylint:disable=import-outside-toplevel
from pymoo.model.problem import Problem as Base # type: ignore
kwargs = {"elementwise_evaluation": elementwise}
class _PymooProblem(Base): # type: ignore
def __init__(self, optimizer, objective_function):
self.objective_function = objective_function
super().__init__(
n_var=optimizer.dimension,
n_obj=optimizer.num_objectives,
n_constr=0, # constraints handled already by nevergrad
xl=-math.pi * 0.5,
xu=math.pi * 0.5,
**kwargs,
)
def _evaluate(self, X, out, *args, **kwargs):
# pylint:disable=unused-argument
# pymoo is supplying us with bounded parameters in [-pi/2,pi/2]. Nevergrad wants unbounded reals from us.
out["F"] = self.objective_function(np.tan(X))
return _PymooProblem(optimizer, objective_function)
PymooNSGA2 = Pymoo(algorithm="nsga2").set_name("PymooNSGA2", register=True)
PymooBatchNSGA2 = PymooBatch(algorithm="nsga2").set_name("PymooBatchNSGA2", register=False)
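# Hedged usage sketch, not part of this module: the Pymoo wrappers target
# multi-objective problems, so the objective returns one loss per objective and,
# as the warning in _internal_ask_candidate notes, it is more efficient to set
# num_objectives before optimization begins:
#
#     import nevergrad as ng
#     opt = ng.optimizers.registry["PymooNSGA2"](parametrization=3, budget=200)
#     opt.num_objectives = 2
#     opt.minimize(lambda x: [sum(x ** 2), sum((x - 1.0) ** 2)])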
|
# valid ranges
rules = []
while True:
try:
ln = input()
if not ln.strip():
break
rule = [x.split("-") for x in ln.split(": ")[1].split(" or ")]
for r in rule:
rules.append([int(x) for x in r])
except EOFError:
break
# skip the "your ticket:" section (header line, ticket values, trailing blank)
while True:
if not input().strip(): break
input()
input()  # consume the "nearby tickets:" header before summing invalid values
inval_sum = 0
while True:
try:
ln = input()
vals = ln.split(',')
for v in vals:
if not any(r[0] <= int(v) <= r[1] for r in rules):
inval_sum += int(v)
except EOFError:
break
print(inval_sum)
|
import matplotlib as mp
import pandas as pd
import seaborn as sb
import report.config as config
from ..util import create_file, sort_dataframe
from .util import savefig, set_scales, set_labels, task_labels
def draw_parallel_coord(df, class_column,
x_labels=True, yscale='linear',
title=None, xlabel=None, ylabel=None,
legend_loc='best', legend_title=None, colormap=None):
colormap = config.colormap if colormap is None else colormap
with sb.axes_style('ticks', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):
# print(sb.axes_style())
parallel_fig = mp.pyplot.figure(dpi=120, figsize=(10, df.shape[0]))
# select the first colors from the colormap to ensure we use the same colors as in the stripplot later
colors = mp.cm.get_cmap(colormap).colors[:len(df[class_column].unique())]
axes = pd.plotting.parallel_coordinates(df,
class_column=class_column,
color=colors,
axvlines=False,
)
set_scales(axes, yscale=yscale)
handles, labels = axes.get_legend_handles_labels()
axes.legend(handles, labels, loc=legend_loc, title=legend_title)
set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel, x_labels=x_labels,
x_tick_params=dict(labelrotation=90))
return parallel_fig
def draw_score_parallel_coord(col, results, type_filter='all', metadata=None,
x_sort_by='name', ylabel=None, filename=None,
**kwargs):
res_group = results.groupby(['type', 'task', 'framework'])
df = res_group[col].mean().unstack(['type', 'task'])
df = df if type_filter == 'all' \
else df.iloc[:, df.columns.get_loc(type_filter)]
if metadata:
sort_by = lambda cols: getattr(metadata[cols[1]], x_sort_by)
df = sort_dataframe(df, by=sort_by, axis=1)
df.reset_index(inplace=True)
fig = draw_parallel_coord(df,
'framework',
x_labels=task_labels(df.columns.drop('framework')),
# xlabel="Task",
ylabel=ylabel or "Score",
legend_title="Framework",
**kwargs)
if filename:
savefig(fig, create_file("graphics", config.results_group, filename))
return fig
|
async def greet(ctx):
greetings = [
"Ahn nyong ha se yo",
"Ahn-nyong-ha-se-yo",
"Ahoj",
"An-nyŏng-ha-se-yo",
"As-salamu alaykum",
"Assalamo aleikum",
"Assalamualaikum",
"Avuxeni",
"Bonġu",
"Bonjour",
"Bună ziua",
"Ciao",
"Cześć",
"Dia dhuit",
"Dobar dan",
"Dobra većer",
"Dobro jutro",
"God dag",
"Góðan dag",
"Grüß gott",
"Guten tag",
"Hafa adai",
"Hallå",
"Hallo",
"Hello",
"Hoi",
"Hola",
"How ya doing",
"How you doing",
"Howdy",
"Hujambo",
"Hyvää päivää",
"Ia orna",
"Jo napot",
"Konnichiwa",
"Marhaba",
"Merhaba",
"Moïen",
"Namaskar",
"Namaste",
"Namastē",
"Nde-ewo",
"Nǐ hǎo",
"Niltze",
"Now then",
"Olá",
"Salam",
"Salve",
"Sawasdee",
"Sawubona",
"Selamat siang",
"Shalom",
"Shwmae",
"Sveiki",
"Wassup",
"What's up",
"Xin chào",
"Yasou",
"Zdraveite",
"Zdravo",
"Zdravstvuyte",
"안녕하세요",
"こんにちは",
"你好",
]
message = ctx.content.lower()
# if no one is tagged in the message
if "@" not in message:
message_greetings = []
# check if any of the greetings are in the message
for greeting in greetings:
if greeting.lower() in message:
message_greetings.append(greeting)
# if any are, format them into a greeting back to the user
if len(message_greetings) > 0:
greetings_string = message_greetings[0]
if len(message_greetings) > 1:
first_greeting = message_greetings[0]
other_greetings = []
for greeting in message_greetings[1 : len(message_greetings)]:
other_greetings.append(greeting.lower())
all_greetings = [first_greeting] + other_greetings
if len(message_greetings) > 2:
greetings_string = (
f"{', '.join(all_greetings[0:-1])} and {all_greetings[-1]}"
)
else:
greetings_string = " and ".join(all_greetings)
# respond to user
await ctx.channel.send(f"{greetings_string}, @{ctx.author.name}!")
|
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.schema import FetchedValue
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from app.api.utils.models_mixins import Base
from app.extensions import db
from app.api.now_applications.models.activity_summary.activity_summary_base import ActivitySummaryBase
class CutLinesPolarizationSurvey(ActivitySummaryBase):
__mapper_args__ = {
'polymorphic_identity': 'cut_lines_polarization_survey', ## type code
}
## NO TABLE FOR THIS TYPE
details = db.relationship(
'CutLinesPolarizationSurveyDetail',
secondary='activity_summary_detail_xref',
load_on_pending=True)
@hybrid_property
def calculated_total_disturbance(self):
return self.calculate_total_disturbance_area(self.details)
def __repr__(self):
return '<CutLinesPolarizationSurvey %r>' % self.activity_summary_id
|
from django.apps import AppConfig
class ContactConfig(AppConfig):
name = 'contacts'
def ready(self):
import contacts.signals  # noqa: F401 -- imported for its side effect of connecting signal handlers
|
#!/usr/bin/env python3
import re
import sys
import sqlite3
import traceback
import os
__location__ = os.path.realpath(
os.path.join(
os.getcwd(),
os.path.dirname(__file__)
)
)
input_failures = 0
try:
DATABASE_NAME = os.path.join(__location__, 'data.sqlite')
conn = sqlite3.connect(DATABASE_NAME)
i = 0
for line in sys.stdin:
l = line.strip()
match = re.search(r'^(\w+)\s+([\w\-:]+)\s+(\w+)\s+(\w+|-)\s+OK', l)
if not match:
input_failures += 1
print(f'Error: Not matched input line: {l}')
continue
date_part = match.group(2).split('T')
data = {
'date': date_part[0],
'time': '',
'area': os.environ['SCRAPER_KEY'],
'tested': None,
'confirmed': int(match.group(3)),
'hospitalized': None,
'icu': None,
'vent': None,
'released': None,
'deceased': match.group(4),
'source': os.environ['SCRAPER_SOURCE']
}
if len(date_part) == 2:
data['time'] = date_part[1]
if (data['deceased'] == '-'):
data['deceased'] = None
else:
data['deceased'] = int(data['deceased'])
c = conn.cursor()
try:
print(data)
c.execute(
'''
INSERT INTO data (
date,
time,
abbreviation_canton_and_fl,
ncumul_tested,
ncumul_conf,
ncumul_hosp,
ncumul_ICU,
ncumul_vent,
ncumul_released,
ncumul_deceased,
source
)
VALUES
(?,?,?,?,?,?,?,?,?,?,?)
''',
[
data['date'],
data['time'],
data['area'],
data['tested'],
data['confirmed'],
data['hospitalized'],
data['icu'],
data['vent'],
data['released'],
data['deceased'],
data['source'],
]
)
except sqlite3.IntegrityError:
print("Error: Data for this date has already been added")
finally:
conn.commit()
except Exception as e:
print("Error: %s" % e)
print(traceback.format_exc())
sys.exit(1)
finally:
conn.close()
if input_failures:
sys.exit(1)
|
"""
Utility functions using the pyesgf package.
"""
import sys
from urllib.parse import quote_plus
def ats_url(base_url):
"""
Return the URL for the ESGF SAML AttributeService
"""
# Strip '/' from url as necessary
base_url = base_url.rstrip('/')
return '/'.join([base_url,
'esgf-idp/saml/soap/secure/attributeService.htm'])
def get_manifest(drs_id, version, connection):
"""
Retrieve the filenames, sizes and checksums of a dataset.
This function will raise ValueError if more than one dataset is found
matching the given drs_id and version on a search without replicas.
The connection should be either distrib=True or be connected to a suitable
ESGF search interface.
:param drs_id: a string containing the DRS identifier without version
:param version: The version as a string or int
"""
if isinstance(version, int):
version = str(version)
context = connection.new_context(drs_id=drs_id, version=version)
results = context.search()
if len(results) > 1:
raise ValueError("Search for dataset %s.v%s returns multiple hits" %
(drs_id, version))
file_context = results[0].file_context()
manifest = {}
for file in file_context.search():
manifest[file.filename] = {
'checksum_type': file.checksum_type,
'checksum': file.checksum,
'size': file.size,
}
return manifest
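# Hedged usage sketch (the index node URL and dataset id below are illustrative
# placeholders, not taken from this module):
#
#     from pyesgf.search import SearchConnection
#     conn = SearchConnection('https://esgf-node.llnl.gov/esg-search', distrib=True)
#     manifest = get_manifest('cmip5.output1.MOHC.HadGEM2-ES.rcp45.mon.atmos.Amon.r1i1p1',
#                             '20111128', conn)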
def urlencode(query):
"""
Encode a sequence of two-element tuples or dictionary into a URL query
string.
This version is adapted from the standard library to understand operators
in the pyesgf.search.constraints module.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
"""
if hasattr(query, "items"):
# mapping objects
query = list(query.items())
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object", tb)
def append(k, v, tag, lst):
from .search.consts import OPERATOR_NEQ
if tag == OPERATOR_NEQ:
lst.append('%s!=%s' % (k, v))
elif tag is None:
lst.append('%s=%s' % (k, v))
else:
raise ValueError('Unknown operator tag %s' % tag)
def strip_tag(v):
if isinstance(v, tuple):
tag, v = v
else:
tag = None
return tag, v
lst = []
for k, v in query:
tag, v = strip_tag(v)
k = quote_plus(str(k))
if isinstance(v, str):
if hasattr(v, 'encode'):
# is there a reasonable way to convert to ASCII?
# encode generates a string, but "replace" or "ignore"
# lose information and "strict" can raise UnicodeError
v = quote_plus(v.encode("ASCII", "replace"))
else:
v = quote_plus(v)
append(k, v, tag, lst)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v))
append(k, v, tag, lst)
else:
# loop over the sequence
for elt in v:
append(k, quote_plus(str(elt)), tag, lst)
return '&'.join(lst)
|
from datetime import datetime
from decimal import Decimal
import os
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.forms.formsets import formset_factory
import commonware.log
import happyforms
from quieter_formset.formset import BaseFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
from access import acl
import amo
import captcha.fields
from amo.fields import ColorField
from amo.urlresolvers import reverse
from amo.utils import slug_validator, slugify, sorted_groupby, remove_icons
from addons.models import (Addon, AddonCategory, BlacklistedSlug, Category,
Persona)
from addons.tasks import save_theme, save_theme_reupload
from addons.utils import reverse_name_lookup
from addons.widgets import IconWidgetRenderer, CategoriesSelectMultiple
from devhub import tasks as devhub_tasks
from tags.models import Tag
from translations import LOCALES
from translations.fields import TransField, TransTextarea
from translations.forms import TranslationFormMixin
from translations.models import Translation
from translations.utils import transfield_changed
from translations.widgets import TranslationTextInput
from users.models import UserEmailField
from versions.models import Version
log = commonware.log.getLogger('z.addons')
def clean_name(name, instance=None):
if not instance:
log.debug('clean_name called without an instance: %s' % name)
id = reverse_name_lookup(name)
# If we get an id and either there's no instance or the instance.id != id.
if id and (not instance or id != instance.id):
raise forms.ValidationError(_('This name is already in use. Please '
'choose another.'))
return name
def clean_slug(slug, instance):
slug_validator(slug, lower=False)
if slug != instance.slug:
if Addon.objects.filter(slug=slug).exists():
raise forms.ValidationError(
_('This slug is already in use. Please choose another.'))
if BlacklistedSlug.blocked(slug):
raise forms.ValidationError(
_('The slug cannot be "%s". Please choose another.' % slug))
return slug
def clean_tags(request, tags):
target = [slugify(t, spaces=True, lower=True) for t in tags.split(',')]
target = set(filter(None, target))
min_len = amo.MIN_TAG_LENGTH
max_len = Tag._meta.get_field('tag_text').max_length
max_tags = amo.MAX_TAGS
total = len(target)
blacklisted = (Tag.objects.values_list('tag_text', flat=True)
.filter(tag_text__in=target, blacklisted=True))
if blacklisted:
# L10n: {0} is a single tag or a comma-separated list of tags.
msg = ngettext('Invalid tag: {0}', 'Invalid tags: {0}',
len(blacklisted)).format(', '.join(blacklisted))
raise forms.ValidationError(msg)
restricted = (Tag.objects.values_list('tag_text', flat=True)
.filter(tag_text__in=target, restricted=True))
if not acl.action_allowed(request, 'Addons', 'Edit'):
if restricted:
# L10n: {0} is a single tag or a comma-separated list of tags.
msg = ngettext('"{0}" is a reserved tag and cannot be used.',
'"{0}" are reserved tags and cannot be used.',
len(restricted)).format('", "'.join(restricted))
raise forms.ValidationError(msg)
else:
# Admin's restricted tags don't count towards the limit.
total = len(target - set(restricted))
if total > max_tags:
num = total - max_tags
msg = ngettext('You have {0} too many tags.',
'You have {0} too many tags.', num).format(num)
raise forms.ValidationError(msg)
if any(t for t in target if len(t) > max_len):
raise forms.ValidationError(
_('All tags must be %s characters or less after invalid characters'
' are removed.' % max_len))
if any(t for t in target if len(t) < min_len):
msg = ngettext("All tags must be at least {0} character.",
"All tags must be at least {0} characters.",
min_len).format(min_len)
raise forms.ValidationError(msg)
return target
class AddonFormBase(TranslationFormMixin, happyforms.ModelForm):
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonFormBase, self).__init__(*args, **kw)
class Meta:
model = Addon
fields = ('name', 'slug', 'summary', 'tags')
def clean_slug(self):
return clean_slug(self.cleaned_data['slug'], self.instance)
def clean_tags(self):
return clean_tags(self.request, self.cleaned_data['tags'])
def get_tags(self, addon):
if acl.action_allowed(self.request, 'Addons', 'Edit'):
return list(addon.tags.values_list('tag_text', flat=True))
else:
return list(addon.tags.filter(restricted=False)
.values_list('tag_text', flat=True))
class AddonFormBasic(AddonFormBase):
name = TransField(max_length=50)
slug = forms.CharField(max_length=30)
summary = TransField(widget=TransTextarea(attrs={'rows': 4}),
max_length=250)
tags = forms.CharField(required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'summary', 'tags')
def __init__(self, *args, **kw):
super(AddonFormBasic, self).__init__(*args, **kw)
self.fields['tags'].initial = ', '.join(self.get_tags(self.instance))
# Do not simply append validators, as validators will persist between
# instances.
def validate_name(name):
return clean_name(name, self.instance)
name_validators = list(self.fields['name'].validators)
name_validators.append(validate_name)
self.fields['name'].validators = name_validators
def save(self, addon, commit=False):
tags_new = self.cleaned_data['tags']
tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
# Add new tags.
for t in set(tags_new) - set(tags_old):
Tag(tag_text=t).save_tag(addon)
# Remove old tags.
for t in set(tags_old) - set(tags_new):
Tag(tag_text=t).remove_tag(addon)
# We ignore `commit`, since we need it to be `False` so we can save
# the ManyToMany fields on our own.
addonform = super(AddonFormBasic, self).save(commit=False)
addonform.save()
return addonform
class AppFormBasic(AddonFormBasic):
"""Form to override name length for apps."""
name = TransField(max_length=128)
class CategoryForm(forms.Form):
application = forms.TypedChoiceField(amo.APPS_CHOICES, coerce=int,
widget=forms.HiddenInput,
required=False)
categories = forms.ModelMultipleChoiceField(
queryset=Category.objects.all(), widget=CategoriesSelectMultiple)
def save(self, addon):
application = self.cleaned_data.get('application')
categories_new = self.cleaned_data['categories']
categories_old = [cats for app, cats in addon.app_categories if
(app and application and app.id == application)
or (not app and not application)]
if categories_old:
categories_old = categories_old[0]
# Add new categories.
for c in set(categories_new) - set(categories_old):
AddonCategory(addon=addon, category=c).save()
# Remove old categories.
for c in set(categories_old) - set(categories_new):
AddonCategory.objects.filter(addon=addon, category=c).delete()
def clean_categories(self):
categories = self.cleaned_data['categories']
total = categories.count()
max_cat = amo.MAX_CATEGORIES
if getattr(self, 'disabled', False) and total:
raise forms.ValidationError(
_('Categories cannot be changed while your add-on is featured '
'for this application.'))
if total > max_cat:
# L10n: {0} is the number of categories.
raise forms.ValidationError(ngettext(
'You can have only {0} category.',
'You can have only {0} categories.',
max_cat).format(max_cat))
has_misc = filter(lambda x: x.misc, categories)
if has_misc and total > 1:
raise forms.ValidationError(
_('The miscellaneous category cannot be combined with '
'additional categories.'))
return categories
class BaseCategoryFormSet(BaseFormSet):
def __init__(self, *args, **kw):
self.addon = kw.pop('addon')
self.request = kw.pop('request', None)
super(BaseCategoryFormSet, self).__init__(*args, **kw)
self.initial = []
apps = sorted(self.addon.compatible_apps.keys(),
key=lambda x: x.id)
# Drop any apps that don't have appropriate categories.
qs = Category.objects.filter(type=self.addon.type)
app_cats = dict((k, list(v)) for k, v in
sorted_groupby(qs, 'application'))
for app in list(apps):
if app and not app_cats.get(app.id):
apps.remove(app)
if not app_cats:
apps = []
for app in apps:
cats = dict(self.addon.app_categories).get(app, [])
self.initial.append({'categories': [c.id for c in cats]})
for app, form in zip(apps, self.forms):
key = app.id if app else None
form.request = self.request
form.initial['application'] = key
form.app = app
cats = sorted(app_cats[key], key=lambda x: x.name)
form.fields['categories'].choices = [(c.id, c.name) for c in cats]
# If this add-on is featured for this application, category
# changes are forbidden.
if not acl.action_allowed(self.request, 'Addons', 'Edit'):
form.disabled = (app and self.addon.is_featured(app))
def save(self):
for f in self.forms:
f.save(self.addon)
CategoryFormSet = formset_factory(form=CategoryForm,
formset=BaseCategoryFormSet, extra=0)
def icons():
"""
Generates a list of tuples for the default icons for add-ons,
in the format (pseudo-mime-type, description).
"""
icons = [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default')]
dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
for fname in files:
if '32' in fname and 'default' not in fname:
icon_name = fname.split('-')[0]
icons.append(('icon/%s' % icon_name, icon_name))
return icons
class AddonFormMedia(AddonFormBase):
icon_type = forms.CharField(widget=forms.RadioSelect(
renderer=IconWidgetRenderer, choices=[]), required=False)
icon_upload_hash = forms.CharField(required=False)
class Meta:
model = Addon
fields = ('icon_upload_hash', 'icon_type')
def __init__(self, *args, **kwargs):
super(AddonFormMedia, self).__init__(*args, **kwargs)
# Add icons here so we only read the directory when
# AddonFormMedia is actually being used.
self.fields['icon_type'].widget.choices = icons()
def save(self, addon, commit=True):
if self.cleaned_data['icon_upload_hash']:
upload_hash = self.cleaned_data['icon_upload_hash']
upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
dirname = addon.get_icon_dir()
destination = os.path.join(dirname, '%s' % addon.id)
remove_icons(destination)
devhub_tasks.resize_icon.delay(upload_path, destination,
amo.ADDON_ICON_SIZES,
set_modified_on=[addon])
return super(AddonFormMedia, self).save(commit)
class AddonFormDetails(AddonFormBase):
default_locale = forms.TypedChoiceField(choices=LOCALES)
class Meta:
model = Addon
fields = ('description', 'default_locale', 'homepage')
def clean(self):
# Make sure we have the required translations in the new locale.
required = 'name', 'summary', 'description'
data = self.cleaned_data
if not self.errors and 'default_locale' in self.changed_data:
fields = dict((k, getattr(self.instance, k + '_id'))
for k in required)
locale = self.cleaned_data['default_locale']
ids = filter(None, fields.values())
qs = (Translation.objects.filter(locale=locale, id__in=ids,
localized_string__isnull=False)
.values_list('id', flat=True))
missing = [k for k, v in fields.items() if v not in qs]
# They might be setting description right now.
if 'description' in missing and locale in data['description']:
missing.remove('description')
if missing:
raise forms.ValidationError(
_('Before changing your default locale you must have a '
'name, summary, and description in that locale. '
'You are missing %s.') % ', '.join(map(repr, missing)))
return data
class AddonFormSupport(AddonFormBase):
support_url = TransField.adapt(forms.URLField)(required=False)
support_email = TransField.adapt(forms.EmailField)(required=False)
class Meta:
model = Addon
fields = ('support_email', 'support_url')
def __init__(self, *args, **kw):
super(AddonFormSupport, self).__init__(*args, **kw)
def save(self, addon, commit=True):
return super(AddonFormSupport, self).save(commit)
class AddonFormTechnical(AddonFormBase):
developer_comments = TransField(widget=TransTextarea, required=False)
class Meta:
model = Addon
fields = ('developer_comments', 'view_source', 'site_specific',
'external_software', 'auto_repackage', 'public_stats',
'whiteboard')
class AddonForm(happyforms.ModelForm):
name = forms.CharField(widget=TranslationTextInput,)
homepage = forms.CharField(widget=TranslationTextInput, required=False)
eula = forms.CharField(widget=TranslationTextInput,)
description = forms.CharField(widget=TranslationTextInput,)
developer_comments = forms.CharField(widget=TranslationTextInput,)
privacy_policy = forms.CharField(widget=TranslationTextInput,)
the_future = forms.CharField(widget=TranslationTextInput,)
the_reason = forms.CharField(widget=TranslationTextInput,)
support_email = forms.CharField(widget=TranslationTextInput,)
class Meta:
model = Addon
fields = ('name', 'homepage', 'default_locale', 'support_email',
'support_url', 'description', 'summary',
'developer_comments', 'eula', 'privacy_policy', 'the_reason',
'the_future', 'view_source', 'prerelease', 'site_specific',)
exclude = ('status', )
def clean_name(self):
return clean_name(self.cleaned_data['name'])
def save(self):
desc = self.data.get('description')
if desc and desc != unicode(self.instance.description):
amo.log(amo.LOG.EDIT_DESCRIPTIONS, self.instance)
if self.changed_data:
amo.log(amo.LOG.EDIT_PROPERTIES, self.instance)
super(AddonForm, self).save()
class AbuseForm(happyforms.Form):
recaptcha = captcha.fields.ReCaptchaField(label='')
text = forms.CharField(required=True,
label='',
widget=forms.Textarea())
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
super(AbuseForm, self).__init__(*args, **kwargs)
if (not self.request.user.is_anonymous() or
not settings.RECAPTCHA_PRIVATE_KEY):
del self.fields['recaptcha']
class ThemeFormBase(AddonFormBase):
def __init__(self, *args, **kwargs):
super(ThemeFormBase, self).__init__(*args, **kwargs)
cats = Category.objects.filter(type=amo.ADDON_PERSONA, weight__gte=0)
cats = sorted(cats, key=lambda x: x.name)
self.fields['category'].choices = [(c.id, c.name) for c in cats]
for field in ('header', 'footer'):
self.fields[field].widget.attrs = {
'data-upload-url': reverse('devhub.personas.upload_persona',
args=['persona_%s' % field]),
'data-allowed-types': 'image/jpeg|image/png'
}
def clean_name(self):
return clean_name(self.cleaned_data['name'])
def clean_slug(self):
return clean_slug(self.cleaned_data['slug'], self.instance)
class ThemeForm(ThemeFormBase):
name = forms.CharField(max_length=50)
slug = forms.CharField(max_length=30)
category = forms.ModelChoiceField(queryset=Category.objects.all(),
widget=forms.widgets.RadioSelect)
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
max_length=500, required=False)
tags = forms.CharField(required=False)
license = forms.TypedChoiceField(
choices=amo.PERSONA_LICENSES_CHOICES,
coerce=int, empty_value=None, widget=forms.HiddenInput,
error_messages={'required': _lazy(u'A license must be selected.')})
header = forms.FileField(required=False)
header_hash = forms.CharField(widget=forms.HiddenInput)
footer = forms.FileField(required=False)
footer_hash = forms.CharField(widget=forms.HiddenInput, required=False)
# Native color picker doesn't allow real time tracking of user input
# and empty values, thus force the JavaScript color picker for now.
# See bugs 1005206 and 1003575.
accentcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
textcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
agreed = forms.BooleanField()
# This lets us POST the data URIs of the unsaved previews so we can still
# show them if there were form errors. It's really clever.
unsaved_data = forms.CharField(required=False, widget=forms.HiddenInput)
class Meta:
model = Addon
fields = ('name', 'slug', 'description', 'tags')
def save(self, commit=False):
data = self.cleaned_data
addon = Addon.objects.create(
slug=data.get('slug'),
status=amo.STATUS_PENDING, type=amo.ADDON_PERSONA)
addon.name = {'en-US': data['name']}
if data.get('description'):
addon.description = data['description']
addon._current_version = Version.objects.create(addon=addon,
version='0')
addon.save()
# Create Persona instance.
p = Persona()
p.persona_id = 0
p.addon = addon
p.header = 'header.png'
if data['footer_hash']:
p.footer = 'footer.png'
if data['accentcolor']:
p.accentcolor = data['accentcolor'].lstrip('#')
if data['textcolor']:
p.textcolor = data['textcolor'].lstrip('#')
p.license = data['license']
p.submit = datetime.now()
user = self.request.amo_user
p.author = user.username
p.display_username = user.name
p.save()
# Save header, footer, and preview images.
save_theme.delay(data['header_hash'], data['footer_hash'], addon)
# Save user info.
addon.addonuser_set.create(user=user, role=amo.AUTHOR_ROLE_OWNER)
# Save tags.
for t in data['tags']:
Tag(tag_text=t).save_tag(addon)
# Save categories.
AddonCategory(addon=addon, category=data['category']).save()
return addon
class EditThemeForm(AddonFormBase):
name = TransField(max_length=50, label=_lazy('Give Your Theme a Name.'))
slug = forms.CharField(max_length=30)
category = forms.ModelChoiceField(queryset=Category.objects.all(),
widget=forms.widgets.RadioSelect)
description = TransField(
widget=TransTextarea(attrs={'rows': 4}),
max_length=500, required=False, label=_lazy('Describe your Theme.'))
tags = forms.CharField(required=False)
accentcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
textcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
license = forms.TypedChoiceField(
choices=amo.PERSONA_LICENSES_CHOICES, coerce=int, empty_value=None,
widget=forms.HiddenInput,
error_messages={'required': _lazy(u'A license must be selected.')})
# Theme re-upload.
header = forms.FileField(required=False)
header_hash = forms.CharField(widget=forms.HiddenInput, required=False)
footer = forms.FileField(required=False)
footer_hash = forms.CharField(widget=forms.HiddenInput, required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'description', 'tags')
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonFormBase, self).__init__(*args, **kw)
addon = Addon.objects.no_cache().get(id=self.instance.id)
persona = addon.persona
# Do not simply append validators, as validators will persist between
# instances.
self.fields['name'].validators = list(self.fields['name'].validators)
self.fields['name'].validators.append(lambda x: clean_name(x, addon))
# Allow theme artists to localize Name and Description.
for trans in Translation.objects.filter(id=self.initial['name']):
self.initial['name_' + trans.locale.lower()] = trans
for trans in Translation.objects.filter(
id=self.initial['description']):
self.initial['description_' + trans.locale.lower()] = trans
self.old_tags = self.get_tags(addon)
self.initial['tags'] = ', '.join(self.old_tags)
if persona.accentcolor:
self.initial['accentcolor'] = '#' + persona.accentcolor
if persona.textcolor:
self.initial['textcolor'] = '#' + persona.textcolor
self.initial['license'] = persona.license
cats = sorted(Category.objects.filter(type=amo.ADDON_PERSONA,
weight__gte=0),
key=lambda x: x.name)
self.fields['category'].choices = [(c.id, c.name) for c in cats]
try:
self.initial['category'] = addon.categories.values_list(
'id', flat=True)[0]
except IndexError:
pass
for field in ('header', 'footer'):
self.fields[field].widget.attrs = {
'data-upload-url': reverse('devhub.personas.reupload_persona',
args=[addon.slug,
'persona_%s' % field]),
'data-allowed-types': 'image/jpeg|image/png'
}
def save(self):
addon = self.instance
persona = addon.persona
data = self.cleaned_data
# Update Persona-specific data.
persona_data = {
'license': int(data['license']),
'accentcolor': data['accentcolor'].lstrip('#'),
'textcolor': data['textcolor'].lstrip('#'),
'author': self.request.amo_user.username,
'display_username': self.request.amo_user.name
}
changed = False
for k, v in persona_data.iteritems():
if v != getattr(persona, k):
changed = True
setattr(persona, k, v)
if changed:
persona.save()
if self.changed_data:
amo.log(amo.LOG.EDIT_PROPERTIES, addon)
self.instance.modified = datetime.now()
# Update Addon-specific data.
changed = (
set(self.old_tags) != data['tags'] or # Check if tags changed.
self.initial['slug'] != data['slug'] or # Check if slug changed.
transfield_changed('description', self.initial, data) or
transfield_changed('name', self.initial, data))
if changed:
# Only save if addon data changed.
super(EditThemeForm, self).save()
# Update tags.
tags_new = data['tags']
tags_old = [slugify(t, spaces=True) for t in self.old_tags]
# Add new tags.
for t in set(tags_new) - set(tags_old):
Tag(tag_text=t).save_tag(addon)
# Remove old tags.
for t in set(tags_old) - set(tags_new):
Tag(tag_text=t).remove_tag(addon)
# Update category.
if data['category'].id != self.initial['category']:
addon_cat = addon.addoncategory_set.all()[0]
addon_cat.category = data['category']
addon_cat.save()
# Theme reupload.
if not addon.is_pending():
if data['header_hash'] or data['footer_hash']:
save_theme_reupload.delay(
data['header_hash'], data['footer_hash'], addon)
return data
class EditThemeOwnerForm(happyforms.Form):
owner = UserEmailField()
def __init__(self, *args, **kw):
self.instance = kw.pop('instance')
super(EditThemeOwnerForm, self).__init__(*args, **kw)
addon = self.instance
self.fields['owner'].widget.attrs['placeholder'] = _(
"Enter a new author's email address")
try:
self.instance_addonuser = addon.addonuser_set.all()[0]
self.initial['owner'] = self.instance_addonuser.user.email
except IndexError:
# If there was never an author before, then don't require one now.
self.instance_addonuser = None
self.fields['owner'].required = False
def save(self):
data = self.cleaned_data
if data.get('owner'):
changed = (not self.instance_addonuser or
self.instance_addonuser != data['owner'])
if changed:
# Update Persona-specific data.
persona = self.instance.persona
persona.author = data['owner'].username
persona.display_username = data['owner'].name
persona.save()
if not self.instance_addonuser:
# If there was never an owner before, create one.
self.instance.addonuser_set.create(user=data['owner'],
role=amo.AUTHOR_ROLE_OWNER)
elif self.instance_addonuser != data['owner']:
# If the owner has changed, update the `AddonUser` object.
self.instance_addonuser.user = data['owner']
self.instance_addonuser.role = amo.AUTHOR_ROLE_OWNER
self.instance_addonuser.save()
self.instance.modified = datetime.now()
self.instance.save()
return data
class ContributionForm(happyforms.Form):
amount = forms.DecimalField(required=True, min_value=Decimal('0.01'))
|
import os
from glob import glob
import pandas as pd
def get_list_of_full_child_dirs(d):
"""
For a directory d (full path),
return a list of its subdirectories
in a full path form.
"""
children = (os.path.join(d, child) for child in os.listdir(d))
dirs = filter(os.path.isdir, children)
return list(dirs)
def split_full_path(full_path, base_dir):
"""
Given a full path, return:
- relative_dir: the part of the path that does not
include the base directory and the basename
- basename
"""
fname = os.path.basename(full_path)
relative_path = full_path.split(base_dir)[-1]
relative_dir = relative_path.split(fname)[0]
relative_dir = relative_dir[1:-1] # clip slashes
return relative_dir, fname
def gather_files(base_dir, file_mask):
"""
Walk the directory base_dir using os.walk
and gather files that match file_mask (e.g. '*.jpg').
Return the result as a Pandas dataframe with columns
'relative_dir' and 'basename'.
"""
res_tuples = []
for dir_name, subdirs, files in os.walk(base_dir):
dir_has_files = len(files) > 0
if dir_has_files:
full_mask = os.path.join(dir_name, file_mask)
mask_matches = glob(full_mask)
res_tuples += [split_full_path(f, base_dir) for f in mask_matches]
return pd.DataFrame(res_tuples, columns=['relative_dir', 'basename'])
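# Hedged usage sketch (the directory name and file mask below are illustrative
# assumptions, not part of the module): gather_files() walks the tree, collects
# matching files and returns a dataframe with 'relative_dir' and 'basename' columns.
if __name__ == '__main__':
    photos_df = gather_files('photos', '*.jpg')
    print(photos_df.head())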
|
def coding_problem_31(s, t, debt=0):
    """
    Given two strings, compute the edit distance between them.
    The edit distance between two strings refers to the minimum number of character insertions, deletions, and
    substitutions required to change one string to the other.
    Example:
    >>> coding_problem_31("kitten", "sitting") # k>>s, e>>i, +g
    3
    >>> coding_problem_31("kitten", "cat") # k>>c, i>>a, -ten
    5
    >>> coding_problem_31("black", "white")
    5
    >>> coding_problem_31("top", "dog")
    2
    """
    # Recursive Levenshtein distance: strip matching prefixes, otherwise take the
    # cheapest of deletion, insertion or substitution on the first character.
    if not s:
        return len(t)
    if not t:
        return len(s)
    if s[0] == t[0]:
        return coding_problem_31(s[1:], t[1:])
    return 1 + min(coding_problem_31(s[1:], t),      # delete from s
                   coding_problem_31(s, t[1:]),      # insert into s
                   coding_problem_31(s[1:], t[1:]))  # substitute
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
|
import torch
from .elliptical_slice import EllipticalSliceSampler
class MeanEllipticalSliceSampler(EllipticalSliceSampler):
def __init__(self, f_init, dist, lnpdf, nsamples, pdf_params=()):
"""
Implementation of elliptical slice sampling (Murray, Adams, & MacKay, 2010).
f_init: initial value of `f`
dist: multivariate normal distribution to sample from
lnpdf: log-likelihood function
nsamples: number of samples
pdf_params: additional arguments passed to lnpdf
"""
mean_vector = dist.mean
demeaned_lnpdf = lambda g: lnpdf(g + mean_vector, *pdf_params)
demeaned_init = f_init - mean_vector
samples = dist.sample(sample_shape = torch.Size((nsamples,))).transpose(-1, -2)
demeaned_samples = samples - mean_vector.unsqueeze(1)
super(MeanEllipticalSliceSampler, self).__init__(demeaned_init, demeaned_samples, demeaned_lnpdf, nsamples, pdf_params=())
self.mean_vector = mean_vector
def run(self):
self.f_sampled, self.ell = super().run()
#add means back into f_sampled
self.f_sampled = self.f_sampled + self.mean_vector.unsqueeze(1)
return self.f_sampled, self.ell
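# Hedged usage sketch (illustrative only): the prior, toy log-likelihood and sample
# count below are assumptions, and run() relies on the parent EllipticalSliceSampler.
if __name__ == '__main__':
    dim = 2
    prior = torch.distributions.MultivariateNormal(torch.ones(dim), torch.eye(dim))
    toy_lnpdf = lambda f: -0.5 * (f ** 2).sum()  # toy Gaussian log-likelihood
    sampler = MeanEllipticalSliceSampler(torch.zeros(dim), prior, toy_lnpdf, nsamples=100)
    f_sampled, ell = sampler.run()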
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations:
"""ServiceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2022_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _registries_delete_initial(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-05-01-preview"
accept = "application/json"
# Construct URL
url = self._registries_delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_registries_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/serviceRegistries/default'} # type: ignore
async def begin_registries_delete(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Disable the default Service Registry.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._registries_delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_registries_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/serviceRegistries/default'} # type: ignore
|
# coding:utf-8
'''
python 3.5
mxnet 1.3.0
gluoncv 0.3.0
visdom 0.1.7
gluonbook 0.6.9
author: helloholmes
'''
import mxnet as mx
import numpy as np
import os
import time
import pickle
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet import autograd
from mxnet.gluon import nn
class VGG16(nn.HybridBlock):
# input size (b, 3, 224, 224)
def __init__(self, num_classes=120, **kwargs):
super(VGG16, self).__init__(**kwargs)
model = gluon.model_zoo.vision.get_model('vgg16', pretrained=True)
with self.name_scope():
self.features = model.features
self.output = nn.Dense(num_classes)
def initialize(self, ctx=None):
for param in self.collect_params().values():
if param._data is not None:
continue
else:
param.initialize()
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
if __name__ == '__main__':
m = VGG16()
m.initialize()
data = mx.nd.random.uniform(shape=(1, 3, 224, 224))
out = m(data)
print(out.shape)
|
import os
import numpy as np
def get_lax_sod_network():
return [12, 12, 10, 12, 10, 12, 10, 10, 12, 1]
def get_lax_sod_data_inner():
data_path = os.environ.get("LAX_SOD_REPO_PATH", "../lax_sod_tube")
qmc_points = np.loadtxt(os.path.join(data_path, "parameters/parameters_sobol_X.txt"))
forces = np.loadtxt(os.path.join(data_path, "functionals/average_functionals_sobol_2048.txt"))
data_per_func = {}
force_names = [*[f'q{k+1}' for k in range(3)],
*[f'EK{k+1}' for k in range(3)]]
for n, force_name in enumerate(force_names):
data_per_func[force_name] = forces[:, n]
return qmc_points, data_per_func
def get_lax_sod_data():
qmc_points, qmc_values = get_lax_sod_data_inner()
mc_params, mc_values = get_lax_sod_data_mc_inner()
return qmc_points, qmc_values, mc_params, mc_values
def get_lax_sod_data_mc_inner():
data_path = os.environ.get("LAX_SOD_REPO_PATH", "../lax_sod_tube")
mc_points = np.loadtxt(os.path.join(data_path, "parameters/parameters_mc_X.txt"))
forces = np.loadtxt(os.path.join(data_path, "functionals/average_functionals_mc_2048.txt"))
data_per_func = {}
force_names = [*[f'q{k+1}' for k in range(3)],
*[f'EK{k+1}' for k in range(3)]]
for n, force_name in enumerate(force_names):
data_per_func[force_name] = forces[:, n]
return mc_points, data_per_func
def get_lax_sod_data_mc():
mc_params, mc_values = get_lax_sod_data_mc_inner()
qmc_params, qmc_values = get_lax_sod_data_inner()
return mc_params, mc_values, qmc_params, qmc_values
def make_folders():
folders = ['img', 'img_tikz', 'tables', 'results']
for folder in folders:
if not os.path.exists(folder):
os.mkdir(folder)
|
#
# The BSD License
#
# Copyright (c) 2008, Florian Noeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# Neither the name of the of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# just import this file to get all the paths setup up
import sys
import os
sys.path.append(os.path.normpath(os.path.join(sys.path[0], '..', '..', '3rdparty', 'pylibs')))
sys.path.append(os.path.normpath(os.path.join(sys.path[0], 'grammar')))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 Salvador E. Tropea
# Copyright (c) 2020-2021 Instituto Nacional de Tecnologïa Industrial
# License: Apache 2.0
# Project: KiAuto (formerly kicad-automation-scripts)
import os
import re
import json
import configparser
from contextlib import contextmanager
from sys import exit, path
# Default W,H for recording
REC_W = 1366
REC_H = 960
# Return error codes
# Positive values are ERC/DRC errors
NO_SCHEMATIC = 1
WRONG_ARGUMENTS = 2 # This is what argparse uses
EESCHEMA_CFG_PRESENT = 11
KICAD_CFG_PRESENT = 3
NO_PCB = 4
PCBNEW_CFG_PRESENT = 5
WRONG_LAYER_NAME = 6
WRONG_PCB_NAME = 7
WRONG_SCH_NAME = 8
PCBNEW_ERROR = 9
EESCHEMA_ERROR = 10
NO_PCBNEW_MODULE = 11
USER_HOTKEYS_PRESENT = 12
CORRUPTED_PCB = 13
# Wait up to 60 s for the pcbnew/eeschema window to be present
WAIT_START = 60
# Name for testing versions
NIGHTLY = 'nightly'
# Scale factor for the timeouts
TIME_OUT_MULT = 1.0
KICAD_VERSION_5_99 = 5099000
KICAD_SHARE = '/usr/share/kicad/'
KICAD_NIGHTLY_SHARE = '/usr/share/kicad-nightly/'
@contextmanager
def hide_stderr():
""" Low level stderr supression, used to hide KiCad bugs. """
newstderr = os.dup(2)
devnull = os.open('/dev/null', os.O_WRONLY)
os.dup2(devnull, 2)
os.close(devnull)
yield
os.dup2(newstderr, 2)
class Config(object):
def __init__(self, logger, input_file=None, args=None):
self.export_format = 'pdf'
if input_file:
self.input_file = input_file
self.input_no_ext = os.path.splitext(input_file)[0]
#
# As soon as we init pcbnew the following files are modified:
#
if os.path.isfile(self.input_no_ext+'.pro'):
self.start_pro_stat = os.stat(self.input_no_ext+'.pro')
else:
self.start_pro_stat = None
if os.path.isfile(self.input_no_ext+'.kicad_pro'):
self.start_kicad_pro_stat = os.stat(self.input_no_ext+'.kicad_pro')
else:
self.start_kicad_pro_stat = None
if os.path.isfile(self.input_no_ext+'.kicad_prl'):
self.start_kicad_prl_stat = os.stat(self.input_no_ext+'.kicad_prl')
else:
self.start_kicad_prl_stat = None
if args:
# Session debug
self.use_wm = args.use_wm # Use a Window Manager, dialogs behaves in a different way
self.start_x11vnc = args.start_x11vnc
self.rec_width = args.rec_width
self.rec_height = args.rec_height
self.record = args.record
self.video_dir = args.output_dir
self.wait_for_key = args.wait_key
self.time_out_scale = args.time_out_scale
# Others
if hasattr(args, 'file_format'):
self.export_format = args.file_format.lower()
else:
# Session debug
self.use_wm = False
self.start_x11vnc = False
self.rec_width = REC_W
self.rec_height = REC_H
self.record = False
self.video_dir = None
self.wait_for_key = False
self.time_out_scale = 1.0
self.colordepth = 24
self.video_name = None
self.video_dir = self.output_dir = ''
# Executable and dirs
self.eeschema = 'eeschema'
self.pcbnew = 'pcbnew'
self.kicad_conf_dir = 'kicad'
ng_ver = os.environ.get('KIAUS_USE_NIGHTLY')
if ng_ver:
self.eeschema += '-'+NIGHTLY
self.pcbnew += '-'+NIGHTLY
self.kicad_conf_dir += os.path.join(NIGHTLY, ng_ver)
# Path to the Python module
path.insert(0, '/usr/lib/kicad-nightly/lib/python3/dist-packages')
# Detect KiCad version
try:
import pcbnew
except ImportError:
logger.error("Failed to import pcbnew Python module."
" Is KiCad installed?"
" Do you need to add it to PYTHONPATH?")
exit(NO_PCBNEW_MODULE)
kicad_version = pcbnew.GetBuildVersion()
m = re.match(r'(\d+)\.(\d+)\.(\d+)', kicad_version)
self.kicad_version_major = int(m.group(1))
self.kicad_version_minor = int(m.group(2))
self.kicad_version_patch = int(m.group(3))
self.kicad_version = self.kicad_version_major*1000000+self.kicad_version_minor*1000+self.kicad_version_patch
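# e.g. KiCad 5.99.0 encodes as 5*1000000 + 99*1000 + 0 = 5099000 (KICAD_VERSION_5_99)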
logger.debug('Detected KiCad v{}.{}.{} ({} {})'.format(self.kicad_version_major, self.kicad_version_minor,
self.kicad_version_patch, kicad_version, self.kicad_version))
# Config file names
if self.kicad_version >= KICAD_VERSION_5_99:
self.kicad_conf_path = pcbnew.GetSettingsManager().GetUserSettingsPath()
if ng_ver:
self.kicad_conf_path = self.kicad_conf_path.replace('/kicad/', '/kicadnightly/')
else:
# Bug in KiCad (#6989), prints to stderr:
# `../src/common/stdpbase.cpp(62): assert "traits" failed in Get(test_dir): create wxApp before calling this`
# Found in KiCad 5.1.8, 5.1.9
# So we temporarily suppress stderr
with hide_stderr():
self.kicad_conf_path = pcbnew.GetKicadConfigPath()
logger.debug('Config path {}'.format(self.kicad_conf_path))
# First we solve kicad_common because it can redirect to another config dir
self.conf_kicad = os.path.join(self.kicad_conf_path, 'kicad_common')
self.conf_kicad_bkp = None
if self.kicad_version >= KICAD_VERSION_5_99:
self.conf_kicad += '.json'
self.conf_kicad_json = True
else:
self.conf_kicad_json = False
# Read the environment redefinitions used by KiCad
if os.path.isfile(self.conf_kicad):
self.load_kicad_environment(logger)
if 'KICAD_CONFIG_HOME' in self.env and self.kicad_version < KICAD_VERSION_5_99:
# The user is redirecting the configuration
# KiCad 5 unintentionally allows it; it's a bug and won't be fixed:
# https://forum.kicad.info/t/kicad-config-home-inconsistencies-and-detail/26875
self.kicad_conf_path = self.env['KICAD_CONFIG_HOME']
logger.debug('Redirecting KiCad config path to: '+self.kicad_conf_path)
else:
logger.warning('Missing KiCad main config file '+self.conf_kicad)
# - eeschema config
self.conf_eeschema = os.path.join(self.kicad_conf_path, 'eeschema')
self.conf_eeschema_bkp = None
# - pcbnew config
self.conf_pcbnew = os.path.join(self.kicad_conf_path, 'pcbnew')
self.conf_pcbnew_bkp = None
# Config files that migrated to JSON
# Note that they remain in the old format until saved
if self.kicad_version >= KICAD_VERSION_5_99:
self.conf_eeschema += '.json'
self.conf_pcbnew += '.json'
self.conf_eeschema_json = True
self.conf_pcbnew_json = True
self.pro_ext = 'kicad_pro'
self.prl_ext = 'kicad_prl'
else:
self.conf_eeschema_json = False
self.conf_pcbnew_json = False
self.pro_ext = 'pro'
self.prl_ext = None
# - hotkeys
self.conf_hotkeys = os.path.join(self.kicad_conf_path, 'user.hotkeys')
self.conf_hotkeys_bkp = None
# - sym-lib-table
self.user_sym_lib_table = os.path.join(self.kicad_conf_path, 'sym-lib-table')
self.user_fp_lib_table = os.path.join(self.kicad_conf_path, 'fp-lib-table')
self.sys_sym_lib_table = [KICAD_SHARE+'template/sym-lib-table']
self.sys_fp_lib_table = [KICAD_SHARE+'template/fp-lib-table']
if ng_ver:
# 20200912: sym-lib-table is missing
self.sys_sym_lib_table.insert(0, KICAD_NIGHTLY_SHARE+'template/sym-lib-table')
self.sys_fp_lib_table.insert(0, KICAD_NIGHTLY_SHARE+'template/fp-lib-table')
# Some details about the UI
if self.kicad_version >= KICAD_VERSION_5_99:
# KiCad 5.99.0
self.ee_window_title = r'\[.*\] — Eeschema$' # "PROJECT [HIERARCHY_PATH] - Eeschema"
else:
# KiCad 5.1.6
self.ee_window_title = r'Eeschema.*\.sch' # "Eeschema - file.sch"
# Collected errors and unconnecteds (warnings)
self.errs = []
self.wrns = []
# Error filters
self.err_filters = []
def load_kicad_environment(self, logger):
self.env = {}
if self.conf_kicad_json:
env = self.get_config_vars_json(self.conf_kicad)
if env:
self.env = env
else:
env = self.get_config_vars_ini(self.conf_kicad)
if env:
for k, v in env.items():
self.env[k.upper()] = v
logger.debug('KiCad environment: '+str(self.env))
@staticmethod
def get_config_vars_json(file):
with open(file, "rt") as f:
data = json.load(f)
if 'environment' in data and 'vars' in data['environment']:
return data['environment']['vars']
return None
@staticmethod
def get_config_vars_ini(file):
config = configparser.ConfigParser()
with open(file, "rt") as f:
data = f.read()
config.read_string('[Various]\n'+data)
if 'EnvironmentVariables' in config:
return config['EnvironmentVariables']
return None
__author__ = 'Salvador E. Tropea'
__copyright__ = 'Copyright 2018-2021, INTI/Productize SPRL'
__credits__ = ['Salvador E. Tropea', 'Seppe Stas', 'Jesse Vincent', 'Scott Bezek']
__license__ = 'Apache 2.0'
__email__ = 'stropea@inti.gob.ar'
__status__ = 'beta'
__url__ = 'https://github.com/INTI-CMNB/KiAuto/'
__version__ = '1.5.8'
|
# Replace with DB URI; proto://user:pass@host/database
DB_URI = "replace"
# Replace with bot token
TOKEN = "replace"
# Replace with IDs of admin command users
ADMIN_IDS = []
# Replace with voice channel for audio clue
TARGET_VOICE_CHANNEL = 0
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Alexander Steffens <a.steffens@fz-juelich.de>
#
# *****************************************************************************
"""GALAXI Automatic vacuum control and detector positioning"""
from nicos.core.device import Readable
from nicos.core.params import Attach, Param, listof
from nicos.devices.tango import NamedDigitalOutput
class DetectorDistance(Readable):
"""Calculate detector distance based on the detector tubes position"""
attached_devices = {
'detectubes': Attach('Pilatus detector tubes', Readable, multiple=4)
}
parameters = {
'offset': Param('Minimum distance between Pilatus and sample',
type=int, settable=True),
'tubelength': Param('List of tube length',
type=listof(int), settable=False,
default=[450, 450, 900, 900]),
}
hardware_access = False
def doInit(self, mode):
self.log.debug('Detector distance init')
self.read()
def doRead(self, maxage=0):
distance = 0
for tube, l in zip(self._attached_detectubes, self.tubelength):
# tubes can only be set in correct sequence
if tube.read(maxage) != 'up':
break
distance += l
return self.offset + distance
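# Worked example for doRead() above (tube states are hypothetical): with the default
# tubelength [450, 450, 900, 900], if the first two tubes read 'up' and the third does
# not, the loop stops after adding 450 + 450, so doRead() returns offset + 900.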
class VacuumOperation(NamedDigitalOutput):
"""Provide different vacuum operation states"""
def doStop(self):
self._dev.Reset()
|
"""Lauda temperature controller class
Python class for Lauda temperature controllers
:platform: Unix
:synopsis: Python class for Lauda temperature controllers
.. moduleauthor:: Henrique Dante de Almeida <henrique.almeida@lnls.br>
"""
from threading import Event
from epics import Device, ca
from py4syn.epics.IScannable import IScannable
from py4syn.epics.StandardDevice import StandardDevice
from py4syn.utils.timer import Timer
class Lauda(StandardDevice, IScannable):
"""
Class to control Lauda temperature controllers via EPICS.
Examples
--------
>>> from py4syn.epics.LaudaClass import Lauda
>>>
>>> def showTemperature(pv):
... lauda = Lauda(pv, 'lauda')
... print('Temperature is: %d' % lauda.getValue())
...
>>> def setTemperature(lauda, temperature):
... lauda.setValue(temperature)
... lauda.run()
"""
EPSILON = 0.1
def __init__(self, pvName, mnemonic):
"""
**Constructor**
See :class:`py4syn.epics.StandardDevice`
Parameters
----------
pvName : `string`
Power supply base naming of the PV (Process Variable)
mnemonic : `string`
Temperature controller mnemonic
"""
super().__init__(mnemonic)
self.pvName = pvName
self.lauda = Device(pvName + ':', ['BLEVEL', 'BOVERTEMP', 'BPOWER', 'BSP',
'BSTATS', 'BTEMP', 'BTN', 'BTHERMOSTATS', 'WSP', 'WSTART',
'ETEMP', 'WPUMP', 'WSTOP', 'WTN'])
self.newTemperature = Event()
self.lauda.add_callback('BTEMP', self.onTemperatureChange)
# Skip initial callback
self.newTemperature.wait(1)
def __str__(self):
return '%s (%s)' % (self.getMnemonic(), self.pvName)
def getValue(self):
"""
Returns the current measured temperature.
Returns
-------
`int`
"""
return self.lauda.get('BTEMP')
def getRealPosition(self):
"""
Returns the same as :meth:`getValue`.
See: :meth:`getValue`
Returns
-------
`int`
"""
return self.getValue()
def onTemperatureChange(self, **kwargs):
"""
Helper callback that indicates when the measured temperature changed.
"""
self.newTemperature.set()
def setVelocity(self, r):
"""
Dummy method setVelocity()
Parameters
----------
r : `float`
Ramp speed in °C/min
"""
pass
def setValue(self, v):
"""
Changes the temperature to a new value.
Parameters
----------
v : `int`
The target temperature in °C
"""
self.lauda.put('WSP', v)
self.run()
self.requestedValue = v
def wait(self):
"""
Blocks until the requested temperature is achieved.
"""
ca.flush_io()
self.newTemperature.clear()
while abs(self.getValue()-self.requestedValue) > self.EPSILON:
# Give up after 60 seconds without an update
if not self.newTemperature.wait(60):
break
self.newTemperature.clear()
def getLowLimitValue(self):
"""
Returns the controller low limit temperature.
Returns
-------
`int`
"""
return -20
def getHighLimitValue(self):
"""
Returns the controller high limit temperature.
Returns
-------
`int`
"""
return 200
def run(self):
"""
Starts or resumes executing the current temperature program.
"""
self.lauda.put('WSTART', 1)
def stop(self):
"""
Stops executing the current temperature program and puts the device in idle state.
In the idle state, the device will not try to set a target temperature.
"""
self.lauda.put('WSTOP', 1)
def setPumpSpeed(self, speed):
"""
Changes the pump speed.
Parameters
----------
speed : `int`
The requested pump speed, ranging from 1 to 8.
"""
if speed < 1 or speed > 8:
raise ValueError('Invalid speed')
self.lauda.put('WPUMP', speed)
def getInternalTemp(self):
"""
Same as :meth:`getValue`.
See :meth:`getValue`
Returns
-------
`int`
"""
return self.getValue()
def getExternalTemp(self):
"""
Returns the device's external temperature.
Returns
-------
`int`
"""
return self.lauda.get('ETEMP')
def getLevel(self):
"""
Returns the device's bath level.
Returns
-------
`int`
"""
return self.lauda.get('BLEVEL')
def getSetPoint(self):
"""
Returns the current target temperature.
Returns
-------
`int`
"""
return self.lauda.get('BSP')
def getPower(self):
"""
Returns the current device power.
Returns
-------
`int`
"""
return self.lauda.get('BPOWER')
def getOverTemp(self):
"""
Returns the software-defined maximum temperature limit.
Returns
-------
`int`
"""
return self.lauda.get('BOVERTEMP')
def getTN(self):
"""
Returns
-------
`int`
"""
return self.lauda.get('BTN')
def getStatus(self):
"""
Returns the device status word.
Returns
-------
`int`
"""
return self.lauda.get('BSTATS')
def getThermoStatus(self):
"""
Returns the device thermostat error word.
Returns
-------
`int`
"""
return self.lauda.get('BTHERMOSTATS')
def changeSetPoint(self, val):
"""
Same as :meth:`setValue`.
See :meth:`setValue`
Parameters
----------
val : `int`
The requested temperature.
"""
self.setValue(val)
def changePump(self, val):
"""
Same as :meth:`setPumpSpeed`.
See :meth:`setPumpSpeed`
Parameters
----------
val : `int`
The requested pump speed.
"""
self.setPumpSpeed(val)
def changeTN(self, val):
self.lauda.put('WTN', val)
def start(self):
"""
Same as :meth:`run`.
See :meth:`run`
"""
self.run()
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse.linalg as spLA
import majoranaJJ.operators.sparse.qmsops as spop #sparse operators
import majoranaJJ.lattice.nbrs as nb #neighbor arrays
import majoranaJJ.lattice.shapes as shps #lattice shapes
import majoranaJJ.modules.plots as plots #plotting functions
R = 50
r = 15
ax = 10 #[A]
ay = 10 #[A]
coor = shps.donut(R, r)
NN = nb.NN_Arr(coor)
print("lattice size", coor.shape[0])
alpha = 0 #Spin-Orbit Coupling constant: [eV*A]
gammaz = 0 #Zeeman field energy contribution: [T]
delta = 0 #Superconducting Gap: [eV]
V0 = 0.0 #Amplitude of potential : [eV]
mu = 0 #Chemical Potential: [eV]
H = spop.H0(coor, ax, ay, NN)
print("H shape: ", H.shape)
num = 75 # This is the number of eigenvalues and eigenvectors you want
sigma = 0 # This is the eigenvalue we search around
which = 'LM'
eigs, vecs = spLA.eigsh(H, k = num, sigma = sigma, which = which)
plots.state_cmap(coor, eigs, vecs, n = 0, title = 'SPARSE Free Particle Ground State')
n = 39
plots.state_cmap(coor, eigs, vecs, n = n, title = 'SPARSE: Excited State # {}'.format(n))
|
#!/usr/bin/env python3.4
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask.ext.login import login_user, logout_user, login_required
from ..models import User
from ..forms import LoginForm
auth = Blueprint('auth', __name__)
@auth.route('/login', methods=['GET', 'POST'])
def login():
"""
The login view. It uses the login form from forms and relies on
Flask-login to do its bidding.
If the form is valid on submit, the function gets the user object
using the submitted username.
"""
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if not user or not user.verify_password(form.password.data):
flash('Invalid email or password')
return redirect(url_for('auth.login'))
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('admin.dashboard'))
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('Logged out and good to go.')
return redirect(url_for('blog.main'))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################################################################################################################################################################
######################################################################################################## PRE-DEFINED IMPORTS #######################################################################################################
####################################################################################################################################################################################################################################
# Imports that are necessary for the program architecture to work properly
# Do not edit this code
import ast
import sys
import os
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED CONSTANTS ######################################################################################################
####################################################################################################################################################################################################################################
# Possible characters to send to the maze application
# Any other will be ignored
# Do not edit this code
UP = 'U'
DOWN = 'D'
LEFT = 'L'
RIGHT = 'R'
####################################################################################################################################################################################################################################
# Name of your team
# It will be displayed in the maze
# You have to edit this code
TEAM_NAME = "closest"
####################################################################################################################################################################################################################################
########################################################################################################## YOUR VARIABLES ##########################################################################################################
####################################################################################################################################################################################################################################
# Stores all the moves in a list to restitute them one by one
allMoves = [RIGHT, RIGHT, RIGHT, UP, RIGHT, RIGHT, RIGHT, UP, UP, UP, RIGHT, UP, UP, UP, RIGHT, UP]
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED FUNCTIONS ######################################################################################################
####################################################################################################################################################################################################################################
# Writes a message to the shell
# Use for debugging your program
# Channels stdout and stdin are captured to enable communication with the maze
# Do not edit this code
def debug (text) :
# Writes to the stderr channel
sys.stderr.write(str(text) + "\n")
sys.stderr.flush()
####################################################################################################################################################################################################################################
# Reads one line of information sent by the maze application
# This function is blocking, and will wait for a line to terminate
# The received information is automatically converted to the correct type
# Do not edit this code
def readFromPipe () :
# Reads from the stdin channel and returns the structure associated to the string
try :
text = sys.stdin.readline()
return ast.literal_eval(text.strip())
except :
os._exit(-1)
####################################################################################################################################################################################################################################
# Sends the text to the maze application
# Do not edit this code
def writeToPipe (text) :
# Writes to the stdout channel
sys.stdout.write(text)
sys.stdout.flush()
####################################################################################################################################################################################################################################
# Reads the initial maze information
# The function processes the text and returns the associated variables
# The dimensions of the maze are positive integers
# Maze map is a dictionary associating to a location its adjacent locations and the associated weights
# The preparation time gives the time during which 'initializationCode' can make computations before the game starts
# The turn time gives the time during which 'determineNextMove' can make computations before returning a decision
# Player locations are tuples (line, column)
# Coins are given as a list of locations where they appear
# A boolean indicates if the game is over
# Do not edit this code
def processInitialInformation () :
# We read from the pipe
data = readFromPipe()
return (data['mazeWidth'], data['mazeHeight'], data['mazeMap'], data['preparationTime'], data['turnTime'], data['playerLocation'], data['opponentLocation'], data['coins'], data['gameIsOver'])
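# Illustrative shapes of the structures described above (values are hypothetical):
#   mazeMap maps a location to its adjacent locations and weights, e.g.
#     {(0, 0): {(0, 1): 1, (1, 0): 1}, (0, 1): {(0, 0): 1, (1, 1): 1}, ...}
#   playerLocation / opponentLocation are (line, column) tuples, e.g. (0, 0)
#   coins is a list of locations, e.g. [(3, 2), (5, 7)]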
####################################################################################################################################################################################################################################
# Reads the information after each player moved
# The maze map and allowed times are no longer provided since they do not change
# Do not edit this code
def processNextInformation () :
# We read from the pipe
data = readFromPipe()
return (data['playerLocation'], data['opponentLocation'], data['coins'], data['gameIsOver'])
####################################################################################################################################################################################################################################
########################################################################################################## YOUR FUNCTIONS ##########################################################################################################
####################################################################################################################################################################################################################################
# This is where you should write your code to do things during the initialization delay
# This function should not return anything, but should be used for a short preprocessing
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def initializationCode (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
# Nothing to do
pass
####################################################################################################################################################################################################################################
# This is where you should write your code to determine the next direction
# This function should return one of the directions defined in the CONSTANTS section
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def determineNextMove (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
# We return the next move as described by the list
global allMoves
nextMove = allMoves[0]
allMoves = allMoves[1:]
return nextMove
####################################################################################################################################################################################################################################
############################################################################################################# MAIN LOOP ############################################################################################################
####################################################################################################################################################################################################################################
# This is the entry point when executing this file
# We first send the name of the team to the maze
# The first message we receive from the maze includes its dimensions and map, the times allowed to the various steps, and the players and coins locations
# Then, at every loop iteration, we get the maze status and determine a move
# Do not edit this code
if __name__ == "__main__" :
# We send the team name
writeToPipe(TEAM_NAME + "\n")
# We process the initial information and have a delay to compute things using it
(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = processInitialInformation()
initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
# We decide how to move and wait for the next step
while not gameIsOver :
(playerLocation, opponentLocation, coins, gameIsOver) = processNextInformation()
if gameIsOver :
break
nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
writeToPipe(nextMove)
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
|
from distutils.core import setup
setup(
name='TestLibrary_MR', # How you named your package folder (MyLib)
packages=['TestLibrary_MR'], # Choose the same as "name"
version='0.2', # Start with a small number and increase it with every change you make
license='MIT', # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description='Just a test module', # Give a short description about your library
author='Md. Masudur Rahman', # Type in your name
author_email='masudurhimel@gmail.com', # Type in your E-Mail
url='https://github.com/masudurHimel/TestLibrary_MR', # Provide either the link to your github or to your website
download_url='https://github.com/masudurHimel/TestLibrary_MR/archive/refs/tags/v_02.tar.gz', # I explain this later on
keywords=['test'], # Keywords that define your package best
install_requires=[],
classifiers=[
'Development Status :: 3 - Alpha',
# Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3', # Specify which Python versions you want to support
],
)
|
import discord
from discord.ext import commands
from decorators import *
from io import BytesIO
from urllib.parse import quote
from base64 import b64encode
from json import loads
class encoding(commands.Cog):
def __init__(self):
self.ciphers = loads(open("./assets/json/encode.json", "r").read())
pass
@command(["jumble"])
@cooldown(2)
@require_args()
async def shuffle(self, ctx, *args):
return await ctx.reply(ctx.bot.util.shuffle(" ".join(args)))
@command(["morse-code"])
@cooldown(5)
@require_args()
async def morse(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += " " + self.ciphers.get(char, { "morse": char })["morse"]
return await ctx.reply(total[1:])
@command(["blind"])
@cooldown(5)
@require_args()
async def braille(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "braille": char })["braille"]
return await ctx.reply(total)
@command(["curve", "curve-text"])
@cooldown(5)
@require_args()
async def cursive(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "cursive": char })["cursive"]
return await ctx.reply(total)
@command(["fancy-text"])
@cooldown(5)
@require_args()
async def fancy(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "fancy": char })["fancy"]
return await ctx.reply(total)
@command(["upside-down", "upsidedown", "flip-text", "textflip"])
@cooldown(5)
@require_args()
async def fliptext(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "upside-down": char })["upside-down"]
return await ctx.reply(total)
@command()
@cooldown(4)
@require_args()
@permissions(bot=['attach_files'])
async def ascii(self, ctx, *args):
await ctx.trigger_typing()
parser = ctx.bot.Parser(args)
parser.parse(('hastebin',))
if (not parser) or (not parser.has("image")):
if not parser.other:
return await ctx.bot.cmds.invalid_args(ctx)
ascii = await ctx.bot.util.request(
"http://artii.herokuapp.com/make",
text=' '.join(parser.other)
)
if parser.has("hastebin"):
try:
response = await ctx.bot.http._HTTPClient__session.post("https://paste.mod.gg/documents", data=ascii)
assert response.status < 400
json = await response.json()
await ctx.success_embed(description=f"[**Click here to see the asciified text.**](https://paste.mod.gg/{json['key']})")
     del ascii, parser, json
return
except AssertionError:
pass
await ctx.reply(f'```{ascii[:2000]}```')
del ascii, parser
return
parser.shift("image")
image = await ctx.bot.Parser.parse_image(ctx, parser.other)
string = await ctx.bot.Image.asciify(image)
  if parser.has("hastebin"):
try:
response = await ctx.bot.http._HTTPClient__session.post("https://paste.mod.gg/documents", data=string)
assert response.status < 400
json = await response.json()
await ctx.success_embed(description=f"[**Click here to see the asciified image.**](https://paste.mod.gg/{json['key']})")
    del string, image, parser, json
return
except AssertionError:
pass
await ctx.bot.http.send_files(ctx.channel.id, content="", files=[discord.File(BytesIO(bytes(string, 'utf-8')), "asciified.txt")])
  del string, image, parser
@command()
@cooldown(2)
@permissions(bot=['attach_files'])
@require_args()
async def barcode(self, ctx, *args):
await ctx.trigger_typing()
return await ctx.send_image('http://www.barcode-generator.org/zint/api.php?bc_number=20&bc_data=' + quote(' '.join(args))[:75])
@command(['qrcode', 'qr-code'])
@cooldown(2)
@permissions(bot=['attach_files'])
@require_args()
async def qr(self, ctx, *args):
await ctx.trigger_typing()
return await ctx.send_image('https://api.qrserver.com/v1/create-qr-code/?size=150x150&data=' + quote(' '.join(args))[:75])
@command()
@cooldown(2)
@require_args()
async def binary(self, ctx, *args):
return await ctx.reply('```'+''.join(map(lambda x: f"{ord(x):08b}", ' '.join(args)))[:2000]+'```')
@command()
@cooldown(2)
@require_args(2)
async def caesar(self, ctx, *args):
offset = ctx.bot.Parser.get_numbers(args)
if not offset:
return await ctx.bot.cmds.invalid_args(ctx)
return await ctx.reply(ctx.bot.util.caesar(str(' '.join(args).replace(str(offset[0]), '')), offset[0]))
@command()
@cooldown(2)
@require_args()
async def atbash(self, ctx, *args):
return await ctx.reply(ctx.bot.util.atbash(' '.join(args)))
@command()
@cooldown(2)
@require_args()
async def reverse(self, ctx, *args):
return await ctx.reply(' '.join(args)[::-1])
@command(['b64'])
@cooldown(2)
@require_args()
async def base64(self, ctx, *args):
return await ctx.reply(b64encode(' '.join(args).encode('ascii')).decode('ascii'))
def setup(client):
client.add_cog(encoding())
|
import json
import requests
EDHREC_BASE_URL = 'https://edhrec-json.s3.amazonaws.com/commanders/%s.json'
COMMANDER_PAGE_SLUGS = frozenset([
'w',
'u',
'b',
'r',
'g',
'colorless',
'wu',
'ub',
'br',
'rg',
'gw',
'wb',
'ur',
'bg',
'rw',
'gu',
'wub',
'ubr',
'brg',
'rgw',
'gwu',
'wbg',
'urw',
'bgu',
'rwb',
'gur',
'wubr',
'ubrg',
'brgw',
'rgwu',
'gwub',
'wubrg',
])
def scrape_commanders_json(color_slug):
url = EDHREC_BASE_URL % color_slug
req = requests.get(url)
print(req.status_code, url)
 if req.status_code != 200:
  return []
json_obj = req.json()['container']['json_dict']
cards = json_obj['cardlists'][0]['cardviews']
counts = []
for card in cards:
card_name = card['name']
card_count = int(card['label'].split(' ')[0])
counts.append([card_name, card_count])
return counts
def scrape_edhrec_json():
counts = []
for slug in COMMANDER_PAGE_SLUGS:
counts.extend(scrape_commanders_json(slug))
for card in counts:
print(card)
return counts
if __name__ == "__main__":
print(scrape_commanders_json('b'))
|
import get_coefficients_as_list
import check_diagonal_dominant
# Function that solves a linear system of equations using the Gauss-Jacobi iterative method
def gauss_jacobi(no_of_unknowns):
coefficient_list = get_coefficients_as_list.get_coefficients_as_list(no_of_unknowns)
if check_diagonal_dominant.is_diagonally_dominant(coefficient_list):
print("Computing...")
else:
print("Matrix failed to be diagonally dominant\nExiting...")
return
factors = [0]*(no_of_unknowns)
sample_factors = [0]*(no_of_unknowns)
for i in range(0,6):
for j in range(0,no_of_unknowns):
diff = 0
for k in range(0,j):
diff = diff + coefficient_list[j][k]*factors[k]
for k in range(j+1,no_of_unknowns):
diff = diff + coefficient_list[j][k]*factors[k]
#print(coefficient_list[j][no_of_unknowns],"-",diff,"/",coefficient_list[j][j])
diff = (coefficient_list[j][no_of_unknowns]-diff)/coefficient_list[j][j]
sample_factors = sample_factors[0:j]+[diff]+sample_factors[j+1:]
factors = sample_factors
print("At iteration ",i+1," factors are ",factors)
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI
from cinder import objects
from cinder import quota
from cinder.volume.flows import common as flow_common
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
ACTION = 'snapshot:manage_existing'
class ExtractSnapshotRefTask(flow_utils.CinderTask):
"""Extracts snapshot reference for given snapshot id."""
default_provides = 'snapshot_ref'
def __init__(self, db):
super(ExtractSnapshotRefTask, self).__init__(addons=[ACTION])
self.db = db
def execute(self, context, snapshot_id):
# NOTE(wanghao): this will fetch the snapshot from the database, if
# the snapshot has been deleted before we got here then this should
# fail.
#
# In the future we might want to have a lock on the snapshot_id so that
        # the snapshot can not be deleted while it's still being created?
snapshot_ref = objects.Snapshot.get_by_id(context, snapshot_id)
LOG.debug("ExtractSnapshotRefTask return"
" snapshot_ref: %s", snapshot_ref)
return snapshot_ref
def revert(self, context, snapshot_id, result, **kwargs):
if isinstance(result, ft.Failure):
return
flow_common.error_out_snapshot(context, self.db, snapshot_id)
LOG.error(_LE("Snapshot %s: create failed"), snapshot_id)
class NotifySnapshotActionTask(flow_utils.CinderTask):
"""Performs a notification about the given snapshot when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix, host):
super(NotifySnapshotActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
self.host = host
def execute(self, context, snapshot_ref):
snapshot_id = snapshot_ref['id']
try:
volume_utils.notify_about_snapshot_usage(context, snapshot_ref,
self.event_suffix,
host=self.host)
except exception.CinderException:
            # If notification sending or snapshot database entry reading fails
# then we shouldn't error out the whole workflow since this is
# not always information that must be sent for snapshots to operate
LOG.exception(_LE("Failed notifying about the snapshot "
"action %(event)s for snapshot %(snp_id)s."),
{'event': self.event_suffix,
'snp_id': snapshot_id})
class PrepareForQuotaReservationTask(flow_utils.CinderTask):
"""Gets the snapshot size from the driver."""
default_provides = set(['size', 'snapshot_properties'])
def __init__(self, db, driver):
super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
def execute(self, context, snapshot_ref, manage_existing_ref):
snapshot_id = snapshot_ref['id']
if not self.driver.initialized:
driver_name = (self.driver.configuration.
safe_get('volume_backend_name'))
LOG.error(_LE("Unable to manage existing snapshot. "
"Volume driver %s not initialized."), driver_name)
flow_common.error_out_snapshot(context, self.db, snapshot_id,
reason=_("Volume driver %s "
"not initialized.") %
driver_name)
raise exception.DriverNotInitialized()
size = self.driver.manage_existing_snapshot_get_size(
snapshot=snapshot_ref,
existing_ref=manage_existing_ref)
return {'size': size,
'snapshot_properties': snapshot_ref}
class QuotaReserveTask(flow_utils.CinderTask):
"""Reserves a single snapshot with the given size.
Reversion strategy: rollback the quota reservation.
    Warning: if the process that is running this reserve and commit
    process fails (or is killed) before the quota is rolled back or committed,
    it does appear like the quota will never be rolled back. This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
taskflow has persistence built-in this should be easier to correct via
an automated or manual process.
"""
default_provides = set(['reservations'])
def __init__(self):
super(QuotaReserveTask, self).__init__(addons=[ACTION])
def execute(self, context, size, optional_args):
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': size}
reservations = QUOTAS.reserve(context, **reserve_opts)
return {
'reservations': reservations,
}
except exception.OverQuota as e:
overs = e.kwargs['overs']
quotas = e.kwargs['quotas']
usages = e.kwargs['usages']
volume_utils.process_reserve_over_quota(context, overs, usages,
quotas, size)
def revert(self, context, result, optional_args, **kwargs):
# We never produced a result and therefore can't destroy anything.
if isinstance(result, ft.Failure):
return
if optional_args['is_quota_committed']:
# The reservations have already been committed and can not be
# rolled back at this point.
return
# We actually produced an output that we can revert so lets attempt
# to use said output to rollback the reservation.
reservations = result['reservations']
try:
QUOTAS.rollback(context, reservations)
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
LOG.exception(_LE("Failed rolling back quota for"
" %s reservations."), reservations)
class QuotaCommitTask(flow_utils.CinderTask):
"""Commits the reservation.
    Reversion strategy: N/A (the rollback will be handled by the task that did
    the initial reservation; see: QuotaReserveTask).
    Warning: if the process that is running this reserve and commit
    process fails (or is killed) before the quota is rolled back or committed,
    it does appear like the quota will never be rolled back. This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
taskflow has persistence built-in this should be easier to correct via
an automated or manual process.
"""
def __init__(self):
super(QuotaCommitTask, self).__init__(addons=[ACTION])
def execute(self, context, reservations, snapshot_properties,
optional_args):
QUOTAS.commit(context, reservations)
# updating is_quota_committed attribute of optional_args dictionary
optional_args['is_quota_committed'] = True
return {'snapshot_properties': snapshot_properties}
def revert(self, context, result, **kwargs):
# We never produced a result and therefore can't destroy anything.
if isinstance(result, ft.Failure):
return
snapshot = result['snapshot_properties']
try:
reserve_opts = {'snapshots': -1,
'gigabytes': -snapshot['volume_size']}
reservations = QUOTAS.reserve(context,
project_id=context.project_id,
**reserve_opts)
if reservations:
QUOTAS.commit(context, reservations,
project_id=context.project_id)
except Exception:
LOG.exception(_LE("Failed to update quota while deleting "
"snapshots: %s"), snapshot['id'])
class ManageExistingTask(flow_utils.CinderTask):
"""Brings an existing snapshot under Cinder management."""
default_provides = set(['snapshot', 'new_status'])
def __init__(self, db, driver):
super(ManageExistingTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
def execute(self, context, snapshot_ref, manage_existing_ref, size):
model_update = self.driver.manage_existing_snapshot(
snapshot=snapshot_ref,
existing_ref=manage_existing_ref)
if not model_update:
model_update = {}
model_update.update({'size': size})
try:
snapshot_object = objects.Snapshot.get_by_id(context,
snapshot_ref['id'])
snapshot_object.update(model_update)
snapshot_object.save()
except exception.CinderException:
LOG.exception(_LE("Failed updating model of snapshot "
"%(snapshot_id)s with creation provided model "
"%(model)s."),
{'snapshot_id': snapshot_ref['id'],
'model': model_update})
raise
return {'snapshot': snapshot_ref,
'new_status': 'available'}
class CreateSnapshotOnFinishTask(NotifySnapshotActionTask):
"""Perform final snapshot actions.
When a snapshot is created successfully it is expected that MQ
notifications and database updates will occur to 'signal' to others that
the snapshot is now ready for usage. This task does those notifications and
updates in a reliable manner (not re-raising exceptions if said actions can
not be triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix, host):
super(CreateSnapshotOnFinishTask, self).__init__(db, event_suffix,
host)
def execute(self, context, snapshot, new_status):
LOG.debug("Begin to call CreateSnapshotOnFinishTask execute.")
snapshot_id = snapshot['id']
LOG.debug("New status: %s", new_status)
update = {
'status': new_status
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'building' if this fails)??
snapshot_object = objects.Snapshot.get_by_id(context,
snapshot_id)
snapshot_object.update(update)
snapshot_object.save()
# Now use the parent to notify.
super(CreateSnapshotOnFinishTask, self).execute(context, snapshot)
except exception.CinderException:
LOG.exception(_LE("Failed updating snapshot %(snapshot_id)s with "
"%(update)s."), {'snapshot_id': snapshot_id,
'update': update})
# Even if the update fails, the snapshot is ready.
LOG.info(_LI("Snapshot %s created successfully."), snapshot_id)
def get_flow(context, db, driver, host, snapshot_id, ref):
"""Constructs and returns the manager entry point flow."""
LOG.debug("Input parameters: context=%(context)s, db=%(db)s,"
"driver=%(driver)s, host=%(host)s, "
"snapshot_id=(snapshot_id)s, ref=%(ref)s.",
{'context': context,
'db': db,
'driver': driver,
'host': host,
'snapshot_id': snapshot_id,
'ref': ref}
)
flow_name = ACTION.replace(":", "_") + "_manager"
snapshot_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'snapshot_id': snapshot_id,
'manage_existing_ref': ref,
'optional_args': {'is_quota_committed': False}
}
notify_start_msg = "manage_existing_snapshot.start"
notify_end_msg = "manage_existing_snapshot.end"
snapshot_flow.add(ExtractSnapshotRefTask(db),
NotifySnapshotActionTask(db, notify_start_msg,
host=host),
PrepareForQuotaReservationTask(db, driver),
QuotaReserveTask(),
ManageExistingTask(db, driver),
QuotaCommitTask(),
CreateSnapshotOnFinishTask(db, notify_end_msg,
host=host))
LOG.debug("Begin to return taskflow.engines."
"load(snapshot_flow,store=create_what).")
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(snapshot_flow, store=create_what)
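# Hedged usage note (editor's addition, not from the original module): callers in
# the volume manager are expected to build and run this flow roughly as
#
#     flow_engine = get_flow(context, db, driver, host, snapshot_id, ref)
#     with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
#         flow_engine.run()
#
# The DynamicLogListener wrapper is an assumption based on how similar Cinder
# flows are typically driven; only get_flow() itself is defined in this module.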
|
import sys
from os import path
from setuptools import find_packages, setup
import versioneer
min_version = (3, 6)
if sys.version_info < min_version:
error = """
pcdscalc does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*sys.version_info[:2], *min_version)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
git_requirements = [r for r in requirements if r.startswith('git+')]
if git_requirements:
print('User must install the following packages manually:')
print()
print("\n".join(f'* {r}' for r in git_requirements))
print()
setup(
name='pcdscalc',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD',
author='SLAC National Accelerator Laboratory',
packages=find_packages(exclude=['docs', 'tests']),
description='PCDS Calculation Routines',
long_description=readme,
url='https://github.com/pcdshub/pcdscalc', # noqa
entry_points={
'console_scripts': [
# 'pcdscalc=pcdscalc.__main__:main', # noqa
],
},
include_package_data=True,
package_data={
'pcdscalc': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
"""
Interface to the accounts table. Data format is dicts, not objects.
"""
from anchore_engine.db import Account, AccountTypes, AccountStates
from anchore_engine.db.entities.common import anchore_now
class AccountNotFoundError(Exception):
def __init__(self, account_name):
super(AccountNotFoundError, self).__init__('User account not found. Name={}'.format(account_name))
self.account_name = account_name
class AccountAlreadyExistsError(Exception):
def __init__(self, account_name):
super(AccountAlreadyExistsError, self).__init__('User account already exists. name={}'.format(account_name))
self.account_name = account_name
class InvalidStateError(Exception):
def __init__(self, current_state, desired_state):
super(InvalidStateError, self).__init__('Invalid account state change requested. Cannot go from state {} to state {}'.format(current_state.value, desired_state.value))
self.current_state = current_state
self.desired_state = desired_state
def add(account_name, state=AccountStates.enabled, account_type=AccountTypes.user, email=None, session=None):
found_account = session.query(Account).filter_by(name=account_name).one_or_none()
if found_account:
raise AccountAlreadyExistsError(account_name)
accnt = Account()
accnt.name = account_name
accnt.state = state
accnt.type = account_type
accnt.email = email
accnt.created_at = anchore_now()
accnt.last_updated = anchore_now()
session.add(accnt)
return accnt.to_dict()
def update_state(name, new_state, session=None):
"""
    Update the state of the account. Allowed transitions:
    enabled -> disabled
    disabled -> enabled
    disabled -> deleting
    Deleting is a terminal state, and can be reached only from disabled.
:param name:
:param new_state:
:param session:
:return:
"""
accnt = session.query(Account).filter_by(name=name).one_or_none()
if not accnt:
raise AccountNotFoundError(name)
# Deleting state is terminal. Must deactivate account prior to deleting it.
if accnt.state == AccountStates.deleting or (accnt.state == AccountStates.enabled and new_state == AccountStates.deleting):
raise InvalidStateError(accnt.state, new_state)
accnt.state = new_state
return accnt.to_dict()
def get_all(with_state=None, session=None):
if with_state is not None:
return [x.to_dict() for x in session.query(Account).filter(Account.state == with_state)]
else:
return [x.to_dict() for x in session.query(Account)]
def get(name, session=None):
accnt = session.query(Account).filter_by(name=name).one_or_none()
if accnt:
return accnt.to_dict()
else:
return None
def delete(name, session=None):
accnt = session.query(Account).filter_by(name=name).one_or_none()
if accnt:
session.delete(accnt)
return True
else:
return False
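# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Walks an account through the transitions documented in update_state(), using a
# caller-supplied SQLAlchemy session; the account name and the presence of an
# AccountStates.disabled member are assumptions for illustration only.
def _example_account_lifecycle(session):
    add('example_account', session=session)                                    # created enabled
    update_state('example_account', AccountStates.disabled, session=session)   # enabled -> disabled
    update_state('example_account', AccountStates.deleting, session=session)   # disabled -> deleting
    return delete('example_account', session=session)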
|
from keras.preprocessing.image import *
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
from PIL import Image
import numpy as np
import os
#import cv2
def center_crop(x, center_crop_size, data_format, **kwargs):
if data_format == 'channels_first':
centerh, centerw = x.shape[1] // 2, x.shape[2] // 2
elif data_format == 'channels_last':
centerh, centerw = x.shape[0] // 2, x.shape[1] // 2
lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2
rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw
h_start, h_end = centerh - lh, centerh + rh
w_start, w_end = centerw - lw, centerw + rw
if data_format == 'channels_first':
return x[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :]
def pair_center_crop(x, y, center_crop_size, data_format, **kwargs):
if data_format == 'channels_first':
centerh, centerw = x.shape[1] // 2, x.shape[2] // 2
elif data_format == 'channels_last':
centerh, centerw = x.shape[0] // 2, x.shape[1] // 2
lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2
rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw
h_start, h_end = centerh - lh, centerh + rh
w_start, w_end = centerw - lw, centerw + rw
if data_format == 'channels_first':
return x[:, h_start:h_end, w_start:w_end], \
y[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :], \
y[h_start:h_end, w_start:w_end, :]
def random_crop(x, random_crop_size, data_format, sync_seed=None, **kwargs):
np.random.seed(sync_seed)
if data_format == 'channels_first':
h, w = x.shape[1], x.shape[2]
elif data_format == 'channels_last':
h, w = x.shape[0], x.shape[1]
rangeh = (h - random_crop_size[0]) // 2
rangew = (w - random_crop_size[1]) // 2
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
h_start, h_end = offseth, offseth + random_crop_size[0]
w_start, w_end = offsetw, offsetw + random_crop_size[1]
if data_format == 'channels_first':
return x[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :]
def pair_random_crop(x, y, random_crop_size, data_format, sync_seed=None, **kwargs):
np.random.seed(sync_seed)
if data_format == 'channels_first':
h, w = x.shape[1], x.shape[2]
elif data_format == 'channels_last':
h, w = x.shape[0], x.shape[1]
rangeh = (h - random_crop_size[0]) // 2
rangew = (w - random_crop_size[1]) // 2
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
h_start, h_end = offseth, offseth + random_crop_size[0]
w_start, w_end = offsetw, offsetw + random_crop_size[1]
if data_format == 'channels_first':
        return x[:, h_start:h_end, w_start:w_end], y[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :], y[h_start:h_end, w_start:w_end, :]
class SegDirectoryIterator(Iterator):
'''
Users need to ensure that all files exist.
    Label images should be png images where pixel values represent the class number.
find images -name *.jpg > images.txt
find labels -name *.png > labels.txt
    For a file named 2011_002920.jpg, each row should contain 2011_002920.
file_path: location of train.txt, or val.txt in PASCAL VOC2012 format,
listing image file path components without extension
data_dir: location of image files referred to by file in file_path
label_dir: location of label files
data_suffix: image file extension, such as `.jpg` or `.png`
label_suffix: label file suffix, such as `.png`, or `.npy`
loss_shape: shape to use when applying loss function to the label data
'''
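    # Illustrative (assumed) contents of such a train.txt, given
    # data_suffix='.jpg' and label_suffix='.png':
    #     2011_002920
    #     2011_003019
    # which the iterator expands to data_dir/2011_002920.jpg and
    # label_dir/2011_002920.png pairs.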
def __init__(self, file_path, seg_data_generator,
data_dir, data_suffix,
label_dir, label_suffix, classes, ignore_label=255,
crop_mode='none', label_cval=255, pad_size=None,
target_size=None, color_mode='rgb',
data_format='default', class_mode='sparse',
batch_size=1, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
loss_shape=None):
if data_format == 'default':
data_format = K.image_data_format()
self.file_path = file_path
self.data_dir = data_dir
self.data_suffix = data_suffix
self.label_suffix = label_suffix
self.label_dir = label_dir
self.classes = classes
self.seg_data_generator = seg_data_generator
        self.target_size = tuple(target_size) if target_size else None
self.ignore_label = ignore_label
self.crop_mode = crop_mode
self.label_cval = label_cval
self.pad_size = pad_size
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
self.nb_label_ch = 1
self.loss_shape = loss_shape
if (self.label_suffix == '.npy') or (self.label_suffix == 'npy'):
self.label_file_format = 'npy'
else:
self.label_file_format = 'img'
if target_size:
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
if self.data_format == 'channels_last':
self.label_shape = self.target_size + (self.nb_label_ch,)
else:
self.label_shape = (self.nb_label_ch,) + self.target_size
elif batch_size != 1:
raise ValueError(
'Batch size must be 1 when target image size is undetermined')
else:
self.image_shape = None
self.label_shape = None
if class_mode not in {'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of '
'"sparse", or None.')
self.class_mode = class_mode
if save_to_dir:
self.palette = None
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'npy'}
# build lists for data files and label files
self.data_files = []
self.label_files = []
fp = open(file_path)
lines = fp.readlines()
fp.close()
self.nb_sample = len(lines)
for line in lines:
line = line.strip('\n')
self.data_files.append(line + data_suffix)
self.label_files.append(line + label_suffix)
super(SegDirectoryIterator, self).__init__(
self.nb_sample, batch_size, shuffle, seed)
def next(self):
with self.lock:
index_array, current_index, current_batch_size = next(
self.index_generator)
# The transformation of images is not under thread lock so it can be
# done in parallel
if self.target_size:
# TODO(ahundt) make dtype properly configurable
batch_x = np.zeros((current_batch_size,) + self.image_shape)
            if self.loss_shape is None and self.label_file_format == 'img':
batch_y = np.zeros((current_batch_size,) + self.label_shape,
dtype=int)
elif self.loss_shape is None:
batch_y = np.zeros((current_batch_size,) + self.label_shape)
else:
batch_y = np.zeros((current_batch_size,) + self.loss_shape,
dtype=np.uint8)
grayscale = self.color_mode == 'grayscale'
# build batch of image data and labels
for i, j in enumerate(index_array):
data_file = self.data_files[j]
label_file = self.label_files[j]
img_file_format = 'img'
img = load_img(os.path.join(self.data_dir, data_file),
grayscale=grayscale, target_size=None)
label_filepath = os.path.join(self.label_dir, label_file)
if self.label_file_format == 'npy':
y = np.load(label_filepath)
else:
label = Image.open(label_filepath)
if self.save_to_dir and self.palette is None:
self.palette = label.palette
# do padding
if self.target_size:
if self.crop_mode != 'none':
x = img_to_array(img, data_format=self.data_format)
                    if self.label_file_format != 'npy':
y = img_to_array(
label, data_format=self.data_format).astype(int)
img_w, img_h = img.size
if self.pad_size:
pad_w = max(self.pad_size[1] - img_w, 0)
pad_h = max(self.pad_size[0] - img_h, 0)
else:
pad_w = max(self.target_size[1] - img_w, 0)
pad_h = max(self.target_size[0] - img_h, 0)
if self.data_format == 'channels_first':
                        x = np.lib.pad(x, ((0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)), 'constant', constant_values=0.)
                        y = np.lib.pad(y, ((0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)),
                                       'constant', constant_values=self.label_cval)
                    elif self.data_format == 'channels_last':
                        x = np.lib.pad(x, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=0.)
                        y = np.lib.pad(y, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=self.label_cval)
else:
x = img_to_array(img.resize((self.target_size[1], self.target_size[0]),
Image.BILINEAR),
data_format=self.data_format)
                    if self.label_file_format != 'npy':
y = img_to_array(label.resize((self.target_size[1], self.target_size[
0]), Image.NEAREST), data_format=self.data_format).astype(int)
else:
print('ERROR: resize not implemented for label npy file')
if self.target_size is None:
batch_x = np.zeros((current_batch_size,) + x.shape)
if self.loss_shape is not None:
batch_y = np.zeros((current_batch_size,) + self.loss_shape)
else:
batch_y = np.zeros((current_batch_size,) + y.shape)
x, y = self.seg_data_generator.random_transform(x, y)
x = self.seg_data_generator.standardize(x)
if self.ignore_label:
y[np.where(y == self.ignore_label)] = self.classes
if self.loss_shape is not None:
y = np.reshape(y, self.loss_shape)
batch_x[i] = x
batch_y[i] = y
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
label = batch_y[i][:, :, 0].astype('uint8')
label[np.where(label == self.classes)] = self.ignore_label
label = Image.fromarray(label, mode='P')
label.palette = self.palette
fname = '{prefix}_{index}_{hash}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4))
img.save(os.path.join(self.save_to_dir, 'img_' +
fname + '.{format}'.format(format=self.save_format)))
label.save(os.path.join(self.save_to_dir,
'label_' + fname + '.png'))
# return
batch_x = preprocess_input(batch_x)
if self.class_mode == 'sparse':
return batch_x, batch_y
else:
return batch_x
class SegDataGenerator(object):
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
channelwise_center=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
zoom_maintain_shape=True,
channel_shift_range=0.,
fill_mode='constant',
cval=0.,
label_cval=255,
crop_mode='none',
crop_size=(0, 0),
pad_size=None,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
data_format='default'):
if data_format == 'default':
data_format = K.image_data_format()
self.__dict__.update(locals())
self.mean = None
self.ch_mean = None
self.std = None
self.principal_components = None
self.rescale = rescale
if data_format not in {'channels_last', 'channels_first'}:
raise Exception('data_format should be channels_last (channel after row and '
'column) or channels_first (channel before row and column). '
'Received arg: ', data_format)
if crop_mode not in {'none', 'random', 'center'}:
raise Exception('crop_mode should be "none" or "random" or "center" '
'Received arg: ', crop_mode)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_index = 1
self.row_index = 2
self.col_index = 3
if data_format == 'channels_last':
self.channel_index = 3
self.row_index = 1
self.col_index = 2
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow_from_directory(self, file_path, data_dir, data_suffix,
label_dir, label_suffix, classes,
ignore_label=255,
target_size=None, color_mode='rgb',
class_mode='sparse',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
loss_shape=None):
if self.crop_mode == 'random' or self.crop_mode == 'center':
target_size = self.crop_size
return SegDirectoryIterator(
file_path, self,
data_dir=data_dir, data_suffix=data_suffix,
label_dir=label_dir, label_suffix=label_suffix,
classes=classes, ignore_label=ignore_label,
crop_mode=self.crop_mode, label_cval=self.label_cval,
pad_size=self.pad_size,
target_size=target_size, color_mode=color_mode,
data_format=self.data_format, class_mode=class_mode,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_format=save_format,
loss_shape=loss_shape)
def standardize(self, x):
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_index = self.channel_index - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_index, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)
if self.featurewise_center:
x -= self.mean
if self.featurewise_std_normalization:
x /= (self.std + 1e-7)
if self.channelwise_center:
x -= self.ch_mean
return x
def random_transform(self, x, y):
# x is a single image, so it doesn't have image number at index 0
img_row_index = self.row_index - 1
img_col_index = self.col_index - 1
img_channel_index = self.channel_index - 1
if self.crop_mode == 'none':
crop_size = (x.shape[img_row_index], x.shape[img_col_index])
else:
crop_size = self.crop_size
assert x.shape[img_row_index] == y.shape[img_row_index] and x.shape[img_col_index] == y.shape[
img_col_index], 'DATA ERROR: Different shape of data and label!\ndata shape: %s, label shape: %s' % (str(x.shape), str(y.shape))
# use composition of homographies to generate final transform that
# needs to be applied
if self.rotation_range:
theta = np.pi / 180 * \
np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if self.height_shift_range:
# * x.shape[img_row_index]
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range) * crop_size[0]
else:
tx = 0
if self.width_shift_range:
# * x.shape[img_col_index]
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range) * crop_size[1]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0], self.zoom_range[1], 2)
if self.zoom_maintain_shape:
zy = zx
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(
np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
h, w = x.shape[img_row_index], x.shape[img_col_index]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_index,
fill_mode=self.fill_mode, cval=self.cval)
y = apply_transform(y, transform_matrix, img_channel_index,
fill_mode='constant', cval=self.label_cval)
if self.channel_shift_range != 0:
x = random_channel_shift(
x, self.channel_shift_range, img_channel_index)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_index)
y = flip_axis(y, img_col_index)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_index)
y = flip_axis(y, img_row_index)
if self.crop_mode == 'center':
x, y = pair_center_crop(x, y, self.crop_size, self.data_format)
elif self.crop_mode == 'random':
x, y = pair_random_crop(x, y, self.crop_size, self.data_format)
# TODO:
# channel-wise normalization
# barrel/fisheye
return x, y
def fit(self, X,
augment=False,
rounds=1,
seed=None):
'''Required for featurewise_center and featurewise_std_normalization
# Arguments
X: Numpy array, the data to fit on.
augment: whether to fit on randomly augmented samples
rounds: if `augment`,
how many augmentation passes to do over the data
seed: random seed.
'''
X = np.copy(X)
if augment:
aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
for r in range(rounds):
for i in range(X.shape[0]):
aX[i + r * X.shape[0]] = self.random_transform(X[i])
X = aX
if self.featurewise_center:
self.mean = np.mean(X, axis=0)
X -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(X, axis=0)
X /= (self.std + 1e-7)
def set_ch_mean(self, ch_mean):
self.ch_mean = ch_mean
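# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal illustration of wiring the generator to a PASCAL VOC style layout; the
# paths, class count and crop size below are assumptions for the example only.
def _example_seg_flow():
    datagen = SegDataGenerator(zoom_range=[0.5, 2.0],
                               zoom_maintain_shape=True,
                               crop_mode='random',
                               crop_size=(320, 320),
                               horizontal_flip=True,
                               fill_mode='constant',
                               label_cval=255)
    return datagen.flow_from_directory(
        file_path='VOC2012/ImageSets/Segmentation/train.txt',
        data_dir='VOC2012/JPEGImages', data_suffix='.jpg',
        label_dir='VOC2012/SegmentationClass', label_suffix='.png',
        classes=21, target_size=(320, 320), batch_size=8)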
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class LessTest(unittest.TestCase):
def setUp(self):
self.len = 24
self.np_a = np.random.randn(self.len).astype(np.float32)
self.np_b = np.random.randn(self.len).astype(np.float32)
self.dc_a = dc.array(list(self.np_a));
self.dc_b = dc.array(list(self.np_b));
def test_Less1D (self):
npr = np.less(self.np_a, self.np_b)
dcr = dc.less(self.dc_a, self.dc_b)
np.testing.assert_allclose(npr, np.array(dcr.data()).astype(np.bool),
rtol=1e-3, atol=1e-3)
def test_Less2D (self):
np_a = np.reshape(self.np_a, (6,4))
np_b = np.reshape(self.np_b, (6,4))
dc_a = dc.reshape(self.dc_a, (6,4));
dc_b = dc.reshape(self.dc_b, (6,4));
npr = np.less(np_a, np_b);
dcr = dc.less(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),
rtol=1e-3, atol=1e-3)
def test_Less3D (self):
np_a = np.reshape(self.np_a, (2,4,3))
np_b = np.reshape(self.np_b, (2,4,3))
dc_a = dc.reshape(self.dc_a, (2,4,3));
dc_b = dc.reshape(self.dc_b, (2,4,3));
npr = np.less(np_a, np_b);
dcr = dc.less(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),
rtol=1e-3, atol=1e-3)
  def test_Less4D (self):
np_a = np.reshape(self.np_a, (2,2,2,3))
np_b = np.reshape(self.np_b, (2,2,2,3))
dc_a = dc.reshape(self.dc_a, (2,2,2,3))
dc_b = dc.reshape(self.dc_b, (2,2,2,3))
npr = np.less(np_a, np_b)
dcr = dc.less(dc_a, dc_b)
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),
rtol=1e-3, atol=1e-3)
def tearDown(self):
return "test finished"
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 2.2.9 on 2020-07-23 13:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('messier_objects', '0002_auto_20200723_1438'),
]
operations = [
migrations.AlterField(
model_name='messierobject',
name='photo',
field=models.ImageField(default='notcaptured.JPG', upload_to='messier_objects'),
),
]
|
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
import sys
import os
import logging
import json
import test_case
from vnc_api.exceptions import NoIdError, RefsExistError
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
from vnc_api.utils import obj_type_to_vnc_class
import shutil
sys.path.append("../common/tests")
from time import sleep
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def retry_exc_handler(tries_remaining, exception, delay):
    sys.stderr.write("Caught '%s', %d tries remaining, sleeping for %s seconds\n" % (exception, tries_remaining, delay))
def retries(max_tries, delay=5, backoff=1, exceptions=(Exception,),hook=None):
def dec(func):
def f2(*args, **kwargs):
mydelay = delay
            tries = list(range(max_tries))
            tries.reverse()
for tries_remaining in tries:
try:
return func(*args, **kwargs)
except exceptions as e:
if tries_remaining > 0:
if hook is not None:
hook(tries_remaining, e, mydelay)
sleep(mydelay)
mydelay = mydelay * backoff
else:
raise
return f2
return dec
#Testing if all the objects in the json file are created. If not, create them.
class TestInitData1(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestInitData1, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'fabric_ansible_dir',
"../fabric-ansible/ansible-playbooks")])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestInitData1, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def create_object(self, object, res_type, fq_name):
# Get the class name from object type
vnc_cls = obj_type_to_vnc_class(res_type, __name__)
instance_obj = vnc_cls.from_dict(**object)
try:
if(res_type == "job-template"):
schema_name = fq_name.replace('template', 'schema.json')
with open(os.path.join("../fabric-ansible/ansible-playbooks" +
'/schema/', schema_name),'r+') as schema_file:
schema_json = json.load(schema_file)
object["job_template_input_schema"] = schema_json.get(
"input_schema")
object["job_template_output_schema"] = schema_json.get(
"output_schema")
self._vnc_lib.job_template_create(instance_obj)
else:
self._vnc_lib._object_create(res_type, instance_obj)
except RefsExistError:
pass
def test_load_init_data_2(self):
object = {}
res_type = ""
fq_name = ""
try:
with open("../fabric-ansible/ansible-playbooks/conf"
"/predef_payloads.json") as data_file:
input_json = json.load(data_file)
for item in input_json.get('data'):
res_type = item.get("object_type")
for object in item.get("objects"):
fq_name = object.get("name")
self._vnc_lib._object_read(res_type=res_type, fq_name=fq_name)
except NoIdError:
self.create_object(object, res_type, fq_name)
except Exception as e:
print ("Test failed due to unexpected error: %s" % str(e))
# Test when object_type having invalid name
class TestInitDataError2(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
json_data = {
"data": [
{
"object_type": "abc",
"objects": [{"fq_name": ["test"]}]
}
]
}
if not os.path.exists("conf"):
os.makedirs("conf")
with open("conf/predef_payloads.json", "w") as f:
json.dump(json_data, f)
super(TestInitDataError2, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'fabric_ansible_dir',
".")])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
if os.path.exists("conf"):
shutil.rmtree("conf")
super(TestInitDataError2, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
@retries(5, hook=retry_exc_handler)
def test_load_init_data_02(self):
try:
ipam_fq_name = ['default-domain', 'default-project',
'service-chain-flat-ipam']
ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
if (ipam_obj):
jb_list = self._vnc_lib.job_templates_list()
self.assertEquals(len(jb_list.get('job-templates')), 0)
except Exception as e:
print( "Test failed due to unexpected error: %s" % str(e))
# Testing when json is invalid
class TestInitDataError3(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
json_data = "abc"
if not os.path.exists("conf"):
os.makedirs("conf")
with open("conf/predef_payloads.json", "w") as f:
f.write(json_data)
super(TestInitDataError3, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'fabric_ansible_dir',
".")])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
if os.path.exists("conf"):
shutil.rmtree("conf")
super(TestInitDataError3, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
@retries(5, hook=retry_exc_handler)
def test_load_init_data_04(self):
try:
ipam_fq_name = ['default-domain', 'default-project',
'service-chain-flat-ipam']
ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
if (ipam_obj):
jb_list = self._vnc_lib.job_templates_list()
self.assertEquals(len(jb_list.get('job-templates')), 0)
except Exception as e:
print("Test failed due to unexpected error: %s" % str(e))
# Testing when tag type is unknown
class TestInitDataError4(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
# create a file in current dir and put some invalid json
# create predef_payloads.json and schema/files
json_data = {
"data": [
{
"object_type": "tag",
"objects": [
{
"fq_name": [
"abc=management_ip"
],
"name": "abc=management_ip",
"tag_type_name": "abc",
"tag_value": "management_ip"
}
]
}
]
}
if not os.path.exists("conf"):
os.makedirs("conf")
with open("conf/predef_payloads.json", "w") as f:
json.dump(json_data, f)
super(TestInitDataError4, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'fabric_ansible_dir',
".")])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
if os.path.exists("conf"):
shutil.rmtree("conf")
super(TestInitDataError4, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
@retries(5, hook=retry_exc_handler)
def test_load_init_data_05(self):
try:
ipam_fq_name = ['default-domain', 'default-project',
'service-chain-flat-ipam']
ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
if (ipam_obj):
tags = self._vnc_lib.tags_list()
self.assertEquals(len(tags.get('tags')), 0)
except Exception as e:
print("Test failed due to unexpected error: %s" % str(e))
|
# coding: utf-8
"""
3Di API
3Di simulation API (latest version: 3.0) Framework release: 1.0.16 3Di core release: 2.0.11 deployed on: 07:33AM (UTC) on September 04, 2020 # noqa: E501
The version of the OpenAPI document: 3.0
Contact: info@nelen-schuurmans.nl
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class NetCDFTimeseriesRain(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'url': 'str',
'multiplier': 'float',
'simulation': 'str',
'offset': 'int',
'duration': 'int',
'timestamps': 'list[int]',
'interval': 'int',
'values_reference': 'str',
'fill_value': 'str',
'units': 'str',
'file': 'FileReadOnly',
'uid': 'str'
}
attribute_map = {
'url': 'url',
'multiplier': 'multiplier',
'simulation': 'simulation',
'offset': 'offset',
'duration': 'duration',
'timestamps': 'timestamps',
'interval': 'interval',
'values_reference': 'values_reference',
'fill_value': 'fill_value',
'units': 'units',
'file': 'file',
'uid': 'uid'
}
def __init__(self, url=None, multiplier=None, simulation=None, offset=None, duration=None, timestamps=None, interval=None, values_reference=None, fill_value=None, units=None, file=None, uid=None, local_vars_configuration=None): # noqa: E501
"""NetCDFTimeseriesRain - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._url = None
self._multiplier = None
self._simulation = None
self._offset = None
self._duration = None
self._timestamps = None
self._interval = None
self._values_reference = None
self._fill_value = None
self._units = None
self._file = None
self._uid = None
self.discriminator = None
if url is not None:
self.url = url
if multiplier is not None:
self.multiplier = multiplier
if simulation is not None:
self.simulation = simulation
self.offset = offset
self.duration = duration
self.timestamps = timestamps
self.interval = interval
self.values_reference = values_reference
if fill_value is not None:
self.fill_value = fill_value
self.units = units
if file is not None:
self.file = file
if uid is not None:
self.uid = uid
@property
def url(self):
"""Gets the url of this NetCDFTimeseriesRain. # noqa: E501
:return: The url of this NetCDFTimeseriesRain. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this NetCDFTimeseriesRain.
:param url: The url of this NetCDFTimeseriesRain. # noqa: E501
:type: str
"""
self._url = url
@property
def multiplier(self):
"""Gets the multiplier of this NetCDFTimeseriesRain. # noqa: E501
:return: The multiplier of this NetCDFTimeseriesRain. # noqa: E501
:rtype: float
"""
return self._multiplier
@multiplier.setter
def multiplier(self, multiplier):
"""Sets the multiplier of this NetCDFTimeseriesRain.
:param multiplier: The multiplier of this NetCDFTimeseriesRain. # noqa: E501
:type: float
"""
self._multiplier = multiplier
@property
def simulation(self):
"""Gets the simulation of this NetCDFTimeseriesRain. # noqa: E501
:return: The simulation of this NetCDFTimeseriesRain. # noqa: E501
:rtype: str
"""
return self._simulation
@simulation.setter
def simulation(self, simulation):
"""Sets the simulation of this NetCDFTimeseriesRain.
:param simulation: The simulation of this NetCDFTimeseriesRain. # noqa: E501
:type: str
"""
self._simulation = simulation
@property
def offset(self):
"""Gets the offset of this NetCDFTimeseriesRain. # noqa: E501
offset of event in simulation in seconds # noqa: E501
:return: The offset of this NetCDFTimeseriesRain. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this NetCDFTimeseriesRain.
offset of event in simulation in seconds # noqa: E501
:param offset: The offset of this NetCDFTimeseriesRain. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
offset is not None and offset > 2147483647): # noqa: E501
raise ValueError("Invalid value for `offset`, must be a value less than or equal to `2147483647`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
offset is not None and offset < -2147483648): # noqa: E501
raise ValueError("Invalid value for `offset`, must be a value greater than or equal to `-2147483648`") # noqa: E501
self._offset = offset
@property
def duration(self):
"""Gets the duration of this NetCDFTimeseriesRain. # noqa: E501
Duration of event in seconds # noqa: E501
:return: The duration of this NetCDFTimeseriesRain. # noqa: E501
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this NetCDFTimeseriesRain.
Duration of event in seconds # noqa: E501
:param duration: The duration of this NetCDFTimeseriesRain. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
duration is not None and duration > 2147483647): # noqa: E501
raise ValueError("Invalid value for `duration`, must be a value less than or equal to `2147483647`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
duration is not None and duration < -2147483648): # noqa: E501
raise ValueError("Invalid value for `duration`, must be a value greater than or equal to `-2147483648`") # noqa: E501
self._duration = duration
@property
def timestamps(self):
"""Gets the timestamps of this NetCDFTimeseriesRain. # noqa: E501
in simulation in seconds # noqa: E501
:return: The timestamps of this NetCDFTimeseriesRain. # noqa: E501
:rtype: list[int]
"""
return self._timestamps
@timestamps.setter
def timestamps(self, timestamps):
"""Sets the timestamps of this NetCDFTimeseriesRain.
in simulation in seconds # noqa: E501
:param timestamps: The timestamps of this NetCDFTimeseriesRain. # noqa: E501
:type: list[int]
"""
self._timestamps = timestamps
@property
def interval(self):
"""Gets the interval of this NetCDFTimeseriesRain. # noqa: E501
interval in seconds # noqa: E501
:return: The interval of this NetCDFTimeseriesRain. # noqa: E501
:rtype: int
"""
return self._interval
@interval.setter
def interval(self, interval):
"""Sets the interval of this NetCDFTimeseriesRain.
interval in seconds # noqa: E501
:param interval: The interval of this NetCDFTimeseriesRain. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
interval is not None and interval > 2147483647): # noqa: E501
raise ValueError("Invalid value for `interval`, must be a value less than or equal to `2147483647`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
interval is not None and interval < 0): # noqa: E501
raise ValueError("Invalid value for `interval`, must be a value greater than or equal to `0`") # noqa: E501
self._interval = interval
@property
def values_reference(self):
"""Gets the values_reference of this NetCDFTimeseriesRain. # noqa: E501
:return: The values_reference of this NetCDFTimeseriesRain. # noqa: E501
:rtype: str
"""
return self._values_reference
@values_reference.setter
def values_reference(self, values_reference):
"""Sets the values_reference of this NetCDFTimeseriesRain.
:param values_reference: The values_reference of this NetCDFTimeseriesRain. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
values_reference is not None and len(values_reference) > 255):
raise ValueError("Invalid value for `values_reference`, length must be less than or equal to `255`") # noqa: E501
self._values_reference = values_reference
@property
def fill_value(self):
"""Gets the fill_value of this NetCDFTimeseriesRain. # noqa: E501
:return: The fill_value of this NetCDFTimeseriesRain. # noqa: E501
:rtype: str
"""
return self._fill_value
@fill_value.setter
def fill_value(self, fill_value):
"""Sets the fill_value of this NetCDFTimeseriesRain.
:param fill_value: The fill_value of this NetCDFTimeseriesRain. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
fill_value is not None and len(fill_value) > 128):
raise ValueError("Invalid value for `fill_value`, length must be less than or equal to `128`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
fill_value is not None and len(fill_value) < 1):
raise ValueError("Invalid value for `fill_value`, length must be greater than or equal to `1`") # noqa: E501
self._fill_value = fill_value
@property
def units(self):
"""Gets the units of this NetCDFTimeseriesRain. # noqa: E501
:return: The units of this NetCDFTimeseriesRain. # noqa: E501
:rtype: str
"""
return self._units
@units.setter
def units(self, units):
"""Sets the units of this NetCDFTimeseriesRain.
:param units: The units of this NetCDFTimeseriesRain. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and units is None: # noqa: E501
raise ValueError("Invalid value for `units`, must not be `None`") # noqa: E501
allowed_values = ["mm", "mm/h"] # noqa: E501
if self.local_vars_configuration.client_side_validation and units not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `units` ({0}), must be one of {1}" # noqa: E501
.format(units, allowed_values)
)
self._units = units
@property
def file(self):
"""Gets the file of this NetCDFTimeseriesRain. # noqa: E501
:return: The file of this NetCDFTimeseriesRain. # noqa: E501
:rtype: FileReadOnly
"""
return self._file
@file.setter
def file(self, file):
"""Sets the file of this NetCDFTimeseriesRain.
:param file: The file of this NetCDFTimeseriesRain. # noqa: E501
:type: FileReadOnly
"""
self._file = file
@property
def uid(self):
"""Gets the uid of this NetCDFTimeseriesRain. # noqa: E501
:return: The uid of this NetCDFTimeseriesRain. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this NetCDFTimeseriesRain.
:param uid: The uid of this NetCDFTimeseriesRain. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetCDFTimeseriesRain):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NetCDFTimeseriesRain):
return True
return self.to_dict() != other.to_dict()
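# Validation sketch (hypothetical values; the constructor arguments are assumptions,
# and it presumes client_side_validation is enabled on the model's configuration):
#
#     rain = NetCDFTimeseriesRain(units="mm/h")
#     rain.interval = 300     # accepted: within [0, 2147483647]
#     rain.interval = -1      # raises ValueError via the setter above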
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from django.utils.translation import ugettext as _
from dataflow.pizza_settings import KAFKA_OP_ROLE_NAME
from dataflow.shared.api.modules.datamanage import DatamanageApi
from dataflow.shared.api.util.api_driver import APIResponseUtil as res_util
from dataflow.shared.databus.databus_helper import DatabusHelper
class DatamanageHelper(object):
@staticmethod
def op_metric_report(message, kafka_topic, tags):
request_params = {
"message": json.dumps(message),
"kafka_topic": kafka_topic,
"tags": tags,
}
res = DatamanageApi.metrics.report(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_result_table_metric(database, sql, geog_area_code):
request_params = {"database": database, "sql": sql, "tags": [geog_area_code]}
res = DatamanageApi.metrics.query(request_params)
res_util.check_response(res)
return res.data
    # Optimized metrics: no longer fetched via SQL queries
@staticmethod
def get_result_table_metric_v2(request_params):
res = DatamanageApi.metrics_v2.list(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_alert_detail_v2(request_params):
res = DatamanageApi.alert_detail.list(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_batch_executions(processing_ids):
"""
{
'interval': 3600,
'execute_history': []
}
@param processing_ids:
@return:
"""
request_params = {"processing_ids": processing_ids}
res = DatamanageApi.dmonitor.batch_executions(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_alert_details(flow_id, start_time, end_time):
"""
[{
'message': 'xxx',
'message_en': 'xxx',
'full_message': 'xxx',
'full_message_en': 'xxx'
}]
@param flow_id:
@param start_time:
@param end_time:
@return:
"""
request_params = {
"flow_id": flow_id,
"start_time": start_time,
"end_time": end_time,
"dimensions": json.dumps({"generate_type": "user"}),
}
res = DatamanageApi.dmonitor.alert_details(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_metric_kafka_server(geog_area_code):
channels_info = DatabusHelper.list_channel_info([KAFKA_OP_ROLE_NAME, geog_area_code])
if len(channels_info) != 1:
raise Exception(_("kafka-op 连接信息不唯一,请联系管理员处理."))
metric_kafka_server = "{}:{}".format(
channels_info[0]["cluster_domain"],
channels_info[0]["cluster_port"],
)
return metric_kafka_server
@staticmethod
def create_alert_config(flow_id, dmonitor_type="alert_configs"):
request_params = {
"flow_id": flow_id,
"dmonitor_type": dmonitor_type,
}
res = DatamanageApi.dmonitor_dataflow.create(request_params)
res_util.check_response(res)
return res.data
    # Data correction related
@staticmethod
def create_data_correct(params):
res = DatamanageApi.data_correct.create(params)
res_util.check_response(res)
return res.data
@staticmethod
def update_data_correct(params):
res = DatamanageApi.data_correct.update(params)
res_util.check_response(res)
return res.data
@staticmethod
def get_data_correct(params):
res = DatamanageApi.data_correct.retrieve(params)
res_util.check_response(res)
return res.data
@staticmethod
def del_data_correct(params):
res = DatamanageApi.data_correct.delete(params)
res_util.check_response(res)
return res.data
    # Data model instance (application) related
@staticmethod
def create_data_model_instance(params):
res = DatamanageApi.data_model_instance.create(params)
res_util.check_response(res)
return res.data
@staticmethod
def update_data_model_instance(params):
res = DatamanageApi.data_model_instance.update(params)
res_util.check_response(res)
return res.data
@staticmethod
def get_data_model_instance(params):
res = DatamanageApi.data_model_instance.retrieve(params)
res_util.check_response(res)
return res.data
@staticmethod
def del_data_model_instance(params):
res = DatamanageApi.data_model_instance.delete(params)
res_util.check_response(res)
return res.data
@staticmethod
def rollback_data_model_instance(params):
res = DatamanageApi.data_model_instance.rollback(params)
res_util.check_response(res)
return res.data
    # Data model indicator related
@staticmethod
def create_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.create(params)
res_util.check_response(res)
return res.data
@staticmethod
def update_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.update(params)
res_util.check_response(res)
return res.data
@staticmethod
def get_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.retrieve(params)
res_util.check_response(res)
return res.data
@staticmethod
def del_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.delete(params)
res_util.check_response(res)
return res.data
@staticmethod
def rollback_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.rollback(params)
res_util.check_response(res)
return res.data
@staticmethod
def check_data_model_instance(params):
res = DatamanageApi.data_model_instance_check.list(params)
return res
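# Usage sketch (hypothetical processing id; assumes the Datamanage API endpoints above
# are reachable and configured for this deployment):
#
#     executions = DatamanageHelper.get_batch_executions(["example_processing_id"])
#     # -> e.g. {'interval': 3600, 'execute_history': []} per the docstring above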
|
#main game section
# %%
plansza_do_gry = {'7':' ','8':' ','9':' ',
'4':' ','5':' ','6':' ',
'1':' ','2':' ','3':' '}
klawisze_gry=[]
for key in plansza_do_gry:
klawisze_gry.append(key)
# print(klawisze_gry)
def drukuj_plansze(pole):
print(f"{pole['7']} | {pole['8']} | {pole['9']}")
print('- + - + -')
print(f"{pole['4']} | {pole['5']} | {pole['6']}")
print('- + - + -')
print(f"{pole['1']} | {pole['2']} | {pole['3']}")
# drukuj_plansze(plansza_do_gry)
def gra():
gracz = 'X'
licznik=0
    while True:  # loop until someone wins or the board is full
        drukuj_plansze(plansza_do_gry)
        move = input(f"It is your move, {gracz}. Choose where to place your mark: ")
        if move in plansza_do_gry and plansza_do_gry[move] == ' ':
            plansza_do_gry[move] = gracz
            licznik += 1
        else:
            print('That square is taken or invalid.\nPlace your mark in another square.')
            continue
        if licznik >= 5:  # a win is only possible from the fifth move onwards
            wygrane_linie = [
                ('7', '8', '9'), ('4', '5', '6'), ('1', '2', '3'),  # rows
                ('7', '4', '1'), ('8', '5', '2'), ('9', '6', '3'),  # columns
                ('7', '5', '3'), ('9', '5', '1'),                   # diagonals
            ]
            wygrana = any(
                plansza_do_gry[a] == plansza_do_gry[b] == plansza_do_gry[c] != ' '
                for a, b, c in wygrane_linie
            )
            if wygrana:
                drukuj_plansze(plansza_do_gry)
                print("\nGame over")
                print(f"Player {gracz} wins")
                break
        if licznik == 9:
            print("\nGame over")
            print("Draw")
            break
if gracz == 'X':
gracz = 'O'
else:
gracz = 'X'
    restart = input('Play again?\n(t/n): ')
    if restart == 't' or restart == 'T':
        for key in klawisze_gry:
            plansza_do_gry[key] = ' '
        gra()  # recursive call to restart the game
# entry point
if __name__ == '__main__':  # relevant for packages and packaging this module into one
gra()
# %%
|
"""
WSGI config for thread_33988 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thread_33988.settings')
application = get_wsgi_application()
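# Deployment sketch (assumption: the project package is named thread_33988 and a WSGI
# server such as gunicorn is installed):  gunicorn thread_33988.wsgi:application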
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import sys
import random
from tqdm import tqdm, trange
import numpy as np
from scipy.special import softmax
# from sklearn.utils.extmath import softmax
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss, MSELoss
from tensorboardX import SummaryWriter
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--loss_weight",
default=None,
type=str,
help="The Loss Weight.")
parser.add_argument("--pop_classifier_layer",
action='store_true',
help="pop classifier layer")
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict",
action='store_true',
help="Whether to run predict on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--predict_batch_size",
default=8,
type=int,
help="Total batch size for predict.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir',
action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
args.device = device
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
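    # Shrink the per-step batch so that accumulating gradients over
    # `gradient_accumulation_steps` steps reproduces the requested --train_batch_size
    # as the effective batch size.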
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval and not args.do_predict:
raise ValueError("At least one of `do_train`, `do_eval` or `do_predict` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
print("pop_classifier_layer", args.pop_classifier_layer)
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels, pop_classifier_layer=args.pop_classifier_layer)
if args.local_rank == 0:
torch.distributed.barrier()
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
print("loss_weight", args.loss_weight)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
# Prepare data loader
train_examples = processor.get_train_examples(args.data_dir)
cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
        except Exception:  # cached features missing or unreadable; rebuild them
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
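        # Biases and LayerNorm parameters (matched by `no_decay`) are excluded from L2
        # weight decay; all other parameters use weight_decay=0.01.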
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
# define a new function to compute loss values for both output_modes
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
# print(input_ids)
# print(logits)
# print(label_ids)
if output_mode == "classification":
                    if args.loss_weight is None:
loss_fct = CrossEntropyLoss()
else:
                        loss_weight = [int(w) for w in args.loss_weight.split(",")]
loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())
loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), label_ids.view(-1))
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.local_rank in [-1, 0]:
tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
tb_writer.add_scalar('loss', loss.item(), global_step)
### Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
### Example:
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Save a trained model, configuration and tokenizer
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
# Good practice: save your training arguments together with the trained model
output_args_file = os.path.join(args.output_dir, 'training_args.bin')
torch.save(args, output_args_file)
else:
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
model.to(device)
### Evaluation
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_dev_examples(args.data_dir)
cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_eval_features_file, "rb") as reader:
eval_features = pickle.load(reader)
        except Exception:  # cached features missing or unreadable; rebuild them
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving eval features into cached file %s", cached_eval_features_file)
with open(cached_eval_features_file, "wb") as writer:
pickle.dump(eval_features, writer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data) # Note that this sampler samples randomly
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
out_label_ids = None
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
print(logits )
print(label_ids)
print(logits.view(-1, num_labels), label_ids.view(-1))
# create eval loss and other metric required by the task
if output_mode == "classification":
                if args.loss_weight is None:
loss_fct = CrossEntropyLoss()
else:
                    loss_weight = [int(w) for w in args.loss_weight.split(",")]
loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
out_label_ids = label_ids.detach().cpu().numpy()
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
print(preds)
        def swap_value(a):
            a[0], a[1] = a[1], a[0]
if task_name == "copa":
preds = softmax(preds,axis=1)
print(preds)
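            # Each consecutive pair of rows appears to hold the two COPA alternatives;
            # the column swaps below force exactly one row per pair to end up with the
            # higher "correct" (index 1) score before the argmax further down.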
for i in range(int(len(preds)/2)):
if preds[2*i][0]>=preds[2*i+1][0]:
if preds[2*i][0]<preds[2*i][1]:
# print(preds[2*i][0], preds[2*i][1])
swap_value(preds[2*i])
# print(preds[2*i][0], preds[2*i][1])
if preds[2*i+1][0]>preds[2*i+1][1]:
swap_value(preds[2*i+1])
else:
if preds[2*i][0]>preds[2*i][1]:
swap_value(preds[2*i])
if preds[2*i+1][0]<preds[2*i+1][1]:
swap_value(preds[2*i+1])
print(preds)
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
print(preds,out_label_ids)
result = compute_metrics(task_name, preds, out_label_ids)
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
### Prediction
if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
predict_examples = processor.get_test_examples(args.data_dir)
cached_predict_features_file = os.path.join(args.data_dir, 'predict_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_predict_features_file, "rb") as reader:
predict_features = pickle.load(reader)
        except Exception:  # cached features missing or unreadable; rebuild them
predict_features = convert_examples_to_features(
predict_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving predict features into cached file %s", cached_predict_features_file)
with open(cached_predict_features_file, "wb") as writer:
pickle.dump(predict_features, writer)
logger.info("***** Running prediction *****")
logger.info(" Num examples = %d", len(predict_examples))
logger.info(" Batch size = %d", args.predict_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in predict_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in predict_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in predict_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.float)
predict_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
if args.local_rank == -1:
predict_sampler = SequentialSampler(predict_data)
else:
predict_sampler = DistributedSampler(predict_data) # Note that this sampler samples randomly
predict_dataloader = DataLoader(predict_data, sampler=predict_sampler, batch_size=args.predict_batch_size)
model.eval()
# predict_loss = 0
# nb_predict_steps = 0
preds = []
out_label_ids = None
for input_ids, input_mask, segment_ids, label_ids in tqdm(predict_dataloader, desc="predicting"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
print(logits )
print(label_ids)
# create eval loss and other metric required by the task
# if output_mode == "classification":
# loss_fct = CrossEntropyLoss()
# tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
# elif output_mode == "regression":
# loss_fct = MSELoss()
# tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
#
# eval_loss += tmp_eval_loss.mean().item()
# nb_predict_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
# out_label_ids = label_ids.detach().cpu().numpy()
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(
# out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
#
# eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
print(preds)
if task_name == "copa":
preds = softmax(preds,axis=1)
print(preds)
results=[]
for i in range(int(len(preds)/2)):
if preds[2*i][0]>=preds[2*i+1][0]:
results.append(0)
else:
results.append(1)
preds= results
label_map = {i : i for i in range(2)}
else:
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
label_map = {i : label for i, label in enumerate(label_list)}
print(preds)
# result = compute_metrics(task_name, preds, out_label_ids)
# loss = tr_loss/global_step if args.do_train else None
# result['eval_loss'] = eval_loss
# result['global_step'] = global_step
# result['loss'] = loss
output_predict_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_predict_file, "w") as writer:
logger.info("***** Predict results *****")
for i in range(len(preds)):
label_i = label_map[preds[i]]
# json_i= "\"idx: %d, \"label\": \"label_i\""
writer.write("{\"idx\": %d, \"label\": \"%s\"}\n"%(i,label_i))
# for key in sorted(result.keys()):
# logger.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
|
import compileall
compileall.compile_dir(
'examples',
maxlevels=0,
)
|
from functools import partial
from unittest.mock import Mock, patch
import graphene
import pytest
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q
from django.shortcuts import reverse
from graphql.error import GraphQLError
from graphql_relay import to_global_id
from ...core.utils import from_global_id_or_error
from ...product.types import Product
from ...tests.utils import get_graphql_content
from ...utils import get_nodes
from ...utils.filters import filter_by_query_param
def test_middleware_dont_generate_sql_requests(client, settings, assert_num_queries):
"""When requesting on the GraphQL API endpoint, no SQL request should happen
indirectly. This test ensures that."""
# Enables the Graphql playground
settings.DEBUG = True
with assert_num_queries(0):
response = client.get(reverse("api"))
assert response.status_code == 200
def test_jwt_middleware(client, admin_user):
user_details_query = """
{
me {
email
}
}
"""
create_token_query = """
mutation {
tokenCreate(email: "admin@example.com", password: "password") {
token
}
}
"""
api_url = reverse("api")
api_client_post = partial(client.post, api_url, content_type="application/json")
# test setting AnonymousUser on unauthorized request to API
response = api_client_post(data={"query": user_details_query})
repl_data = response.json()
assert response.status_code == 200
assert isinstance(response.wsgi_request.user, AnonymousUser)
assert repl_data["data"]["me"] is None
# test creating a token for admin user
response = api_client_post(data={"query": create_token_query})
repl_data = response.json()
assert response.status_code == 200
assert response.wsgi_request.user == admin_user
token = repl_data["data"]["tokenCreate"]["token"]
assert token is not None
# test request with proper JWT token authorizes the request to API
response = api_client_post(
data={"query": user_details_query}, HTTP_AUTHORIZATION=f"JWT {token}"
)
repl_data = response.json()
assert response.status_code == 200
assert response.wsgi_request.user == admin_user
assert "errors" not in repl_data
assert repl_data["data"]["me"] == {"email": admin_user.email}
def test_real_query(user_api_client, product, channel_USD):
product_attr = product.product_type.product_attributes.first()
category = product.category
attr_value = product_attr.values.first()
query = """
query Root($categoryId: ID!, $sortBy: ProductOrder, $first: Int,
$attributesFilter: [AttributeInput], $channel: String) {
category(id: $categoryId) {
...CategoryPageFragmentQuery
__typename
}
products(first: $first, sortBy: $sortBy, filter: {categories: [$categoryId],
attributes: $attributesFilter}, channel: $channel) {
...ProductListFragmentQuery
__typename
}
attributes(first: 20, filter: {inCategory: $categoryId}, channel: $channel) {
edges {
node {
...ProductFiltersFragmentQuery
__typename
}
}
}
}
fragment CategoryPageFragmentQuery on Category {
id
name
ancestors(first: 20) {
edges {
node {
name
id
__typename
}
}
}
children(first: 20) {
edges {
node {
name
id
slug
__typename
}
}
}
__typename
}
fragment ProductListFragmentQuery on ProductCountableConnection {
edges {
node {
...ProductFragmentQuery
__typename
}
__typename
}
pageInfo {
hasNextPage
__typename
}
__typename
}
fragment ProductFragmentQuery on Product {
id
isAvailable
name
pricing {
...ProductPriceFragmentQuery
__typename
}
thumbnailUrl1x: thumbnail(size: 255){
url
}
thumbnailUrl2x: thumbnail(size: 510){
url
}
__typename
}
fragment ProductPriceFragmentQuery on ProductPricingInfo {
discount {
gross {
amount
currency
__typename
}
__typename
}
priceRange {
stop {
gross {
amount
currency
__typename
}
currency
__typename
}
start {
gross {
amount
currency
__typename
}
currency
__typename
}
__typename
}
__typename
}
fragment ProductFiltersFragmentQuery on Attribute {
id
name
slug
choices(first: 10) {
edges {
node {
id
name
slug
__typename
}
}
}
__typename
}
"""
variables = {
"categoryId": graphene.Node.to_global_id("Category", category.id),
"sortBy": {"field": "NAME", "direction": "ASC"},
"first": 1,
"attributesFilter": [
{"slug": f"{product_attr.slug}", "values": [f"{attr_value.slug}"]}
],
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
get_graphql_content(response)
def test_get_nodes(product_list):
global_ids = [to_global_id("Product", product.pk) for product in product_list]
# Make sure function works even if duplicated ids are provided
global_ids.append(to_global_id("Product", product_list[0].pk))
# Return products corresponding to global ids
products = get_nodes(global_ids, Product)
assert products == product_list
# Raise an error if requested id has no related database object
nonexistent_item = Mock(type="Product", pk=-1)
nonexistent_item_global_id = to_global_id(
nonexistent_item.type, nonexistent_item.pk
)
global_ids.append(nonexistent_item_global_id)
msg = "There is no node of type {} with pk {}".format(
nonexistent_item.type, nonexistent_item.pk
)
with pytest.raises(AssertionError) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
global_ids.pop()
# Raise an error if one of the node is of wrong type
invalid_item = Mock(type="test", pk=-1)
invalid_item_global_id = to_global_id(invalid_item.type, invalid_item.pk)
global_ids.append(invalid_item_global_id)
with pytest.raises(GraphQLError) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (f"Must receive Product id: {invalid_item_global_id}.",)
# Raise an error if no nodes were found
global_ids = []
msg = f"Could not resolve to a node with the global id list of '{global_ids}'."
with pytest.raises(Exception) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
# Raise an error if pass wrong ids
global_ids = ["a", "bb"]
msg = f"Could not resolve to a node with the global id list of '{global_ids}'."
with pytest.raises(Exception) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
@patch("saleor.product.models.Product.objects")
def test_filter_by_query_param(qs):
qs.filter.return_value = qs
qs = filter_by_query_param(qs, "test", ["name", "force"])
test_kwargs = {"name__icontains": "test", "force__icontains": "test"}
q_objects = Q()
for q in test_kwargs:
q_objects |= Q(**{q: test_kwargs[q]})
# FIXME: django 1.11 fails on called_once_with(q_objects)
    assert qs.filter.call_count == 1
def test_from_global_id_or_error(product):
invalid_id = "invalid"
message = f"Couldn't resolve id: {invalid_id}."
with pytest.raises(GraphQLError) as error:
from_global_id_or_error(invalid_id)
assert str(error.value) == message
def test_from_global_id_or_error_wth_invalid_type(product):
product_id = graphene.Node.to_global_id("Product", product.id)
message = "Must receive a ProductVariant id."
with pytest.raises(GraphQLError) as error:
from_global_id_or_error(product_id, "ProductVariant", raise_error=True)
assert str(error.value) == message
def test_from_global_id_or_error_wth_type(product):
expected_product_type = str(Product)
expected_product_id = graphene.Node.to_global_id(expected_product_type, product.id)
product_type, product_id = from_global_id_or_error(
expected_product_id, expected_product_type
)
assert product_id == str(product.id)
assert product_type == expected_product_type
|
import numpy
import os
import math
from azureml.core.model import Model
from azureml.core.dataset import Dataset
from inference_schema.schema_decorators \
import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type \
import NumpyParameterType
import keras
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from azureml.core.run import Run
from azureml.core import Dataset, Datastore, Workspace
import argparse
import json
import pandas as pd
import numpy as np
from azureml.core.authentication import ServicePrincipalAuthentication
# from azureml.core.authentication import InteractiveLoginAuthentication
def tts(data):
data['date'] = pd.to_datetime(data['date'])
data['date'] = (data['date'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
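    # Dates are converted to Unix epoch seconds; the last 2000 rows become the test set.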
(train, test) = data[0:-2000].values, data[-2000:].values
return (train, test)
def scale_data(train_set, test_set):
# apply Min Max Scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train_set[:, :4])
# reshape training set
train_set = train_set.reshape(train_set.shape[0], train_set.shape[1])
train_set_scaled = scaler.transform(train_set[:, :4])
# reshape test set
test_set = test_set.reshape(test_set.shape[0], test_set.shape[1])
test_set_scaled = scaler.transform(test_set[:, :4])
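    # NOTE: the scaled arrays above are computed but not returned; only the fitted
    # scaler is reused by the caller, while the raw feature columns are returned below.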
X_train, y_train = train_set[:, :4], train_set[:, 4:].ravel()
X_test, y_test = test_set[:, :4], test_set[:, 4:].ravel()
return X_train, y_train, X_test, y_test, scaler
def init():
# load the model from file into a global object
global model
model_path = Model.get_model_path(
os.getenv("AZUREML_MODEL_DIR").split('/')[-2])
print ("model path", model_path)
# try:
# print ("try")
# dataset = pd.read_csv('/var/azureml-app/train.csv')
# original_df = dataset.to_pandas_dataframe()
# except:
# print ("except")
# train_dataset = original_df.to_csv('train.csv', index=False)
# interactive_auth = InteractiveLoginAuthentication(tenant_id="def44f5f-0783-4b05-8f2f-dd615c5dfec4")
# ws = Workspace(subscription_id="6542067a-127a-43ff-b7f2-007fe21a37f0",
# resource_group="sales-mlops-rg",
# workspace_name="sales-mlops-ws",
# auth=interactive_auth)
# ws.get_details()
# print(original_df)
model = keras.models.load_model(model_path)
print("Current directory:", os.getcwd())
print("Model is loaded")
# date = '6/25/2020'
# store = 3
# item = 105
# price = 990
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
# input_sample = numpy.array([[date, store, item, price]])
# output_sample = numpy.array([4])
input_sample = numpy.array([[1591833600,34,759,690]])
output_sample = numpy.array([10])
@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data, request_headers):
global original_df
sp = ServicePrincipalAuthentication(tenant_id="def44f5f-0783-4b05-8f2f-dd615c5dfec4", service_principal_id="add8f304-2d88-45e3-94fa-ac6cf335d5df", service_principal_password="If2-.7Wlno57NW6v9~nE~xNIj~naD-DL5f")
ws = Workspace.get(name="sales-mlops-ws", auth = sp, subscription_id="6542067a-127a-43ff-b7f2-007fe21a37f0")
ws.get_details()
dataset = ws.datasets['salesforecast_ds']
original_df = dataset.to_pandas_dataframe()
# date = '6/25/2020'
# store = 34
# item = 759
# price = 690
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
date = data[0][0]
prev_sales = []
(train, test) = tts(original_df)
X_train, y_train, X_test, y_test, scaler_object = scale_data(train, test)
first_date = original_df["date"][0]
for x in original_df.index:
last_date = original_df["date"][x]
print("last date", last_date)
days_diff = (int(date) - int(last_date)) / (60 * 60 * 24)
total_data_days = (int(last_date) - int(first_date)) / (60 * 60 * 24)
print("days:", days_diff)
print("total_data_days:", total_data_days)
for i in original_df.index:
if (original_df["item"][i] == data[0][2] and original_df["store"][i] == data[0][1]):
prev_sales.append(original_df["sales"][i])
prev_sales_avg = 0
prev_sales_avg = (sum(prev_sales)) / total_data_days
forecast_result_array = []
test_set = data
test_set_scaled = scaler_object.transform(test_set)
X_test = test_set_scaled[:, :4]
X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
y_pred = model.predict(X_test)
print("y_pred:",y_pred)
result = y_pred[0][0][0]
result = round(result)
print("result:",result)
prev_sales_avg = round (prev_sales_avg)
next_day_prediction = math.ceil(result + prev_sales_avg)
prev_sales.append(next_day_prediction)
forecast_result_array.append(next_day_prediction)
if days_diff > 1:
for day in range(round(days_diff)):
total_data_days += 1
prev_sales_avg = sum(prev_sales) / total_data_days
prev_sales_avg = round(prev_sales_avg)
prev_sales.append(prev_sales_avg)
forecast_result_array.append(prev_sales_avg)
end_result = sum(forecast_result_array)
print("end result: ", end_result)
print(('{{"RequestId":"{0}", '
'"TraceParent":"{1}", '
'"NumberOfPredictions":{2}}}'
).format(
request_headers.get("X-Ms-Request-Id", ""),
request_headers.get("Traceparent", ""),
end_result
))
return {"result": end_result}
if __name__ == "__main__":
init()
# date ='6/25/2020'
# store = 34
# item = 759
# price = 690
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
    # Literal smoke-test sample: [epoch-seconds date, store, item, price]
    test = numpy.array([[1591833600, 34, 759, 690]])
    # print("test:", test)
prediction = run(test, {})
print("Test result: ", prediction)
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.billing_call_prefix'
db.add_column(u'shop_order', 'billing_call_prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)
# Adding field 'Order.shipping_call_prefix'
db.add_column(u'shop_order', 'shipping_call_prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)
def backwards(self, orm):
# Deleting field 'Order.billing_call_prefix'
db.delete_column(u'shop_order', 'billing_call_prefix')
# Deleting field 'Order.shipping_call_prefix'
db.delete_column(u'shop_order', 'shipping_call_prefix')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 13, 7, 33, 36, 439968)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 13, 7, 33, 36, 439200)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'shop.order': {
'Meta': {'object_name': 'Order'},
'_order_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.TextField', [], {}),
'billing_call_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'billing_country': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'billing_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'billing_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'items_subtotal': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'items_tax': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'paid': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'shipping_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'shipping_call_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'shipping_country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'shipping_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'shipping_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'shipping_same_as_billing': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'shipping_tax': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'shipping_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'shop.orderitem': {
'Meta': {'unique_together': "(('order', 'product'),)", 'object_name': 'OrderItem'},
'_line_item_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'_line_item_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '18', 'decimal_places': '4'}),
'_line_item_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '18', 'decimal_places': '4'}),
'_unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'_unit_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sale': ('django.db.models.fields.BooleanField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['shop.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zai_products.ProductVariant']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tax_class': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.TaxClass']", 'null': 'True', 'blank': 'True'}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'shop.orderpayment': {
'Meta': {'object_name': 'OrderPayment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'authorized': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['shop.Order']"}),
'payment_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_module': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_module_key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'shop.orderstatus': {
'Meta': {'object_name': 'OrderStatus'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': u"orm['shop.Order']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '20'})
},
u'shop.taxclass': {
'Meta': {'object_name': 'TaxClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'zai_products.product': {
'Meta': {'object_name': 'Product'},
'_unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_de': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tax_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['shop.TaxClass']"}),
'tax_included': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
u'zai_products.productvariant': {
'Meta': {'object_name': 'ProductVariant'},
'grip': ('django.db.models.fields.CharField', [], {'default': "'chocolate'", 'max_length': '64'}),
'hand': ('django.db.models.fields.CharField', [], {'default': "'left'", 'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'default': "'small'", 'max_length': '64'}),
'lie': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zai_products.Product']"}),
'shaft': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '64'}),
'special_requirements': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
}
}
complete_apps = ['shop']
|
import unittest
from simuloc.signal import Generator
class GeneratorTestCase(unittest.TestCase):
"""GeneratorTestCase tests the generator class."""
def setUp(self):
"""Creates a instance of the generator class."""
self.cinst = Generator()
def tearDown(self):
pass
def test_noise(self):
"""Tests the boundry of noise generator."""
self.assertTrue(self.cinst.noise(0.1) < 4)
if __name__ == '__main__':
unittest.main()
|
from source.camera import camera
from source.LaneDetect import LaneDetect
from moviepy.editor import VideoFileClip
import glob
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimg
import numpy as np
import cv2
#
# def process_video(input_video_file):
# clip1 = VideoFileClip(input_video_file);
# outputclip = clip1.fl_image(process_vid)
# outputclip.write_videofile('output_'+input_video_file, audio=False);
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
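# Hypothetical usage sketch for rgb2gray (illustrative only, not part of the original pipeline);
# assumes `img` is an (H, W, 3) RGB array, e.g. loaded with mpimg.imread:
#   img = mpimg.imread('../test_images/test1.jpg')
#   gray = rgb2gray(img)            # (H, W) luminance image, weights 0.299 / 0.587 / 0.114
#   plt.imshow(gray, cmap='gray')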
if __name__ == "__main__":
print('main')
# images = glob.glob('../camera_cal/calibration*.jpg')
# print(images)
camera = camera()
# camera.calibration(images, x_cor=9, y_cor=6, outputfilename='./camera_calibration_data_1')
camera.load_calibration_data('./camera_calibration_data.p')
# # images = sorted(images, key=lambda x: float(re.findall("(\d+)", x)[0]))
#
# print('Correction images (successfully detected corners):')
# plt.figure(figsize=(11.5, 9))
# gridspec.GridSpec(6, 3)
# # Step through the list and search for chessboard corners
# for i, image in enumerate(camera_calibrate.calibration_images_success):
# plt.subplot2grid((6, 3), (i // 3, i % 3), colspan=1, rowspan=1)
# plt.imshow(image)
# plt.axis('off')
# plt.show()
#
# plt.figure(figsize=(12, 4))
# plt.figtext(.5, .8, 'Images in which cv2 failed to find desired corners', fontsize=22, ha='center')
# for i, p in enumerate(camera_calibrate.calibration_images_fail):
# plt.subplot(1, 3, i + 1)
# plt.imshow(mpimg.imread(p)) # draw the first image of each class
# plt.title(p)
# plt.axis('off')
# plt.tight_layout(pad=0, h_pad=0, w_pad=0)
# plt.show()
# plt.savefig('fail.png')
# camera_calibrate.load_calibration_data('./camera_calibration_data.p')
# orig_img = mpimg.imread('../test_images/test1.jpg')
# undist_img = camera_calibrate.undistort(orig_img)
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
# ax1.imshow(orig_img)
# ax1.set_title('Original', fontsize=20)
# ax2.imshow(undist_img)
# ax2.set_title('Undistorted', fontsize=20)
# # plt.show()
# plt.savefig('undistort2.png')
# Perspective transform
# for image in glob.glob('../test_images/*.jpg'):
# orig_img = cv2.imread(image)
# birdeye_img, _ = camera.birds_eye(orig_img)
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
# f.tight_layout()
# ax1.imshow(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
# ax1.set_title('Original', fontsize=20)
# ax2.imshow(cv2.cvtColor(birdeye_img, cv2.COLOR_BGR2RGB))
# ax2.set_title('Undistorted and Warped Image', fontsize=20)
# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# plt.show()
# # plt.savefig('../output_images/warp_' + str(i) + '.png')
    # # edge
# image = mpimg.imread('../test_images/test6.jpg')
# lane_detecter = LaneDetect()
# result = lane_detecter.get_edges(image)
#
# # Plot the result
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
# # f.tight_layout()
# ax1.axis('off')
# ax1.imshow(image)
# ax1.set_title('Original', fontsize=18)
# ax2.axis('off')
# ax2.set_title('Edge', fontsize=18)
#
#
# ax2.imshow(result, cmap='gray')
# plt.show()
# plt.savefig('edge.png')
# # Detect Lane line
# for image_name in glob.glob('../test_images/*.jpg'):
# orig_img = mpimg.imread(image_name)
#
# lane_detecter = LaneDetect()
# lane_detecter.initcamera()
# lane_detecter.initlines(orig_img)
# output_img = lane_detecter.process_pipeline(orig_img)
# f, (ax1) = plt.subplots(1, 1, figsize=(9, 6))
# ax1.imshow(output_img)
# ax1.set_title('output_img', fontsize=20)
# plt.axis('off')
# plt.show()
# break
# Applying pipeline to video
clip1 = VideoFileClip('../project_video.mp4')
lane_detecter = LaneDetect()
lane_detecter.initcamera()
lane_detecter.initlines(clip1.get_frame(0))
outputclip = clip1.fl_image(lane_detecter.process_pipeline)
outputclip.write_videofile('../output_videos/output_project_video.mp4', audio=False)
#
# clip1 = VideoFileClip('../harder_challenge_video.mp4');
# lane_detecter = LaneDetect(clip1.get_frame(0))
# outputclip = clip1.fl_image(lane_detecter.process_pipeline)
# outputclip.write_videofile('../output_harder_challenge_video.mp4', audio=False)
#
# clip1 = VideoFileClip('../challenge_video.mp4')
# lane_detecter = LaneDetect(clip1.get_frame(0))
# outputclip = clip1.fl_image(lane_detecter.process_pipeline)
# outputclip.write_videofile('../output_challenge_video.mp4', audio=False)
|
# Mistral Client documentation build configuration file
import os
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinxcontrib.apidoc',
'openstackdocstheme',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# sphinxcontrib.apidoc options
apidoc_module_dir = '../../mistralclient'
apidoc_output_dir = 'api'
apidoc_excluded_paths = [
'test',
'tests/*']
apidoc_separate_modules = True
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mistral Client'
copyright = u'2016, Mistral Contributors'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mistralclient.']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'MistralClient'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': [
'sidebarlinks.html', 'localtoc.html', 'searchbox.html',
'sourcelink.html'
],
'**': [
'localtoc.html', 'relations.html',
'searchbox.html', 'sourcelink.html'
]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mistraldoc'
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mistral_client', u'Mistral Client Documentation',
[u'Mistral Contributors'], 1)
]
# -- Options for openstackdocstheme -------------------------------------------
openstackdocs_repo_name = 'openstack/python-mistralclient'
openstackdocs_bug_project = 'python-mistralclient'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
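# Typical build invocation for this configuration (assumed standard Sphinx layout, not part of the file):
#   sphinx-build -b html doc/source doc/build/html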
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-08 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0008_auto_20180623_2009'),
]
operations = [
migrations.AddField(
model_name='movie',
name='poster_thumbnail',
field=models.ImageField(blank=True, help_text='Upload the poster thumbnail', null=True, upload_to='movie_posters/thumbnails/'),
),
]
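# To apply this migration with the standard Django workflow (assumed project setup):
#   python manage.py migrate reviews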
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pprint
from collections import defaultdict
from .context_query_attention import StructuredAttention
from .encoder import StackedEncoder
from .cnn import DepthwiseSeparableConv
from .model_utils import save_pickle, mask_logits, flat_list_of_lists, \
find_max_triples, get_high_iou_sapns, expand_span
class LinearWrapper(nn.Module):
"""1D conv layer"""
def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):
super(LinearWrapper, self).__init__()
self.relu = relu
layers = [nn.LayerNorm(in_hsz)] if layer_norm else []
layers += [
nn.Dropout(dropout),
nn.Linear(in_hsz, out_hsz)
]
self.conv = nn.Sequential(*layers)
def forward(self, x):
"""(N, L, D)"""
if self.relu:
return F.relu(self.conv(x), inplace=True) # (N, L, D)
else:
return self.conv(x) # (N, L, D)
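# Minimal usage sketch for LinearWrapper (shapes are illustrative assumptions):
#   layer = LinearWrapper(in_hsz=128, out_hsz=64, layer_norm=True, dropout=0.1, relu=True)
#   out = layer(torch.randn(2, 10, 128))   # (N, L, D) -> (2, 10, 64)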
class ConvLinear(nn.Module):
"""1D conv layer"""
def __init__(self, in_hsz, out_hsz, kernel_size=3, layer_norm=True, dropout=0.1, relu=True):
super(ConvLinear, self).__init__()
layers = [nn.LayerNorm(in_hsz)] if layer_norm else []
layers += [
nn.Dropout(dropout),
DepthwiseSeparableConv(in_ch=in_hsz,
out_ch=out_hsz,
k=kernel_size,
dim=1,
relu=relu)
]
self.conv = nn.Sequential(*layers)
def forward(self, x):
"""(N, L, D)"""
return self.conv(x) # (N, L, D)
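# Minimal usage sketch for ConvLinear (shapes are illustrative assumptions); unlike LinearWrapper,
# it mixes information across neighbouring positions via a depthwise-separable 1D convolution:
#   layer = ConvLinear(in_hsz=128, out_hsz=64, kernel_size=3, dropout=0.1)
#   out = layer(torch.randn(2, 10, 128))   # expected (2, 10, 64), assuming the conv pads to keep L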
class STAGE(nn.Module):
def __init__(self, opt):
super(STAGE, self).__init__()
self.opt = opt
self.inference_mode = False
self.sub_flag = opt.sub_flag
self.vfeat_flag = opt.vfeat_flag
self.vfeat_size = opt.vfeat_size
self.t_iter = opt.t_iter
self.extra_span_length = opt.extra_span_length
self.add_local = opt.add_local
self.use_sup_att = opt.use_sup_att
self.num_negatives = opt.num_negatives
self.negative_pool_size = opt.negative_pool_size
self.num_hard = opt.num_hard
self.drop_topk = opt.drop_topk
self.margin = opt.margin
self.att_loss_type = opt.att_loss_type
self.scale = opt.scale
self.alpha = opt.alpha
self.dropout = opt.dropout
self.hsz = opt.hsz
self.bsz = None
self.num_seg = None
self.num_a = 5
self.flag_cnt = self.sub_flag + self.vfeat_flag
self.wd_size = opt.embedding_size
self.bridge_hsz = 300
self.bert_word_encoding_fc = nn.Sequential(
nn.LayerNorm(self.wd_size),
nn.Dropout(self.dropout),
nn.Linear(self.wd_size, self.bridge_hsz),
nn.ReLU(True),
nn.LayerNorm(self.bridge_hsz),
)
if self.sub_flag:
print("Activate sub branch")
if self.vfeat_flag:
print("Activate vid branch")
self.vid_fc = nn.Sequential(
nn.LayerNorm(self.vfeat_size),
nn.Dropout(self.dropout),
nn.Linear(self.vfeat_size, self.bridge_hsz),
nn.ReLU(True),
nn.LayerNorm(self.bridge_hsz)
)
if self.flag_cnt == 2:
self.concat_fc = nn.Sequential(
nn.LayerNorm(3 * self.hsz),
nn.Dropout(self.dropout),
nn.Linear(3 * self.hsz, self.hsz),
nn.ReLU(True),
nn.LayerNorm(self.hsz),
)
self.input_embedding = nn.Sequential(
nn.Dropout(self.dropout),
nn.Linear(self.bridge_hsz, self.hsz),
nn.ReLU(True),
nn.LayerNorm(self.hsz),
)
self.input_encoder = StackedEncoder(n_blocks=opt.input_encoder_n_blocks,
n_conv=opt.input_encoder_n_conv,
kernel_size=opt.input_encoder_kernel_size,
num_heads=opt.input_encoder_n_heads,
hidden_size=self.hsz,
dropout=self.dropout)
self.str_attn = StructuredAttention(dropout=self.dropout,
scale=opt.scale,
add_void=opt.add_non_visual) # no parameters inside
self.c2q_down_projection = nn.Sequential(
nn.LayerNorm(3 * self.hsz),
nn.Dropout(self.dropout),
nn.Linear(3*self.hsz, self.hsz),
nn.ReLU(True),
)
self.cls_encoder = StackedEncoder(n_blocks=opt.cls_encoder_n_blocks,
n_conv=opt.cls_encoder_n_conv,
kernel_size=opt.cls_encoder_kernel_size,
num_heads=opt.cls_encoder_n_heads,
hidden_size=self.hsz,
dropout=self.dropout)
self.cls_projection_layers = nn.ModuleList(
[
LinearWrapper(in_hsz=self.hsz,
out_hsz=self.hsz,
layer_norm=True,
dropout=self.dropout,
relu=True)
] +
[
ConvLinear(in_hsz=self.hsz,
out_hsz=self.hsz,
kernel_size=3,
layer_norm=True,
dropout=self.dropout,
relu=True)
for _ in range(self.t_iter)])
self.temporal_scoring_st_layers = nn.ModuleList([
LinearWrapper(in_hsz=self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
for _ in range(self.t_iter+1)])
self.temporal_scoring_ed_layers = nn.ModuleList([
LinearWrapper(in_hsz=self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
for _ in range(self.t_iter+1)])
self.temporal_criterion = nn.CrossEntropyLoss(reduction="sum")
self.classifier = LinearWrapper(in_hsz=self.hsz * 2 if self.add_local else self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
def load_word_embedding(self, pretrained_embedding, requires_grad=False):
self.word_embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))
self.word_embedding.weight.requires_grad = requires_grad
def forward(self, batch):
if self.inference_mode:
return self.forward_main(batch)
else:
out, att_loss, att_predictions, temporal_loss, temporal_predictions, other_outputs = self.forward_main(batch)
return out, att_loss, att_predictions, temporal_loss, temporal_predictions
def forward_main(self, batch):
"""
Args:
batch: edict, keys = qas, qas_mask, qa_noun_masks, sub, sub_mask, vcpt, vcpt_mask, vid, vid_mask,
att_labels, att_labels_mask, qid, target, vid_name, ts_label
qas, qas_mask, qa_noun_masks: (N, 5, Lqa)
sub, sub_mask: (N, #imgs, Ls)
vcpt, vcpt_mask: (N, #imgs, #regions)
vid, vid_mask: (N, #imgs, #regions, D), (N, #imgs, #regions)
att_labels, att_labels_mask: A list of N (#imgs, #qa-words, #regions)
qid: list(int)
vid_name: list(str)
target: torch.LongTensor
use_hard_negatives: bool, true to sample hard negatives
q_l: int, length of the tokenized question
anno_st_idx (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation.
ts_label: {"st": (N, ), "ed": (N, )} for 'st_ed'. (N, L) for 'frm'
ts_label_mask: (N, L) for both 'st_ed' and 'frm'
Returns:
"""
self.bsz = len(batch.qid)
bsz = self.bsz
num_a = self.num_a
hsz = self.hsz
a_embed = self.base_encoder(batch.qas_bert.view(bsz*num_a, -1, self.wd_size), # (N*5, L, D)
batch.qas_mask.view(bsz * num_a, -1), # (N*5, L)
self.bert_word_encoding_fc,
self.input_embedding,
self.input_encoder) # (N*5, L, D)
a_embed = a_embed.view(bsz, num_a, 1, -1, hsz) # (N, 5, 1, L, D)
a_mask = batch.qas_mask.view(bsz, num_a, 1, -1) # (N, 5, 1, L)
attended_sub, attended_vid, attended_vid_mask, attended_sub_mask = (None, ) * 4
        other_outputs = {}  # {"pos_noun_mask": batch.qa_noun_masks}  # used for visualization and computing att accuracy
if self.sub_flag:
num_imgs, num_words = batch.sub_bert.shape[1:3]
sub_embed = self.base_encoder(batch.sub_bert.view(bsz*num_imgs, num_words, -1), # (N*Li, Lw)
batch.sub_mask.view(bsz * num_imgs, num_words), # (N*Li, Lw)
self.bert_word_encoding_fc,
self.input_embedding,
self.input_encoder) # (N*Li, Lw, D)
            sub_embed = sub_embed.contiguous().view(bsz, 1, num_imgs, num_words, -1)  # (N, 1, Li, Lw, D)
sub_mask = batch.sub_mask.view(bsz, 1, num_imgs, num_words) # (N, 1, Li, Lw)
attended_sub, attended_sub_mask, sub_raw_s, sub_normalized_s = \
self.qa_ctx_attention(a_embed, sub_embed, a_mask, sub_mask,
noun_mask=None,
non_visual_vectors=None)
other_outputs["sub_normalized_s"] = sub_normalized_s
other_outputs["sub_raw_s"] = sub_raw_s
if self.vfeat_flag:
num_imgs, num_regions = batch.vid.shape[1:3]
vid_embed = F.normalize(batch.vid, p=2, dim=-1) # (N, Li, Lr, D)
vid_embed = self.base_encoder(vid_embed.view(bsz*num_imgs, num_regions, -1), # (N*Li, Lw)
batch.vid_mask.view(bsz * num_imgs, num_regions), # (N*Li, Lr)
self.vid_fc,
self.input_embedding,
self.input_encoder) # (N*Li, L, D)
vid_embed = vid_embed.contiguous().view(bsz, 1, num_imgs, num_regions, -1) # (N, 1, Li, Lr, D)
vid_mask = batch.vid_mask.view(bsz, 1, num_imgs, num_regions) # (N, 1, Li, Lr)
attended_vid, attended_vid_mask, vid_raw_s, vid_normalized_s = \
self.qa_ctx_attention(a_embed, vid_embed, a_mask, vid_mask,
noun_mask=None,
non_visual_vectors=None)
other_outputs["vid_normalized_s"] = vid_normalized_s
other_outputs["vid_raw_s"] = vid_raw_s
if self.flag_cnt == 2:
visual_text_embedding = torch.cat([attended_sub,
attended_vid,
attended_sub * attended_vid], dim=-1) # (N, 5, Li, Lqa, 3D)
visual_text_embedding = self.concat_fc(visual_text_embedding) # (N, 5, Li, Lqa, D)
out, target, t_scores = self.classfier_head_multi_proposal(
visual_text_embedding, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
elif self.sub_flag:
out, target, t_scores = self.classfier_head_multi_proposal(
attended_sub, attended_sub_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
elif self.vfeat_flag:
out, target, t_scores = self.classfier_head_multi_proposal(
attended_vid, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
else:
raise NotImplementedError
assert len(out) == len(target)
other_outputs["temporal_scores"] = t_scores # (N, 5, Li) or (N, 5, Li, 2)
if self.inference_mode:
inference_outputs = {
"answer": out, # (N, 5)
"t_scores": F.softmax(t_scores, dim=2),
"att_predictions": self.get_att_prediction(
scores=other_outputs["vid_raw_s"],
object_vocab=batch.eval_object_word_ids,
words=batch.qas,
vid_names=batch.vid_name,
qids=batch.qid,
img_indices=batch.image_indices,
boxes=batch.boxes,
start_indices=batch.anno_st_idx,
) if self.vfeat_flag else None,
}
return inference_outputs
att_loss = 0
att_predictions = None
# if (self.use_sup_att or not self.training) and self.vfeat_flag:
if self.use_sup_att and self.training and self.vfeat_flag:
start_indices = batch.anno_st_idx
try:
cur_att_loss, cur_att_predictions = \
self.get_att_loss(other_outputs["vid_raw_s"], batch.att_labels, batch.target, batch.qas,
qids=batch.qid,
q_lens=batch.q_l,
vid_names=batch.vid_name,
img_indices=batch.image_indices,
boxes=batch.boxes,
start_indices=start_indices,
num_negatives=self.num_negatives,
use_hard_negatives=batch.use_hard_negatives,
drop_topk=self.drop_topk)
except AssertionError as e:
save_pickle(
{"batch": batch, "start_indices": start_indices, "vid_raw_s": other_outputs["vid_raw_s"]},
"err_dict.pickle"
)
import sys
sys.exit(1)
att_loss += cur_att_loss
att_predictions = cur_att_predictions
temporal_loss = self.get_ts_loss(temporal_scores=t_scores,
ts_labels=batch.ts_label,
answer_indices=batch.target)
if self.training:
return [out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs
else:
return out, att_loss, att_predictions, temporal_loss, F.softmax(t_scores, dim=2), other_outputs
@classmethod
def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):
""" Raw data --> higher-level embedding
Args:
data: (N, L) for text, (N, L, D) for video
data_mask: (N, L)
init_encoder: word_embedding layer for text, MLP (downsize) for video
downsize_encoder: MLP, down project to hsz
input_encoder: multiple layer of encoder block, with residual connection, CNN, layernorm, etc
Returns:
encoded_data: (N, L, D)
"""
data = downsize_encoder(init_encoder(data))
return input_encoder(data, data_mask)
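    # Illustrative call chain for base_encoder, mirroring the QA-text branch in forward_main
    # (shapes are assumptions for demonstration):
    #   a_embed = self.base_encoder(batch.qas_bert.view(N * 5, L, wd_size),  # raw BERT word features
    #                               batch.qas_mask.view(N * 5, L),
    #                               self.bert_word_encoding_fc,              # init_encoder
    #                               self.input_embedding,                    # downsize_encoder
    #                               self.input_encoder)                      # -> (N*5, L, hsz)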
def qa_ctx_attention(self, qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask, non_visual_vectors):
""" Align image regions with QA words
Args:
qa_embed: (N, 5, 1, Lqa, D)
qa_mask: (N, 5, 1, Lqa)
ctx_embed: (N, 1, Li, Lr, D)
ctx_mask: (N, 1, Li, Lr)
noun_mask: (N, 5, Lqa)
non_visual_vectors: (m, D), m is a tunable parameter
Returns:
"""
num_img, num_region = ctx_mask.shape[2:]
u_a, raw_s, s_mask, s_normalized = self.str_attn(
qa_embed, ctx_embed, qa_mask, ctx_mask,
noun_mask=noun_mask, void_vector=non_visual_vectors) # (N, 5, Li, Lqa, D), (N, 5, Li, Lqa, lr) x2
qa_embed = qa_embed.repeat(1, 1, num_img, 1, 1)
mixed = torch.cat([qa_embed,
u_a,
qa_embed*u_a], dim=-1) # (N, 5, Li, Lqa, D)
mixed = self.c2q_down_projection(mixed) # (N, 5, Li, Lqa, D)
mixed_mask = (s_mask.sum(-1) != 0).float() # (N, 5, Li, Lqa)
return mixed, mixed_mask, raw_s, s_normalized
def get_proposals(self, max_statement, max_statement_mask, temporal_scores,
targets, ts_labels, max_num_proposal=1, iou_thd=0.5, ce_prob_thd=0.01,
extra_span_length=3):
"""
Args:
max_statement: (N, 5, Li, D)
max_statement_mask: (N, 5, Li, 1)
temporal_scores: (N, 5, Li, 2)
targets: (N, )
ts_labels: (N, Li) for frm or N * (st, ed) for st_ed
max_num_proposal:
iou_thd:
ce_prob_thd:
extra_span_length:
Returns:
"""
bsz, num_a, num_img, _ = max_statement_mask.shape
if self.training:
ca_temporal_scores_st_ed = \
temporal_scores[torch.arange(bsz, dtype=torch.long), targets].data # (N, Li, 2)
ca_temporal_scores_st_ed = F.softmax(ca_temporal_scores_st_ed, dim=1) # (N, Li, 2)
ca_pred_spans = find_max_triples(ca_temporal_scores_st_ed[:, :, 0],
ca_temporal_scores_st_ed[:, :, 1],
topN=max_num_proposal,
prob_thd=ce_prob_thd) # N * [(st_idx, ed_idx, confidence), ...]
# +1 for ed index before forward into get_high_iou_spans func.
ca_pred_spans = [[[sub_e[0], sub_e[1] + 1, sub_e[2]] for sub_e in e] for e in ca_pred_spans]
spans = get_high_iou_sapns(zip(ts_labels["st"].tolist(), (ts_labels["ed"] + 1).tolist()),
ca_pred_spans, iou_thd=iou_thd, add_gt=True) # N * [(st, ed), ...]
local_max_max_statement_list = [] # N_new * (5, D)
global_max_max_statement_list = [] # N_new * (5, D)
span_targets = [] # N_new * (1,)
for idx, (t, span_sublist) in enumerate(zip(targets, spans)):
span_targets.extend([t] * len(span_sublist))
cur_global_max_max_statement = \
torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 1)[0]
global_max_max_statement_list.extend([cur_global_max_max_statement] * len(span_sublist))
for span in span_sublist:
span = expand_span(span, expand_length=extra_span_length)
cur_span_max_statement = mask_logits(
max_statement[idx, :, span[0]:span[1]],
max_statement_mask[idx, :, span[0]:span[1]]) # (5, Li[st:ed], D)
local_max_max_statement_list.append(torch.max(cur_span_max_statement, 1)[0]) # (5, D)
local_max_max_statement = torch.stack(local_max_max_statement_list) # (N_new, 5, D)
global_max_max_statement = torch.stack(global_max_max_statement_list) # (N_new, 5, D)
max_max_statement = torch.cat([
local_max_max_statement,
global_max_max_statement], dim=-1) # (N_new, 5, 2D)
return max_max_statement, targets.new_tensor(span_targets) # (N_new, 5, 2D), (N_new, )
else: # testing
temporal_scores_st_ed = F.softmax(temporal_scores, dim=2) # (N, 5, Li, 2)
temporal_scores_st_ed_reshaped = temporal_scores_st_ed.view(bsz * num_a, -1, 2) # (N*5, Li, 2)
pred_spans = find_max_triples(temporal_scores_st_ed_reshaped[:, :, 0],
temporal_scores_st_ed_reshaped[:, :, 1],
topN=1, prob_thd=None) # (N*5) * [(st, ed, confidence), ]
pred_spans = flat_list_of_lists(pred_spans) # (N*5) * (st, ed, confidence)
pred_spans = torch.FloatTensor(pred_spans).to(temporal_scores_st_ed_reshaped.device) # (N*5, 3)
pred_spans, pred_scores = pred_spans[:, :2].long(), pred_spans[:, 2] # (N*5, 2), (N*5, )
pred_spans = [[e[0], e[1] + 1] for e in pred_spans]
max_statement = max_statement.view(bsz * num_a, num_img, -1) # (N*5, Li, D)
max_statement_mask = max_statement_mask.view(bsz * num_a, num_img, -1) # (N*5, Li, 1)
local_max_max_statement_list = [] # N*5 * (D, )
global_max_max_statement_list = [] # N*5 * (D, )
for idx, span in enumerate(pred_spans):
span = expand_span(span, expand_length=extra_span_length)
cur_global_max_max_statement = \
torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 0)[0]
global_max_max_statement_list.append(cur_global_max_max_statement)
cur_span_max_statement = mask_logits(
max_statement[idx, span[0]:span[1]],
max_statement_mask[idx, span[0]:span[1]]) # (Li[st:ed], D), words for span[0] == span[1]
local_max_max_statement_list.append(torch.max(cur_span_max_statement, 0)[0]) # (D, )
local_max_max_statement = torch.stack(local_max_max_statement_list) # (N*5, D)
global_max_max_statement = torch.stack(global_max_max_statement_list) # (N*5, D)
max_max_statement = torch.cat([
local_max_max_statement,
global_max_max_statement], dim=-1) # (N_new, 5, 2D)
return max_max_statement.view(bsz, num_a, -1), targets # (N, 5, 2D), (N, )
def residual_temporal_predictor(self, layer_idx, input_tensor):
"""
Args:
layer_idx (int):
input_tensor: (N, L, D)
Returns:
temporal_score
"""
input_tensor = input_tensor + self.cls_projection_layers[layer_idx](input_tensor) # (N, L, D)
t_score_st = self.temporal_scoring_st_layers[layer_idx](input_tensor) # (N, L, 1)
t_score_ed = self.temporal_scoring_ed_layers[layer_idx](input_tensor) # (N, L, 1)
t_score = torch.cat([t_score_st, t_score_ed], dim=2) # (N, L, 2)
return input_tensor, t_score
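    # Sketch of a single refinement step (shapes are illustrative assumptions): the statement
    # representation is refined with a residual projection, then scored per position as span start/end:
    #   refined, t_score = self.residual_temporal_predictor(0, torch.randn(N * 5, Li, hsz))
    #   # refined: (N*5, Li, hsz); t_score: (N*5, Li, 2), [..., 0] start logits, [..., 1] end logits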
def classfier_head_multi_proposal(self, statement, statement_mask, targets, ts_labels, ts_labels_mask,
max_num_proposal=1, ce_prob_thd=0.01, iou_thd=0.5, extra_span_length=3):
"""Predict the probabilities of each statements being true. Statements = QA + Context.
Args:
statement: (N, 5, Li, Lqa, D)
statement_mask: (N, 5, Li, Lqa)
targets: (N, )
ts_labels: (N, Li) for frm or N * (st, ed) for st_ed
ts_labels_mask: (N, Li)
max_num_proposal (int):
ce_prob_thd (float): threshold for p1*p2 (st, ed)
iou_thd (float): threshold for temporal iou
extra_span_length (int): expand the localized span to give a little bit extra context
Returns:
"""
bsz, num_a, num_img, num_words = statement_mask.shape
statement = statement.view(bsz*num_a*num_img, num_words, -1) # (N*5*Li, Lqa, D)
statement_mask = statement_mask.view(bsz*num_a*num_img, num_words) # (N*5*Li, Lqa)
statement = self.cls_encoder(statement, statement_mask) # (N*5*Li, Lqa, D)
max_statement = torch.max(mask_logits(statement, statement_mask.unsqueeze(2)), 1)[0] # (N*5*Li, D)
max_statement_mask = (statement_mask.sum(1) != 0).float().view(bsz, num_a, num_img, 1) # (N, 5, Li, 1)
max_statement = max_statement.view(bsz*num_a, num_img, -1) # (N, 5, Li, D)
t_score_container = []
encoded_max_statement_container = []
encoded_max_statement = max_statement # (N*5, Li, D)
for layer_idx in range(self.t_iter+1):
encoded_max_statement, prev_t_score = \
self.residual_temporal_predictor(layer_idx, encoded_max_statement)
t_score_container.append(prev_t_score.view(bsz, num_a, num_img, 2)) # (N, 5, Li, 2)
encoded_max_statement_container.append(encoded_max_statement) # (N*5, Li, D)
if self.t_iter > 0:
            temporal_scores_st_ed = 0.5 * (t_score_container[0] + torch.stack(t_score_container[1:]).mean(0))  # average the base scores with the mean of the refined iterations
else:
temporal_scores_st_ed = t_score_container[0] # (N, 5, Li, 2)
# mask before softmax
temporal_scores_st_ed = mask_logits(temporal_scores_st_ed, ts_labels_mask.view(bsz, 1, num_img, 1))
# when predict answer, only consider 1st level representation !!!
# since the others are all generated from the 1st level
stacked_max_statement = encoded_max_statement_container[0].view(bsz, num_a, num_img, -1) # (N, 5, Li, D)
if self.add_local:
max_max_statement, targets = self.get_proposals(
stacked_max_statement, max_statement_mask, temporal_scores_st_ed,
targets, ts_labels, max_num_proposal=max_num_proposal, iou_thd=iou_thd,
ce_prob_thd=ce_prob_thd, extra_span_length=extra_span_length) # (N, 5, D)
else:
max_max_statement = \
torch.max(mask_logits(stacked_max_statement, max_statement_mask), 2)[0] # (N, 5, D)
# targets = targets
answer_scores = self.classifier(max_max_statement).squeeze(2) # (N, 5)
return answer_scores, targets, temporal_scores_st_ed # (N_new, 5), (N_new, ) (N, 5, Li, 2)
def get_ts_loss(self, temporal_scores, ts_labels, answer_indices):
"""
Args:
temporal_scores: (N, 5, Li, 2)
ts_labels: dict(st=(N, ), ed=(N, ))
answer_indices: (N, )
Returns:
"""
bsz = len(answer_indices)
# compute loss
ca_temporal_scores_st_ed = \
temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices] # (N, Li, 2)
loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels["st"])
loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels["ed"])
return (loss_st + loss_ed) / 2.
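    # Worked shape example for get_ts_loss (sizes assumed): with N=2 and Li=8,
    # ca_temporal_scores_st_ed is (2, 8, 2); the start logits [..., 0] are scored against
    # ts_labels["st"] and the end logits [..., 1] against ts_labels["ed"] with summed
    # cross-entropy, and the two losses are averaged.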
@classmethod
def sample_negatives(cls, pred_score, pos_indices, neg_indices, num_negatives=2,
use_hard_negatives=False, negative_pool_size=0, num_hard=2, drop_topk=0):
""" Sample negatives from a set of indices. Several sampling strategies are supported:
1, random; 2, hard negatives; 3, drop_topk hard negatives; 4, mix easy and hard negatives
5, sampling within a pool of hard negatives; 6, sample across images of the same video.
Args:
pred_score: (num_img, num_words, num_region)
            pos_indices: (N_pos, 3) all positive region indices for the same word, not necessarily the same image.
neg_indices: (N_neg, 3) ...
num_negatives (int):
use_hard_negatives (bool):
negative_pool_size (int):
num_hard (int):
drop_topk (int):
Returns:
"""
num_unique_pos = len(pos_indices)
sampled_pos_indices = torch.cat([pos_indices] * num_negatives, dim=0)
if use_hard_negatives:
# print("using use_hard_negatives")
neg_scores = pred_score[neg_indices[:, 0], neg_indices[:, 1], neg_indices[:, 2]] # TODO
max_indices = torch.sort(neg_scores, descending=True)[1].tolist()
if negative_pool_size > num_negatives: # sample from a pool of hard negatives
hard_pool = max_indices[drop_topk:drop_topk + negative_pool_size]
hard_pool_indices = neg_indices[hard_pool]
num_hard_negs = num_negatives
sampled_easy_neg_indices = []
if num_hard < num_negatives:
easy_pool = max_indices[drop_topk + negative_pool_size:]
easy_pool_indices = neg_indices[easy_pool]
num_hard_negs = num_hard
num_easy_negs = num_negatives - num_hard_negs
sampled_easy_neg_indices = easy_pool_indices[
torch.randint(low=0, high=len(easy_pool_indices),
size=(num_easy_negs * num_unique_pos, ), dtype=torch.long)
]
sampled_hard_neg_indices = hard_pool_indices[
torch.randint(low=0, high=len(hard_pool_indices),
size=(num_hard_negs * num_unique_pos, ), dtype=torch.long)
]
if len(sampled_easy_neg_indices) != 0:
sampled_neg_indices = torch.cat([sampled_hard_neg_indices, sampled_easy_neg_indices], dim=0)
else:
sampled_neg_indices = sampled_hard_neg_indices
else: # directly take the top negatives
sampled_neg_indices = neg_indices[max_indices[drop_topk:drop_topk+len(sampled_pos_indices)]]
else:
sampled_neg_indices = neg_indices[
torch.randint(low=0, high=len(neg_indices), size=(len(sampled_pos_indices),), dtype=torch.long)
]
return sampled_pos_indices, sampled_neg_indices
def get_att_loss(self, scores, att_labels, target, words, vid_names, qids, q_lens, img_indices, boxes,
start_indices, num_negatives=2, use_hard_negatives=False, drop_topk=0):
""" compute ranking loss, use for loop to find the indices,
use advanced indexing to perform the real calculation
Build a list contains a quaduple
Args:
scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]
att_labels: list(tensor), each has dimension (#num_imgs, #num_words, #regions), not batched
target: 1D tensor (N, )
words: LongTensor (N, 5, Lqa)
vid_names: list(str) (N,)
qids: list(int), (N, )
q_lens: list(int), (N, )
img_indices: list(list(int)), (N, Li), or None
boxes: list(list(box)) of length N, each sublist represent an image,
each box contains the coordinates of xyxy, or None
num_negatives: number of negatives for each positive region
            use_hard_negatives: use hard negatives, i.e. select negatives with high scores
drop_topk: drop topk highest negatives (since the top negatives might be correct, they are just not labeled)
start_indices (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation. If with_ts, set to zero
Returns:
att_loss: loss value for the batch
att_predictions: (list) [{"gt": gt_scores, "pred": pred_scores}, ], used to calculate att. accuracy
"""
pos_container = [] # contains tuples of 5 elements, which are (batch_i, ca_i, img_i, word_i, region_i)
neg_container = []
for batch_idx in range(len(target)): # batch
ca_idx = target[batch_idx].cpu().item()
gt_score = att_labels[batch_idx] # num_img * (num_words, num_region)
start_idx = start_indices[batch_idx] # int
num_img = len(gt_score)
sen_l, _ = gt_score[0].shape
pred_score = scores[batch_idx, ca_idx, :num_img, :sen_l] # (num_img, num_words, num_region)
# find positive and negative indices
batch_pos_indices = []
batch_neg_indices = []
for img_idx, img_gt_score in enumerate(gt_score):
img_idx = start_idx + img_idx
img_pos_indices = torch.nonzero(img_gt_score) # (N_pos, 2) ==> (#words, #regions)
if len(img_pos_indices) == 0: # skip if no positive indices
continue
img_pos_indices = torch.cat([img_pos_indices.new_full([len(img_pos_indices), 1], img_idx),
img_pos_indices], dim=1) # (N_pos, 3) ==> (#img, #words, #regions)
img_neg_indices = torch.nonzero(img_gt_score == 0) # (N_neg, 2)
img_neg_indices = torch.cat([img_neg_indices.new_full([len(img_neg_indices), 1], img_idx),
img_neg_indices], dim=1) # (N_neg, 3)
batch_pos_indices.append(img_pos_indices)
batch_neg_indices.append(img_neg_indices)
if len(batch_pos_indices) == 0: # skip if empty ==> no gt label for the video
continue
batch_pos_indices = torch.cat(batch_pos_indices, dim=0) # (N_pos, 3) -->
batch_neg_indices = torch.cat(batch_neg_indices, dim=0) # (N_neg, 3)
# sample positives and negatives
available_img_indices = batch_pos_indices[:, 0].unique().tolist()
for img_idx in available_img_indices:
                # pos_indices for a certain img
img_idx_pos_indices = batch_pos_indices[batch_pos_indices[:, 0] == img_idx]
img_idx_neg_indices = batch_neg_indices[batch_neg_indices[:, 0] == img_idx]
available_word_indices = img_idx_pos_indices[:, 1].unique().tolist()
for word_idx in available_word_indices:
# positives and negatives for a given image-word pair, specified by img_idx-word_idx
img_idx_word_idx_pos_indices = img_idx_pos_indices[img_idx_pos_indices[:, 1] == word_idx]
img_idx_word_idx_neg_indices = img_idx_neg_indices[img_idx_neg_indices[:, 1] == word_idx]
# actually all the positives, not sampled pos
sampled_pos_indices, sampled_neg_indices = \
self.sample_negatives(pred_score,
img_idx_word_idx_pos_indices, img_idx_word_idx_neg_indices,
num_negatives=num_negatives, use_hard_negatives=use_hard_negatives,
negative_pool_size=self.negative_pool_size,
num_hard=self.num_hard, drop_topk=drop_topk)
base_indices = torch.LongTensor([[batch_idx, ca_idx]] * len(sampled_pos_indices)).\
to(sampled_pos_indices.device)
pos_container.append(torch.cat([base_indices, sampled_pos_indices], dim=1))
neg_container.append(torch.cat([base_indices, sampled_neg_indices], dim=1))
pos_container = torch.cat(pos_container, dim=0)
neg_container = torch.cat(neg_container, dim=0)
# contain all the predictions and gt labels in this batch, only consider the ones with gt labels
# also only consider the positive answer.
att_predictions = None
if not self.training and self.vfeat_flag:
att_predictions = dict(det_q=[],
det_ca=[])
unique_pos_container = np.unique(pos_container.cpu().numpy(), axis=0) # unique rows in the array
for row in unique_pos_container:
batch_idx, ca_idx, img_idx, word_idx, region_idx = row
start_idx = start_indices[batch_idx] # int
cur_q_len = q_lens[batch_idx]
num_region = att_labels[batch_idx][img_idx-start_idx].shape[1] # num_img * (num_words, num_region)
if len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()) != \
len(boxes[batch_idx][img_idx-start_idx]):
print("scores[batch_idx, ca_idx, img_idx, word_idx].data.cpu()",
len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()))
print("len(boxes[batch_idx][img_idx-start_idx])", len(boxes[batch_idx][img_idx-start_idx]))
print("boxes, batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx",
batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx)
print(row)
raise AssertionError
cur_det_data = {
"pred": scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu(),
"word": words[batch_idx, ca_idx, word_idx],
"qid": qids[batch_idx],
"vid_name": vid_names[batch_idx],
"img_idx": img_indices[batch_idx][img_idx], # full indices
"boxes": boxes[batch_idx][img_idx-start_idx] # located boxes
}
if word_idx < cur_q_len:
att_predictions["det_q"].append(cur_det_data)
else:
att_predictions["det_ca"].append(cur_det_data)
pos_scores = scores[pos_container[:, 0], pos_container[:, 1], pos_container[:, 2],
pos_container[:, 3], pos_container[:, 4]]
neg_scores = scores[neg_container[:, 0], neg_container[:, 1], neg_container[:, 2],
neg_container[:, 3], neg_container[:, 4]]
if self.att_loss_type == "hinge":
# max(0, m + S_pos - S_neg)
att_loss = torch.clamp(self.margin + neg_scores - pos_scores, min=0).sum()
elif self.att_loss_type == "lse":
# log[1 + exp(scale * (S_pos - S_neg))]
att_loss = torch.log1p(torch.exp(self.alpha * (neg_scores - pos_scores))).sum()
else:
raise NotImplementedError("Only support hinge and lse")
return att_loss, att_predictions
def get_att_prediction(self, scores, object_vocab, words, vid_names, qids, img_indices, boxes,
start_indices, score_thd=0.2):
""" compute ranking loss, use for loop to find the indices,
use advanced indexing to perform the real calculation
Build a list contains a quaduple
Args:
scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]
object_vocab: list, object word ids in the vocabulary
words: LongTensor (N, 5, Lqa)
vid_names: list(str) (N,)
qids: list(int), (N, )
img_indices: list(list(int)), (N, Li), or None
boxes: list(list(box)) of length N, each sublist represent an image,
each box contains the coordinates of xyxy, or None
start_indices (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation. If with_ts, set to zero
score_thd: only keep boxes with score higher than this value
Returns:
            att_predictions: list with one dict per question, keyed by answer index (0-4); each entry
                holds the accepted boxes, their scores and metadata, used to calculate att. accuracy
"""
# contain all the predictions and gt labels in this batch, only consider the ones with gt labels
# also only consider the positive answer.
att_predictions = None
if self.vfeat_flag:
att_predictions = []
for batch_idx in range(len(scores)):
start_idx = start_indices[batch_idx] # int
q_att_predictions = dict() # predictions associated with this question
for ans_idx in range(5):
q_att_predictions[ans_idx] = []
for img_idx_local in range(len(boxes[batch_idx])):
# img_idx_local: for the imgs with box anno
# img_idx_global: for all the imgs, including ones without box anno
img_idx_global = img_idx_local + start_idx
cur_img_scores = scores[batch_idx, ans_idx, img_idx_global] # (Lqa, Lr)
cur_words = words[batch_idx, ans_idx].tolist() # (Lqa, )
cur_img_boxes = boxes[batch_idx][img_idx_local]
for word_idx, w in enumerate(cur_words):
if w in object_vocab:
cur_word_region_scores = cur_img_scores[word_idx].data.cpu().numpy() # (Lr, )
accepted_region_ids = np.nonzero(cur_word_region_scores >= score_thd)[0].tolist()
accepted_region_scores = [float(cur_word_region_scores[i]) for i in accepted_region_ids]
accepted_region_boxes = [cur_img_boxes[i] for i in accepted_region_ids]
sorted_indices = np.argsort(accepted_region_scores)
accepted_region_scores = [accepted_region_scores[i] for i in sorted_indices]
accepted_region_boxes = [accepted_region_boxes[i] for i in sorted_indices]
cur_det_data = {
"pred": accepted_region_scores,
"bbox": accepted_region_boxes,
"word": int(words[batch_idx, ans_idx, word_idx]),
"qid": int(qids[batch_idx]),
"vid_name": vid_names[batch_idx],
"img_idx": img_indices[batch_idx][img_idx_global], # image file name id
}
q_att_predictions[ans_idx].append(cur_det_data)
att_predictions.append(q_att_predictions)
return att_predictions
|
from basic_functions import *
import csv
from collections import deque
inf = 1000
def table_phase0():
    """Build the phase-0 pruning tables: BFS from the solved state over the (edge-permutation,
    corner-orientation) and (edge-permutation, edge-orientation) coordinate pairs, writing one
    distance table per direction type to prun_phase0_co_ep_<i>.csv and prun_phase0_eo_ep_<i>.csv."""
trans_ep = []
with open('trans_ep_phase0.csv', mode='r') as f:
for line in map(str.strip, f):
trans_ep.append([int(i) for i in line.replace('\n', '').split(',')])
trans = []
with open('trans_co.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(2187)] for _ in range(495)] for _ in range(3)]
solved1 = ep2idx_phase0(list(range(12)))
solved2 = co2idx([0 for _ in range(8)])
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
sorted_candidate = sorted(list(candidate[0]))
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, False])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, True])
for i in range(3):
with open('prun_phase0_co_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
trans = []
with open('trans_eo.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(2048)] for _ in range(495)] for _ in range(3)]
solved1 = ep2idx_phase0(list(range(12)))
solved2 = eo2idx([0 for _ in range(12)])
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
n_last_rotated = False
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, n_last_rotated])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
n_last_rotated = True
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, n_last_rotated])
for i in range(3):
with open('prun_phase0_eo_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
def table_phase1():
    """Build the phase-1 pruning tables: BFS from the solved state over the phase-1 edge-permutation
    coordinate paired with the corner-permutation and with a second edge-permutation coordinate,
    writing prun_phase1_cp_ep_<i>.csv and prun_phase1_ep_ep_<i>.csv."""
trans_ep = []
with open('trans_ep_phase1_2.csv', mode='r') as f:
for line in map(str.strip, f):
trans_ep.append([int(i) for i in line.replace('\n', '').split(',')])
trans = []
with open('trans_cp.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(40320)] for _ in range(24)] for _ in range(3)]
solved1 = ep2idx_phase1_2(list(range(12)))
solved2 = cp2idx(list(range(8)))
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
sorted_candidate = sorted(list(candidate[1]))
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, False])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, True])
for i in range(3):
with open('prun_phase1_cp_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
trans = []
with open('trans_ep_phase1_1.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(40320)] for _ in range(24)] for _ in range(3)]
solved1 = ep2idx_phase1_2(list(range(12)))
solved2 = ep2idx_phase1_1(list(range(12)))
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, False])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, True])
for i in range(3):
with open('prun_phase1_ep_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
table_phase0()
table_phase1()
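# Running this module performs the BFS above and writes the pruning tables to
# prun_phase0_{co,eo}_ep_<i>.csv and prun_phase1_{cp,ep}_ep_<i>.csv for i in 0..2,
# one distance table per direction type.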
|
def numOfAbled(L, k):
    """Count the positions where a word of length k fits exactly in the grid,
    horizontally or vertically (a run of k ones bounded by the grid edge or a zero)."""
    n = len(L)
    horizontal = 0
    vertical = 0
    # Horizontal runs: k consecutive 1s in a row, bounded on both sides by the edge or a 0.
    for i in range(n):
        for j in range(n - k + 1):
            if all(L[i][j + m] == 1 for m in range(k)):
                left_ok = (j == 0) or (L[i][j - 1] == 0)
                right_ok = (j + k == n) or (L[i][j + k] == 0)
                if left_ok and right_ok:
                    horizontal += 1
    # Vertical runs: k consecutive 1s in a column, bounded on both sides by the edge or a 0.
    for j in range(n):
        for i in range(n - k + 1):
            if all(L[i + m][j] == 1 for m in range(k)):
                top_ok = (i == 0) or (L[i - 1][j] == 0)
                bottom_ok = (i + k == n) or (L[i + k][j] == 0)
                if top_ok and bottom_ok:
                    vertical += 1
    return horizontal + vertical
for t in range(int(input())):
    L = []
    N, K = map(int, input().split())
    for n in range(N):
        L.append(list(map(int, input().split())))
    print(f"#{t+1} {numOfAbled(L, K)}")
|
#!/usr/bin/env python
# Author: Dr. Konstantin Selyunin
# License: MIT
# Created: 2020.08.19
import logging
import os.path
import struct
from abc import abstractmethod, ABC
from typing import Union, Tuple
from .rsl_xml_svd.rsl_svd_parser import RslSvdParser
class ShearWaterRegisters(ABC):
def __init__(self, **kwargs):
self.svd_parser = RslSvdParser(svd_file=ShearWaterRegisters.find_svd('shearwater.svd'))
@staticmethod
def find_svd(svd_file_name: str):
parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
for root, dirs, files in os.walk(parent_dir):
if svd_file_name in files:
return os.path.join(root, svd_file_name)
@abstractmethod
def connect(self, *args, **kwargs):
pass
@abstractmethod
def read_register(self, reg_addr: int, **kw) -> Tuple[bool, bytes]:
pass
@abstractmethod
def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float, str], **kw):
pass
@property
def creg_com_settings(self):
"""
        The CREG_COM_SETTINGS register is used to set the board's serial port baud rate and to enable (or disable) the
automatic transmission of sensor data and estimated states (telemetry).
Payload structure:
        [31:28] : BAUD_RATE -- Sets the baud rate of the board's main serial port:
:return: BAUD_RATE as bitField;
"""
addr = 0x00
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for BAUD_RATE bit field
baud_rate_val = (reg.raw_value >> 28) & 0x000F
baud_rate_enum = reg.find_field_by(name='BAUD_RATE').find_enum_entry_by(value=baud_rate_val)
return reg, baud_rate_enum
@creg_com_settings.setter
def creg_com_settings(self, new_value):
addr = 0x00
self.write_register(addr, new_value)
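    # Illustrative decoding of the CREG_COM_SETTINGS payload (the raw value is made up):
    #   raw = 0x50000000
    #   baud_rate_val = (raw >> 28) & 0x000F   # -> 5, i.e. the BAUD_RATE bit field [31:28]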
@property
def creg_com_rates1(self):
"""
The CREG_COM_RATES1 register sets desired telemetry transmission rates in Hz for raw accelerometer 1, gyro 1,
gyro 2 and magnetometer 1 data. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : RAW_ACCEL_1_RATE -- Specifies the desired raw accelerometer 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_GYRO_1_RATE -- Specifies the desired raw gyro 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz
[15:8] : RAW_GYRO_2_RATE -- Specifies the desired raw gyro 2 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : RAW_MAG_1_RATE -- Specifies the desired raw magnetometer 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: RAW_ACCEL_1_RATE as uint8_t; RAW_GYRO_1_RATE as uint8_t; RAW_GYRO_2_RATE as uint8_t; RAW_MAG_1_RATE as uint8_t;
"""
addr = 0x01
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate
@creg_com_rates1.setter
def creg_com_rates1(self, new_value):
addr = 0x01
self.write_register(addr, new_value)
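    # Illustrative split of a CREG_COM_RATES1 payload into its four per-sensor rates
    # (the payload bytes are made up):
    #   struct.unpack('>BBBB', b'\x0a\x14\x1e\x28')   # -> (10, 20, 30, 40) Hz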
@property
def creg_com_rates2(self):
"""
The CREG_COM_RATES2 register sets desired telemetry transmission rates for the magnetometer 2, all raw data,
        and temperature data rate. The ALL_RAW_RATE setting takes priority over the individual raw sensor data
settings, i.e. whenever this bitfield is set, then the individual raw sensor settings are ignored and not
used. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : TEMP_RATE -- Specifies the desired broadcast rate for temperature data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_MAG_2_RATE -- Specifies the desired raw magnetometer 2 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_RAW_RATE -- Specifies the desired broadcast rate for all raw sensor data. If set, this overrides the broadcast rate setting for individual raw data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: TEMP_RATE as uint8_t; RAW_MAG_2_RATE as uint8_t; ALL_RAW_RATE as uint8_t;
"""
addr = 0x02
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
temp_rate, raw_mag_2_rate, all_raw_rate = struct.unpack('>BBxB', payload[0:4])
return reg, temp_rate, raw_mag_2_rate, all_raw_rate
@creg_com_rates2.setter
def creg_com_rates2(self, new_value):
addr = 0x02
self.write_register(addr, new_value)
@property
def creg_com_rates3(self):
"""
The CREG_COM_RATES3 register sets desired telemetry transmission rates for processed sensor data for the
sensors: the accelerometer 1, gyro 1, gyro 2, and magnetometer 1. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : PROC_ACCEL_1_RATE -- Specifies the desired broadcast rate for processed accelerometer 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : PROC_GYRO_1_RATE -- Specifies the desired broadcast rate for processed rate gyro 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
        [15:8]  : PROC_GYRO_2_RATE -- Specifies the desired broadcast rate for processed rate gyro 2 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : PROC_MAG_1_RATE -- Specifies the desired broadcast rate for processed magnetometer 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_ACCEL_1_RATE as uint8_t; PROC_GYRO_1_RATE as uint8_t; PROC_GYRO_2_RATE as uint8_t; PROC_MAG_1_RATE as uint8_t;
"""
addr = 0x03
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES3')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate
@creg_com_rates3.setter
def creg_com_rates3(self, new_value):
addr = 0x03
self.write_register(addr, new_value)
@property
def creg_com_rates4(self):
"""
The CREG_COM_RATES4 register defines the desired telemetry transmission rates for the processed data of the
magnetometer 2 and for all processed data. The ALL_PROC_RATE setting takes priority over the individual
processed sensor data settings, i.e. whenever this field is set, the individual processed sensor
transmission rate settings are ignored. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : PROC_MAG_2_RATE -- Specifies the desired broadcast rate for processed magnetometer 2 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_PROC_RATE -- Specifies the desired broadcast rate for all processed sensor data. If set, this overrides the broadcast rate setting for individual processed data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_MAG_2_RATE as uint8_t; ALL_PROC_RATE as uint8_t;
"""
addr = 0x04
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES4')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_mag_2_rate, all_proc_rate = struct.unpack('>BxxB', payload[0:4])
return reg, proc_mag_2_rate, all_proc_rate
@creg_com_rates4.setter
def creg_com_rates4(self, new_value):
addr = 0x04
self.write_register(addr, new_value)
@property
def creg_com_rates5(self):
"""
The CREG_COM_RATES5 register sets desired telemetry transmission rates for quaternions, Euler Angles,
position, and velocity estimates. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : QUAT_RATE -- Specifies the desired broadcast rate for quaternion data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : EULER_RATE -- Specifies the desired broadcast rate for Euler Angle data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[15:8] : POSITION_RATE -- Specifies the desired broadcast rate for position data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : VELOCITY_RATE -- Specifies the desired broadcast rate for velocity. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: QUAT_RATE as uint8_t; EULER_RATE as uint8_t; POSITION_RATE as uint8_t; VELOCITY_RATE as uint8_t;
"""
addr = 0x05
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES5')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_rate, euler_rate, position_rate, velocity_rate = struct.unpack('>BBBB', payload[0:4])
return reg, quat_rate, euler_rate, position_rate, velocity_rate
@creg_com_rates5.setter
def creg_com_rates5(self, new_value):
addr = 0x05
self.write_register(addr, new_value)
@property
def creg_com_rates6(self):
"""
The CREG_COM_RATES6 register sets desired telemetry transmission rates for pose (Euler/position packet),
health, and gyro bias estimates for the gyro 1 and gyro 2. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : POSE_RATE -- Specifies the desired broadcast rate for pose (Euler Angle and position) data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[19:16] : HEALTH_RATE -- Specifies the desired broadcast rate for the sensor health packet.
[15:8] : GYRO_BIAS_1_RATE -- Specifies the desired broadcast rate for gyro 1 bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : GYRO_BIAS_2_RATE -- Specifies the desired broadcast rate for gyro 2 bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: POSE_RATE as uint8_t; HEALTH_RATE as bitField; GYRO_BIAS_1_RATE as uint8_t; GYRO_BIAS_2_RATE as uint8_t;
"""
addr = 0x06
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pose_rate, gyro_bias_1_rate, gyro_bias_2_rate = struct.unpack('>BxBB', payload[0:4])
# find value for HEALTH_RATE bit field
health_rate_val = (reg.raw_value >> 16) & 0x000F
health_rate_enum = reg.find_field_by(name='HEALTH_RATE').find_enum_entry_by(value=health_rate_val)
return reg, pose_rate, health_rate_enum, gyro_bias_1_rate, gyro_bias_2_rate
@creg_com_rates6.setter
def creg_com_rates6(self, new_value):
addr = 0x06
self.write_register(addr, new_value)
@property
def creg_com_rates7(self):
"""
The CREG_COM_RATES7 register sets desired telemetry transmission rates in Hz for NMEA packets.
Payload structure:
[31:28] : NMEA_HEALTH_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style health packet.
[27:24] : NMEA_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style pose (Euler Angle/position) packet.
[23:20] : NMEA_ATTITUDE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style attitude packet.
[19:16] : NMEA_SENSOR_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style sensor data packet.
[15:12] : NMEA_RATES_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style rate data packet.
[11:8] : NMEA_GPS_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style GPS pose packet.
[7:4] : NMEA_QUAT_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style quaternion packet.
:return: NMEA_HEALTH_RATE as bitField; NMEA_POSE_RATE as bitField; NMEA_ATTITUDE_RATE as bitField; NMEA_SENSOR_RATE as bitField; NMEA_RATES_RATE as bitField; NMEA_GPS_POSE_RATE as bitField; NMEA_QUAT_RATE as bitField;
"""
addr = 0x07
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES7')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for NMEA_HEALTH_RATE bit field
nmea_health_rate_val = (reg.raw_value >> 28) & 0x000F
nmea_health_rate_enum = reg.find_field_by(name='NMEA_HEALTH_RATE').find_enum_entry_by(value=nmea_health_rate_val)
# find value for NMEA_POSE_RATE bit field
nmea_pose_rate_val = (reg.raw_value >> 24) & 0x000F
nmea_pose_rate_enum = reg.find_field_by(name='NMEA_POSE_RATE').find_enum_entry_by(value=nmea_pose_rate_val)
# find value for NMEA_ATTITUDE_RATE bit field
nmea_attitude_rate_val = (reg.raw_value >> 20) & 0x000F
nmea_attitude_rate_enum = reg.find_field_by(name='NMEA_ATTITUDE_RATE').find_enum_entry_by(value=nmea_attitude_rate_val)
# find value for NMEA_SENSOR_RATE bit field
nmea_sensor_rate_val = (reg.raw_value >> 16) & 0x000F
nmea_sensor_rate_enum = reg.find_field_by(name='NMEA_SENSOR_RATE').find_enum_entry_by(value=nmea_sensor_rate_val)
# find value for NMEA_RATES_RATE bit field
nmea_rates_rate_val = (reg.raw_value >> 12) & 0x000F
nmea_rates_rate_enum = reg.find_field_by(name='NMEA_RATES_RATE').find_enum_entry_by(value=nmea_rates_rate_val)
# find value for NMEA_GPS_POSE_RATE bit field
nmea_gps_pose_rate_val = (reg.raw_value >> 8) & 0x000F
nmea_gps_pose_rate_enum = reg.find_field_by(name='NMEA_GPS_POSE_RATE').find_enum_entry_by(value=nmea_gps_pose_rate_val)
# find value for NMEA_QUAT_RATE bit field
nmea_quat_rate_val = (reg.raw_value >> 4) & 0x000F
nmea_quat_rate_enum = reg.find_field_by(name='NMEA_QUAT_RATE').find_enum_entry_by(value=nmea_quat_rate_val)
return reg, nmea_health_rate_enum, nmea_pose_rate_enum, nmea_attitude_rate_enum, nmea_sensor_rate_enum, nmea_rates_rate_enum, nmea_gps_pose_rate_enum, nmea_quat_rate_enum
@creg_com_rates7.setter
def creg_com_rates7(self, new_value):
addr = 0x07
self.write_register(addr, new_value)
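# Illustrative usage sketch: each NMEA rate in CREG_COM_RATES7 occupies a 4-bit field, so
# a register word for the setter above can be composed by shifting nibble values into the
# bit positions listed in the docstring. The mapping from nibble value to an actual
# broadcast rate is device defined (enumerated in the SVD file), so the values below are
# placeholders only; 'um7' is a hypothetical driver instance.
#
#     nmea_health, nmea_pose, nmea_attitude = 1, 2, 0
#     value = (nmea_health << 28) | (nmea_pose << 24) | (nmea_attitude << 20)
#     um7.creg_com_rates7 = value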
@property
def creg_misc_settings(self):
"""
This register contains miscellaneous filter and sensor control options.
Payload structure:
[8] : PPS -- If set, this bit causes the TX2 pin on the IO Expansion header to be used as the PPS input from an external GPS module. PPS pulses will then be used to synchronize the system clock to UTC time of day.
[3] : ZG -- If set, this bit causes the device to attempt to measure the rate gyro bias on startup. The sensor must be stationary on startup for this feature to work properly.
[2] : Q -- If this bit is set, the sensor will run in quaternion mode instead of Euler Angle mode.
[1] : MAG1 -- If set, the magnetometer 1 will be used in state updates.
[0] : MAG2 -- If set, the magnetometer 2 will be used in state updates.
:return: PPS as bitField; ZG as bitField; Q as bitField; MAG1 as bitField; MAG2 as bitField;
"""
addr = 0x08
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MISC_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for PPS bit field
pps_val = (reg.raw_value >> 8) & 0x0001
pps_enum = reg.find_field_by(name='PPS').find_enum_entry_by(value=pps_val)
# find value for ZG bit field
zg_val = (reg.raw_value >> 3) & 0x0001
zg_enum = reg.find_field_by(name='ZG').find_enum_entry_by(value=zg_val)
# find value for Q bit field
q_val = (reg.raw_value >> 2) & 0x0001
q_enum = reg.find_field_by(name='Q').find_enum_entry_by(value=q_val)
# find value for MAG1 bit field
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
# find value for MAG2 bit field
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, pps_enum, zg_enum, q_enum, mag1_enum, mag2_enum
@creg_misc_settings.setter
def creg_misc_settings(self, new_value):
addr = 0x08
self.write_register(addr, new_value)
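# Illustrative usage sketch: CREG_MISC_SETTINGS is a plain bit mask, so a new register
# word can be composed from the bit positions documented above before assigning it to
# the setter. 'um7' is a hypothetical driver instance.
#
#     pps, zg, q, mag1, mag2 = 0, 1, 1, 1, 0
#     value = (pps << 8) | (zg << 3) | (q << 2) | (mag1 << 1) | (mag2 << 0)
#     um7.creg_misc_settings = value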
@property
def creg_gyro_1_meas_range(self):
"""
The CREG_GYRO_1_MEAS_RANGE register sets the desired measurement range for the gyro 1 sensor. If the range is
not set, the default value of 2000 deg/s is used as the measurement range.
Payload structure:
[1:0] : MEAS_GYRO1 -- Specifies the desired measurement range for the gyro 1 measurements.
:return: MEAS_GYRO1 as bitField;
"""
addr = 0x09
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO1 bit field
meas_gyro1_val = (reg.raw_value >> 0) & 0x0003
meas_gyro1_enum = reg.find_field_by(name='MEAS_GYRO1').find_enum_entry_by(value=meas_gyro1_val)
return reg, meas_gyro1_enum
@creg_gyro_1_meas_range.setter
def creg_gyro_1_meas_range(self, new_value):
addr = 0x09
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_x(self):
"""
This register sets the x-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_X as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_x,
@creg_gyro_1_trim_x.setter
def creg_gyro_1_trim_x(self, new_value):
addr = 0x0A
self.write_register(addr, new_value)
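# Illustrative usage sketch (assumption-laden): the trim registers hold 32-bit IEEE-754
# floats. If write_register() expects the raw 32-bit register word rather than a Python
# float, the trim value can be converted explicitly before assignment; 'um7' is a
# hypothetical driver instance.
#
#     import struct
#     trim = 0.02
#     raw_word, = struct.unpack('>I', struct.pack('>f', trim))
#     um7.creg_gyro_1_trim_x = raw_word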
@property
def creg_gyro_1_trim_y(self):
"""
This register sets the y-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_Y as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_y,
@creg_gyro_1_trim_y.setter
def creg_gyro_1_trim_y(self, new_value):
addr = 0x0B
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_z(self):
"""
This register sets the z-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_Z as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_z,
@creg_gyro_1_trim_z.setter
def creg_gyro_1_trim_z(self, new_value):
addr = 0x0C
self.write_register(addr, new_value)
@property
def creg_gyro_2_meas_range(self):
"""
The CREG_GYRO_2_MEAS_RANGE register sets the desired measurement range for the gyro 2 sensor. If the range is
not set, the default value of 2000 deg/s is used as the measurement range.
Payload structure:
[1:0] : MEAS_GYRO2 -- Specifies the desired measurement range for the gyro 2 measurements.
:return: MEAS_GYRO2 as bitField;
"""
addr = 0x0D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO2 bit field
meas_gyro2_val = (reg.raw_value >> 0) & 0x0003
meas_gyro2_enum = reg.find_field_by(name='MEAS_GYRO2').find_enum_entry_by(value=meas_gyro2_val)
return reg, meas_gyro2_enum
@creg_gyro_2_meas_range.setter
def creg_gyro_2_meas_range(self, new_value):
addr = 0x0D
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_x(self):
"""
This register sets the x-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_X as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_x,
@creg_gyro_2_trim_x.setter
def creg_gyro_2_trim_x(self, new_value):
addr = 0x0E
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_y(self):
"""
This register sets the y-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_Y as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_y,
@creg_gyro_2_trim_y.setter
def creg_gyro_2_trim_y(self, new_value):
addr = 0x0F
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_z(self):
"""
This register sets the z-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_Z as float;
"""
addr = 0x10
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_z,
@creg_gyro_2_trim_z.setter
def creg_gyro_2_trim_z(self, new_value):
addr = 0x10
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_1(self):
"""
Row 1, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_1 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_1,
@creg_mag_1_cal1_1.setter
def creg_mag_1_cal1_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_2(self):
"""
Row 1, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_2 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_2,
@creg_mag_1_cal1_2.setter
def creg_mag_1_cal1_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_3(self):
"""
Row 1, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_3 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_3,
@creg_mag_1_cal1_3.setter
def creg_mag_1_cal1_3(self, new_value):
addr = 0x13
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_1(self):
"""
Row 2, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_1 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_1,
@creg_mag_1_cal2_1.setter
def creg_mag_1_cal2_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_2(self):
"""
Row 2, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_2 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_2,
@creg_mag_1_cal2_2.setter
def creg_mag_1_cal2_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_3(self):
"""
Row 2, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_3 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_3,
@creg_mag_1_cal2_3.setter
def creg_mag_1_cal2_3(self, new_value):
addr = 0x16
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_1(self):
"""
Row 3, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_1 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_1,
@creg_mag_1_cal3_1.setter
def creg_mag_1_cal3_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_2(self):
"""
Row 3, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_2 as float;
"""
addr = 0x18
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_2,
@creg_mag_1_cal3_2.setter
def creg_mag_1_cal3_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_3(self):
"""
Row 3, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_3 as float;
"""
addr = 0x19
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_3,
@creg_mag_1_cal3_3.setter
def creg_mag_1_cal3_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_x(self):
"""
This register stores a bias term for the magnetometer 1 x-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_X as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_x,
@creg_mag_1_bias_x.setter
def creg_mag_1_bias_x(self, new_value):
addr = 0x1A
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_y(self):
"""
This register stores a bias term for the magnetometer 1 y-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_Y as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_y,
@creg_mag_1_bias_y.setter
def creg_mag_1_bias_y(self, new_value):
addr = 0x1B
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_z(self):
"""
This register stores a bias term for the magnetometer 1 z-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_Z as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_z,
@creg_mag_1_bias_z.setter
def creg_mag_1_bias_z(self, new_value):
addr = 0x1C
self.write_register(addr, new_value)
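# Illustrative usage sketch: the nine CREG_MAG_1_CAL* registers form a 3x3 soft-iron
# correction matrix and the three CREG_MAG_1_BIAS_* registers a hard-iron bias vector.
# A calibrated measurement is conventionally computed as C @ (raw - bias); the snippet
# below assumes that convention and a hypothetical raw 3-vector 'raw_mag' ('um7' is a
# hypothetical driver instance, and each property returns a (register, value) tuple).
#
#     import numpy as np
#     C = np.array([[um7.creg_mag_1_cal1_1[1], um7.creg_mag_1_cal1_2[1], um7.creg_mag_1_cal1_3[1]],
#                   [um7.creg_mag_1_cal2_1[1], um7.creg_mag_1_cal2_2[1], um7.creg_mag_1_cal2_3[1]],
#                   [um7.creg_mag_1_cal3_1[1], um7.creg_mag_1_cal3_2[1], um7.creg_mag_1_cal3_3[1]]])
#     bias = np.array([um7.creg_mag_1_bias_x[1], um7.creg_mag_1_bias_y[1], um7.creg_mag_1_bias_z[1]])
#     mag_calibrated = C @ (np.array(raw_mag) - bias)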
@property
def creg_mag_2_cal1_1(self):
"""
Row 1, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_1 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_1,
@creg_mag_2_cal1_1.setter
def creg_mag_2_cal1_1(self, new_value):
addr = 0x1D
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_2(self):
"""
Row 1, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_2 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_2,
@creg_mag_2_cal1_2.setter
def creg_mag_2_cal1_2(self, new_value):
addr = 0x1E
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_3(self):
"""
Row 1, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_3 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_3,
@creg_mag_2_cal1_3.setter
def creg_mag_2_cal1_3(self, new_value):
addr = 0x1F
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_1(self):
"""
Row 2, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_1 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_1,
@creg_mag_2_cal2_1.setter
def creg_mag_2_cal2_1(self, new_value):
addr = 0x20
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_2(self):
"""
Row 2, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_2 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_2,
@creg_mag_2_cal2_2.setter
def creg_mag_2_cal2_2(self, new_value):
addr = 0x21
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_3(self):
"""
Row 2, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_3 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_3,
@creg_mag_2_cal2_3.setter
def creg_mag_2_cal2_3(self, new_value):
addr = 0x22
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_1(self):
"""
Row 3, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_1 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_1,
@creg_mag_2_cal3_1.setter
def creg_mag_2_cal3_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_2(self):
"""
Row 3, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_2 as float;
"""
addr = 0x24
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_2,
@creg_mag_2_cal3_2.setter
def creg_mag_2_cal3_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_3(self):
"""
Row 3, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_3 as float;
"""
addr = 0x25
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_3,
@creg_mag_2_cal3_3.setter
def creg_mag_2_cal3_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_x(self):
"""
This register stores a bias term for the magnetometer 2 x-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_X as float;
"""
addr = 0x26
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_x,
@creg_mag_2_bias_x.setter
def creg_mag_2_bias_x(self, new_value):
addr = 0x26
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_y(self):
"""
This register stores a bias term for the magnetometer 2 y-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_Y as float;
"""
addr = 0x27
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_y,
@creg_mag_2_bias_y.setter
def creg_mag_2_bias_y(self, new_value):
addr = 0x27
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_z(self):
"""
This register stores a bias term for the magnetometer 2 z-axis for hard-iron calibration. This term can be
computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_Z as float;
"""
addr = 0x28
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_z,
@creg_mag_2_bias_z.setter
def creg_mag_2_bias_z(self, new_value):
addr = 0x28
self.write_register(addr, new_value)
@property
def creg_accel_1_meas_range(self):
"""
The CREG_ACCEL_1_MEAS_RANGE register sets the desired measurement range for the accelerometer 1. If the range
is not set, the default value of ±2 g is used as the measurement range.
Payload structure:
[1:0] : MEAS_ACC1 -- Specifies the desired measurement range for the accelerometer 1 measurements.
:return: MEAS_ACC1 as bitField;
"""
addr = 0x29
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_ACC1 bit field
meas_acc1_val = (reg.raw_value >> 0) & 0x0003
meas_acc1_enum = reg.find_field_by(name='MEAS_ACC1').find_enum_entry_by(value=meas_acc1_val)
return reg, meas_acc1_enum
@creg_accel_1_meas_range.setter
def creg_accel_1_meas_range(self, new_value):
addr = 0x29
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_1(self):
"""
Row 1, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_1 as float;
"""
addr = 0x2A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_1,
@creg_accel_1_cal1_1.setter
def creg_accel_1_cal1_1(self, new_value):
addr = 0x2A
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_2(self):
"""
Row 1, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_2 as float;
"""
addr = 0x2B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_2,
@creg_accel_1_cal1_2.setter
def creg_accel_1_cal1_2(self, new_value):
addr = 0x2B
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_3(self):
"""
Row 1, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_3 as float;
"""
addr = 0x2C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_3,
@creg_accel_1_cal1_3.setter
def creg_accel_1_cal1_3(self, new_value):
addr = 0x2C
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_1(self):
"""
Row 2, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_1 as float;
"""
addr = 0x2D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_1,
@creg_accel_1_cal2_1.setter
def creg_accel_1_cal2_1(self, new_value):
addr = 0x2D
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_2(self):
"""
Row 2, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_2 as float;
"""
addr = 0x2E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_2,
@creg_accel_1_cal2_2.setter
def creg_accel_1_cal2_2(self, new_value):
addr = 0x2E
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_3(self):
"""
Row 2, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_3 as float;
"""
addr = 0x2F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_3,
@creg_accel_1_cal2_3.setter
def creg_accel_1_cal2_3(self, new_value):
addr = 0x2F
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_1(self):
"""
Row 3, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_1 as float;
"""
addr = 0x30
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_1,
@creg_accel_1_cal3_1.setter
def creg_accel_1_cal3_1(self, new_value):
addr = 0x30
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_2(self):
"""
Row 3, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_2 as float;
"""
addr = 0x31
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_2,
@creg_accel_1_cal3_2.setter
def creg_accel_1_cal3_2(self, new_value):
addr = 0x31
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_3(self):
"""
Row 3, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_3 as float;
"""
addr = 0x32
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_3,
@creg_accel_1_cal3_3.setter
def creg_accel_1_cal3_3(self, new_value):
addr = 0x32
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_x(self):
"""
This register stores a bias term for the accelerometer 1 x-axis for bias calibration. This term can be
computed by running the calibrate accelerometers command in the Redshift Labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_X -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_X as float;
"""
addr = 0x33
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_x,
@creg_accel_1_bias_x.setter
def creg_accel_1_bias_x(self, new_value):
addr = 0x33
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_y(self):
"""
This register stores a bias term for the accelerometer 1 y-axis for bias calibration. This term can be
computed by running the calibrate accelerometers command in the Redshift Labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_Y as float;
"""
addr = 0x34
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_y,
@creg_accel_1_bias_y.setter
def creg_accel_1_bias_y(self, new_value):
addr = 0x34
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_z(self):
"""
This register stores a bias term for the accelerometer 1 z-axis for bias calibration. This term can be
computed by running the calibrate accelerometers command in the Redshift Labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_Z as float;
"""
addr = 0x35
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_z,
@creg_accel_1_bias_z.setter
def creg_accel_1_bias_z(self, new_value):
addr = 0x35
self.write_register(addr, new_value)
@property
def dreg_health(self):
"""
The health register reports the current status of the sensors on the board. Monitoring the health register is
the easiest way to watch for problems that could affect the behavior of the board or the status of its
sensors. Analogous to the health register, the status of the GPS signal can be monitored in the
DREG_GPS_HEALTH register.
Payload structure:
[8] : OVF -- Overflow bit. This bit is set if the board is attempting to transmit data over the serial port faster than is allowed given the baud-rate. If this bit is set, reduce broadcast rates in the COM_RATES registers.
[7] : ACC1_N -- This bit is set if the sensor detects that the norm of the accelerometer measurement is too far away from 1G to be used (i.e. during aggressive acceleration or high vibration).
[6] : MAG1_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement for the magnetometer 1 is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[5] : MAG2_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement for the magnetometer 2 is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[4] : ACCEL1 -- This bit will be set if the accelerometer 1 fails to initialize on startup.
[3] : GYRO1 -- This bit will be set if the rate gyro 1 fails to initialize on startup.
[2] : GYRO2 -- This bit will be set if the rate gyro 2 fails to initialize on startup.
[1] : MAG1 -- This bit will be set if the magnetometer 1 fails to initialize on startup.
[0] : MAG2 -- This bit will be set if the magnetometer 2 fails to initialize on startup.
:return: OVF as bitField; ACC1_N as bitField; MAG1_N as bitField; MAG2_N as bitField; ACCEL1 as bitField; GYRO1 as bitField; GYRO2 as bitField; MAG1 as bitField; MAG2 as bitField;
"""
addr = 0x55
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_HEALTH')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for OVF bit field
ovf_val = (reg.raw_value >> 8) & 0x0001
ovf_enum = reg.find_field_by(name='OVF').find_enum_entry_by(value=ovf_val)
# find value for ACC1_N bit field
acc1_n_val = (reg.raw_value >> 7) & 0x0001
acc1_n_enum = reg.find_field_by(name='ACC1_N').find_enum_entry_by(value=acc1_n_val)
# find value for MAG1_N bit field
mag1_n_val = (reg.raw_value >> 6) & 0x0001
mag1_n_enum = reg.find_field_by(name='MAG1_N').find_enum_entry_by(value=mag1_n_val)
# find value for MAG2_N bit field
mag2_n_val = (reg.raw_value >> 5) & 0x0001
mag2_n_enum = reg.find_field_by(name='MAG2_N').find_enum_entry_by(value=mag2_n_val)
# find value for ACCEL1 bit field
accel1_val = (reg.raw_value >> 4) & 0x0001
accel1_enum = reg.find_field_by(name='ACCEL1').find_enum_entry_by(value=accel1_val)
# find value for GYRO1 bit field
gyro1_val = (reg.raw_value >> 3) & 0x0001
gyro1_enum = reg.find_field_by(name='GYRO1').find_enum_entry_by(value=gyro1_val)
# find value for GYRO2 bit field
gyro2_val = (reg.raw_value >> 2) & 0x0001
gyro2_enum = reg.find_field_by(name='GYRO2').find_enum_entry_by(value=gyro2_val)
# find value for MAG1 bit field
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
# find value for MAG2 bit field
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, ovf_enum, acc1_n_enum, mag1_n_enum, mag2_n_enum, accel1_enum, gyro1_enum, gyro2_enum, mag1_enum, mag2_enum
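# Illustrative usage sketch: when only a quick pass/fail check is needed, the health word
# can also be tested with plain bit masks that mirror the docstring layout, without going
# through the SVD enum lookups performed above. 'um7' is a hypothetical driver instance.
#
#     reg, *_ = um7.dreg_health
#     overflow = bool(reg.raw_value & (1 << 8))
#     accel_1_init_failed = bool(reg.raw_value & (1 << 4))
#     gyro_1_init_failed = bool(reg.raw_value & (1 << 3))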
@property
def dreg_gyro_1_raw_xy(self):
"""
Contains raw X and Y axis rate gyro 1 data.
Payload structure:
[31:16] : GYRO_1_RAW_X -- Gyro X (2s complement 16-bit integer)
[15:0] : GYRO_1_RAW_Y -- Gyro Y (2s complement 16-bit integer)
:return: GYRO_1_RAW_X as int16_t; GYRO_1_RAW_Y as int16_t;
"""
addr = 0x56
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_1_raw_x, gyro_1_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_1_raw_x, gyro_1_raw_y
@property
def dreg_gyro_1_raw_z(self):
"""
Contains raw Z axis rate gyro 1 data.
Payload structure:
[31:16] : GYRO_1_RAW_Z -- Gyro Z (2s complement 16-bit integer)
:return: GYRO_1_RAW_Z as int16_t;
"""
addr = 0x57
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_1_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_1_raw_z,
@property
def dreg_gyro_1_raw_time(self):
"""
Contains time at which the last rate gyro 1 data was acquired.
Payload structure:
[31:0] : GYRO_1_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: GYRO_1_RAW_TIME as float;
"""
addr = 0x58
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_raw_time,
@property
def dreg_gyro_2_raw_xy(self):
"""
Contains raw X and Y axis rate gyro 2 data.
Payload structure:
[31:16] : GYRO_2_RAW_X -- Gyro X (2s complement 16-bit integer)
[15:0] : GYRO_2_RAW_Y -- Gyro Y (2s complement 16-bit integer)
:return: GYRO_2_RAW_X as int16_t; GYRO_2_RAW_Y as int16_t;
"""
addr = 0x59
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_2_raw_x, gyro_2_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_2_raw_x, gyro_2_raw_y
@property
def dreg_gyro_2_raw_z(self):
"""
Contains raw Z axis rate gyro 2 data.
Payload structure:
[31:16] : GYRO_2_RAW_Z -- Gyro Z (2s complement 16-bit integer)
:return: GYRO_2_RAW_Z as int16_t;
"""
addr = 0x5A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_2_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_2_raw_z,
@property
def dreg_gyro_2_raw_time(self):
"""
Contains time at which the last rate gyro 2 data was acquired.
Payload structure:
[31:0] : GYRO_2_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: GYRO_2_RAW_TIME as float;
"""
addr = 0x5B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_raw_time,
@property
def dreg_accel_1_raw_xy(self):
"""
Contains raw X and Y axis accelerometer 1 data.
Payload structure:
[31:16] : ACCEL_1_RAW_X -- Accel X (2s complement 16-bit integer)
[15:0] : ACCEL_1_RAW_Y -- Accel Y (2s complement 16-bit integer)
:return: ACCEL_1_RAW_X as int16_t; ACCEL_1_RAW_Y as int16_t;
"""
addr = 0x5C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
accel_1_raw_x, accel_1_raw_y = struct.unpack('>hh', payload[0:4])
return reg, accel_1_raw_x, accel_1_raw_y
@property
def dreg_accel_1_raw_z(self):
"""
Contains raw Z axis accelerometer 1 data.
Payload structure:
[31:16] : ACCEL_1_RAW_Z -- Accel Z (2s complement 16-bit integer)
:return: ACCEL_1_RAW_Z as int16_t;
"""
addr = 0x5D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
accel_1_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, accel_1_raw_z,
@property
def dreg_accel_1_raw_time(self):
"""
Contains time at which the last raw data sample for the accelerometer 1 was acquired.
Payload structure:
[31:0] : ACCEL_1_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_RAW_TIME as float;
"""
addr = 0x5E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, accel_1_raw_time,
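# Illustrative usage sketch: a raw accelerometer sample is split across two data registers
# (XY and Z) plus a timestamp register, so a caller can combine the three property reads
# defined above into one sample tuple. 'um7' is a hypothetical driver instance; note that
# each property returns None if the underlying register read fails.
#
#     _, ax, ay = um7.dreg_accel_1_raw_xy
#     _, az = um7.dreg_accel_1_raw_z
#     _, t = um7.dreg_accel_1_raw_time
#     accel_raw_sample = (t, ax, ay, az)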
@property
def dreg_mag_1_raw_x(self):
"""
Contains raw x axis magnetometer 1 data.
Payload structure:
[31:0] : MAG_1_RAW_X -- 32-bit signed integer value
:return: MAG_1_RAW_X as int32_t;
"""
addr = 0x5F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_X')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_x, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_x,
@property
def dreg_mag_1_raw_y(self):
"""
Contains raw y axis magnetometer 1 data.
Payload structure:
[31:0] : MAG_1_RAW_Y -- 32-bit signed integer value
:return: MAG_1_RAW_Y as int32_t;
"""
addr = 0x60
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Y')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_y, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_y,
@property
def dreg_mag_1_raw_z(self):
"""
Contains raw z axis magnetometer 1 data.
Payload structure:
[31:0] : MAG_1_RAW_Z -- 32-bit signed integer value
:return: MAG_1_RAW_Z as int32_t;
"""
addr = 0x61
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Z')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_z, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_z,
@property
def dreg_mag_1_raw_time(self):
"""
Contains time at which the last magnetometer data from the magnetometer 1 was acquired.
Payload structure:
[31:0] : MAG_1_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: MAG_1_RAW_TIME as float;
"""
addr = 0x62
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_1_raw_time,
@property
def dreg_mag_2_raw_xy(self):
"""
Contains raw X and Y axis magnetometer 2 data.
Payload structure:
[31:16] : MAG_2_RAW_X -- Magnetometer X (2s complement 16-bit integer)
[15:0] : MAG_2_RAW_Y -- Magnetometer Y (2s complement 16-bit integer)
:return: MAG_2_RAW_X as int16_t; MAG_2_RAW_Y as int16_t;
"""
addr = 0x63
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
mag_2_raw_x, mag_2_raw_y = struct.unpack('>hh', payload[0:4])
return reg, mag_2_raw_x, mag_2_raw_y
@property
def dreg_mag_2_raw_z(self):
"""
Contains raw Z axis magnetometer 2 data.
Payload structure:
[31:16] : MAG_2_RAW_Z -- Magnetometer Z (2s complement 16-bit integer)
:return: MAG_2_RAW_Z as int16_t;
"""
addr = 0x64
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
mag_2_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, mag_2_raw_z,
@property
def dreg_mag_2_raw_time(self):
"""
Contains time at which the last magnetometer data from the magnetometer 2 was acquired.
Payload structure:
[31:0] : MAG_2_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: MAG_2_RAW_TIME as float;
"""
addr = 0x65
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_2_raw_time,
@property
def dreg_temperature(self):
"""
Contains the temperature output of the onboard temperature sensor.
Payload structure:
[31:0] : TEMPERATURE -- Temperature in degrees Celsius (32-bit IEEE Floating Point)
:return: TEMPERATURE as float;
"""
addr = 0x66
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature, = struct.unpack('>f', payload[0:4])
return reg, temperature,
@property
def dreg_temperature_time(self):
"""
Contains time at which the last temperature was acquired.
Payload structure:
[31:0] : TEMPERATURE_TIME -- 32-bit IEEE Floating Point Value
:return: TEMPERATURE_TIME as float;
"""
addr = 0x67
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature_time, = struct.unpack('>f', payload[0:4])
return reg, temperature_time,
@property
def dreg_gyro_1_proc_x(self):
"""
Contains the actual measured angular rate from the gyro 1 for the x axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_1_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_X as float;
"""
addr = 0x68
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_x,
@property
def dreg_gyro_1_proc_y(self):
"""
Contains the actual measured angular rate from the gyro 1 for the y axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_1_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_Y as float;
"""
addr = 0x69
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_y,
@property
def dreg_gyro_1_proc_z(self):
"""
Contains the actual measured angular rate from the gyro 1 for the z axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_1_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_Z as float;
"""
addr = 0x6A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_z,
@property
def dreg_gyro_1_proc_time(self):
"""
Contains the time at which the last rate gyro data from the gyro 1 was measured.
Payload structure:
[31:0] : GYRO_1_PROC_TIME -- Gyro 1 time stamp (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_TIME as float;
"""
addr = 0x6B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_time,
@property
def dreg_gyro_2_proc_x(self):
"""
Contains the actual measured angular rate from the gyro 2 for the x axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_2_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_X as float;
"""
addr = 0x6C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_x,
@property
def dreg_gyro_2_proc_y(self):
"""
Contains the actual measured angular rate from the gyro 2 for the y axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_2_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_Y as float;
"""
addr = 0x6D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_y,
@property
def dreg_gyro_2_proc_z(self):
"""
Contains the actual measured angular rate from the gyro 2 for the z axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_2_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_Z as float;
"""
addr = 0x6E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_z,
@property
def dreg_gyro_2_proc_time(self):
"""
Contains the time at which the last rate gyro data from the gyro 2 was measured.
Payload structure:
[31:0] : GYRO_2_PROC_TIME -- Gyro 2 time stamp (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_TIME as float;
"""
addr = 0x6F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_time,
@property
def dreg_accel_1_proc_x(self):
"""
Contains the actual measured acceleration from the accelerometer 1 for the x axis in m/s2 after calibration
has been applied.
Payload structure:
[31:0] : ACCEL_1_PROC_X -- Acceleration X in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_X as float;
"""
addr = 0x70
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_x,
@property
def dreg_accel_1_proc_y(self):
"""
Contains the actual measured acceleration from the accelerometer 1 for the y axis in m/s2 after calibration
has been applied.
Payload structure:
[31:0] : ACCEL_1_PROC_Y -- Acceleration Y in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_Y as float;
"""
addr = 0x71
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_y,
@property
def dreg_accel_1_proc_z(self):
"""
Contains the actual measured acceleration from the accelerometer 1 for the z axis in m/s2 after calibration
has been applied.
Payload structure:
[31:0] : ACCEL_1_PROC_Z -- Acceleration Z in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_Z as float;
"""
addr = 0x72
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_z,
@property
def dreg_accel_1_proc_time(self):
"""
Contains the time at which the last acceleration data from the accelerometer 1 was measured.
Payload structure:
[31:0] : ACCEL_1_PROC_TIME -- Accelerometer 1 time stamp (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_TIME as float;
"""
addr = 0x73
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_time,
@property
def dreg_mag_1_proc_x(self):
"""
Contains the actual measured magnetic field from the magnetometer 1 for the x axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_1_PROC_X -- Magnetometer X in mT (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_X as float;
"""
addr = 0x74
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_x,
@property
def dreg_mag_1_proc_y(self):
"""
Contains the actual measured magnetic field from the magnetometer 1 for the y axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_1_PROC_Y -- Magnetometer Y in mT (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_Y as float;
"""
addr = 0x75
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_y,
@property
def dreg_mag_1_proc_z(self):
"""
Contains the actual measured magnetic field from the magnetometer 1 for the z axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_1_PROC_Z -- Magnetometer Z in mT (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_Z as float;
"""
addr = 0x76
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_z,
@property
def dreg_mag_1_norm(self):
"""
Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 1 computed over the
calibrated values.
Payload structure:
[31:0] : MAG_1_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
:return: MAG_1_NORM as float;
"""
addr = 0x77
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_1_norm,
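    # The "L2-norm" above is the Euclidean magnitude of the calibrated field vector, i.e.
    # sqrt(MAG_1_PROC_X**2 + MAG_1_PROC_Y**2 + MAG_1_PROC_Z**2). A minimal usage sketch,
    # assuming `um` is an instance of this class and the read succeeds:
    #     _, norm = um.dreg_mag_1_norm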
@property
def dreg_mag_1_proc_time(self):
"""
Contains the time stamp at which the calibrated magnetometer 1 data was acquired.
Payload structure:
[31:0] : MAG_1_PROC_TIME -- Magnetometer 1 time stamp (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_TIME as float;
"""
addr = 0x78
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_time,
@property
def dreg_mag_2_proc_x(self):
"""
Contains the actual measured magnetic field from the magnetometer 2 for the x axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_2_PROC_X -- Magnetometer X in mT (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_X as float;
"""
addr = 0x79
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_x,
@property
def dreg_mag_2_proc_y(self):
"""
Contains the actual measured magnetic field from the magnetometer 2 for the y axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_2_PROC_Y -- Magnetometer Y in mT (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_Y as float;
"""
addr = 0x7A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_y,
@property
def dreg_mag_2_proc_z(self):
"""
Contains the actual measured magnetic field from the magnetometer 2 for the z axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_2_PROC_Z -- Magnetometer Z in mT (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_Z as float;
"""
addr = 0x7B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_z,
@property
def dreg_mag_2_norm(self):
"""
Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 2 computed over the
calibrated values.
Payload structure:
[31:0] : MAG_2_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
:return: MAG_2_NORM as float;
"""
addr = 0x7C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_2_norm,
@property
def dreg_mag_2_proc_time(self):
"""
Contains the time stamp at which the calibrated magnetometer 2 data was acquired.
Payload structure:
[31:0] : MAG_2_PROC_TIME -- Magnetometer 2 time stamp (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_TIME as float;
"""
addr = 0x7D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_time,
@property
def dreg_quat_ab(self):
"""
Contains the first two components (a and b) of the estimated quaternion attitude.
Payload structure:
[31:16] : QUAT_A -- First quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
[15:0] : QUAT_B -- Second quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
:return: QUAT_A as int16_t; QUAT_B as int16_t;
"""
addr = 0x7E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_AB')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_a, quat_b = struct.unpack('>hh', payload[0:4])
return reg, quat_a, quat_b
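    # The quaternion components are transferred as 16-bit signed integers; dividing by
    # 29789.09091 (per the register description above) recovers the unitless component.
    # A minimal usage sketch, assuming `um` is an instance of this class and both reads
    # succeed:
    #     _, quat_a, quat_b = um.dreg_quat_ab
    #     _, quat_c, quat_d = um.dreg_quat_cd
    #     q = [c / 29789.09091 for c in (quat_a, quat_b, quat_c, quat_d)]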
@property
def dreg_quat_cd(self):
"""
        Contains the last two components (c and d) of the estimated quaternion attitude.
Payload structure:
[31:16] : QUAT_C -- Third quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
[15:0] : QUAT_D -- Fourth quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
:return: QUAT_C as int16_t; QUAT_D as int16_t;
"""
addr = 0x7F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_CD')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_c, quat_d = struct.unpack('>hh', payload[0:4])
return reg, quat_c, quat_d
@property
def dreg_quat_time(self):
"""
Contains the time that the quaternion attitude was estimated.
Payload structure:
[31:0] : QUAT_TIME -- Quaternion time (32-bit IEEE Floating Point Value)
:return: QUAT_TIME as float;
"""
addr = 0x80
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
quat_time, = struct.unpack('>f', payload[0:4])
return reg, quat_time,
@property
def dreg_euler_phi_theta(self):
"""
        Contains the roll (PHI) and pitch (THETA) angle estimates.
Payload structure:
[31:16] : PHI -- Roll angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
[15:0] : THETA -- Pitch angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
:return: PHI as int16_t; THETA as int16_t;
"""
addr = 0x81
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi, theta = struct.unpack('>hh', payload[0:4])
return reg, phi, theta
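    # The Euler angles are transferred as 16-bit signed integers; dividing by 91.02222
    # (approximately 32768 / 360) yields the angle in degrees. A minimal usage sketch,
    # assuming `um` is an instance of this class and both reads succeed:
    #     _, phi_raw, theta_raw = um.dreg_euler_phi_theta
    #     _, psi_raw = um.dreg_euler_psi
    #     roll_deg, pitch_deg, yaw_deg = (v / 91.02222 for v in (phi_raw, theta_raw, psi_raw))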
@property
def dreg_euler_psi(self):
"""
Contains the yaw angle estimate.
Payload structure:
[31:16] : PSI -- Yaw angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
:return: PSI as int16_t;
"""
addr = 0x82
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi, = struct.unpack('>hxx', payload[0:4])
return reg, psi,
@property
def dreg_euler_phi_theta_dot(self):
"""
        Contains the roll and pitch rate estimates.
Payload structure:
[31:16] : PHI_DOT -- Roll rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
[15:0] : THETA_DOT -- Pitch rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
:return: PHI_DOT as int16_t; THETA_DOT as int16_t;
"""
addr = 0x83
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA_DOT')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi_dot, theta_dot = struct.unpack('>hh', payload[0:4])
return reg, phi_dot, theta_dot
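    # The Euler angle rates are transferred as 16-bit signed integers; dividing by 16.0
    # (per the register description above) yields degrees per second, e.g. a raw value of
    # 160 corresponds to 10.0 deg/s.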
@property
def dreg_euler_psi_dot(self):
"""
Contains the yaw rate estimate.
Payload structure:
[31:16] : PSI_DOT -- Yaw rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
:return: PSI_DOT as int16_t;
"""
addr = 0x84
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI_DOT')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi_dot, = struct.unpack('>hxx', payload[0:4])
return reg, psi_dot,
@property
def dreg_euler_time(self):
"""
Contains the time that the Euler Angles were estimated.
Payload structure:
[31:0] : EULER_TIME -- Euler time (32-bit IEEE Floating Point Value)
:return: EULER_TIME as float;
"""
addr = 0x85
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
euler_time, = struct.unpack('>f', payload[0:4])
return reg, euler_time,
@property
def dreg_position_north(self):
"""
        Contains the measured north position in meters, relative to the home latitude specified in CREG_HOME_NORTH.
Payload structure:
[31:0] : POSITION_NORTH -- North Position (32-bit IEEE Floating Point Value)
:return: POSITION_NORTH as float;
"""
addr = 0x86
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_north, = struct.unpack('>f', payload[0:4])
return reg, position_north,
@property
def dreg_position_east(self):
"""
        Contains the measured east position in meters, relative to the home longitude specified in CREG_HOME_EAST.
Payload structure:
[31:0] : POSITION_EAST -- East Position (32-bit IEEE Floating Point Value)
:return: POSITION_EAST as float;
"""
addr = 0x87
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_east, = struct.unpack('>f', payload[0:4])
return reg, position_east,
@property
def dreg_position_up(self):
"""
        Contains the measured altitude in meters, relative to the home altitude specified in CREG_HOME_UP.
Payload structure:
[31:0] : POSITION_UP -- Altitude (32-bit IEEE Floating Point Value)
:return: POSITION_UP as float;
"""
addr = 0x88
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_up, = struct.unpack('>f', payload[0:4])
return reg, position_up,
@property
def dreg_position_time(self):
"""
Contains the time at which the position was acquired.
Payload structure:
[31:0] : POSITION_TIME -- Position Time (32-bit IEEE Floating Point Value)
:return: POSITION_TIME as float;
"""
addr = 0x89
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_time, = struct.unpack('>f', payload[0:4])
return reg, position_time,
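    # The POSITION_NORTH / POSITION_EAST / POSITION_UP registers above are expressed in
    # meters relative to the home point configured in CREG_HOME_NORTH, CREG_HOME_EAST and
    # CREG_HOME_UP. A minimal read sketch, assuming `um` is an instance of this class and
    # every read succeeds:
    #     _, north = um.dreg_position_north
    #     _, east = um.dreg_position_east
    #     _, up = um.dreg_position_up
    #     _, t = um.dreg_position_time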
@property
def dreg_velocity_north(self):
"""
Contains the measured north velocity in m/s.
Payload structure:
[31:0] : VELOCITY_NORTH -- North Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_NORTH as float;
"""
addr = 0x8A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_north, = struct.unpack('>f', payload[0:4])
return reg, velocity_north,
@property
def dreg_velocity_east(self):
"""
Contains the measured east velocity in m/s.
Payload structure:
[31:0] : VELOCITY_EAST -- East Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_EAST as float;
"""
addr = 0x8B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_east, = struct.unpack('>f', payload[0:4])
return reg, velocity_east,
@property
def dreg_velocity_up(self):
"""
        Contains the measured upward (altitude) velocity in m/s.
Payload structure:
[31:0] : VELOCITY_UP -- Altitude Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_UP as float;
"""
addr = 0x8C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_up, = struct.unpack('>f', payload[0:4])
return reg, velocity_up,
@property
def dreg_velocity_time(self):
"""
Contains the time at which the velocity was measured.
Payload structure:
[31:0] : VELOCITY_TIME -- Velocity time (32-bit IEEE Floating Point Value)
:return: VELOCITY_TIME as float;
"""
addr = 0x8D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_time, = struct.unpack('>f', payload[0:4])
return reg, velocity_time,
@property
def dreg_gyro_1_bias_x(self):
"""
Contains the estimated x-axis bias for the gyro 1 in degrees/s.
Payload structure:
[31:0] : GYRO_1_BIAS_X -- Gyro 1 bias X (32-bit IEEE Floating Point Value)
:return: GYRO_1_BIAS_X as float;
"""
addr = 0x8E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_x,
@property
def dreg_gyro_1_bias_y(self):
"""
Contains the estimated y-axis bias for the gyro 1 in degrees/s.
Payload structure:
[31:0] : GYRO_1_BIAS_Y -- Gyro 1 bias Y (32-bit IEEE Floating Point Value)
:return: GYRO_1_BIAS_Y as float;
"""
addr = 0x8F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_y,
@property
def dreg_gyro_1_bias_z(self):
"""
Contains the estimated z-axis bias for the gyro 1 in degrees/s.
Payload structure:
[31:0] : GYRO_1_BIAS_Z -- Gyro 1 bias Z (32-bit IEEE Floating Point Value)
:return: GYRO_1_BIAS_Z as float;
"""
addr = 0x90
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_z,
@property
def dreg_gyro_2_bias_x(self):
"""
Contains the estimated x-axis bias for the gyro 2 in degrees/s.
Payload structure:
[31:0] : GYRO_2_BIAS_X -- Gyro 2 bias X (32-bit IEEE Floating Point Value)
:return: GYRO_2_BIAS_X as float;
"""
addr = 0x91
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_x,
@property
def dreg_gyro_2_bias_y(self):
"""
Contains the estimated y-axis bias for the gyro 2 in degrees/s.
Payload structure:
[31:0] : GYRO_2_BIAS_Y -- Gyro 2 bias Y (32-bit IEEE Floating Point Value)
:return: GYRO_2_BIAS_Y as float;
"""
addr = 0x92
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_y,
@property
def dreg_gyro_2_bias_z(self):
"""
Contains the estimated z-axis bias for the gyro 2 in degrees/s.
Payload structure:
[31:0] : GYRO_2_BIAS_Z -- Gyro 2 bias Z (32-bit IEEE Floating Point Value)
:return: GYRO_2_BIAS_Z as float;
"""
addr = 0x93
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_z,
@property
def get_fw_build_id(self):
"""
        Firmware build identification string: a four-byte ASCII character sequence that identifies the
        firmware series.
Payload structure:
[31:0] : FW_BUILD_ID -- Firmware Build ID string
:return: FW_BUILD_ID as string;
"""
addr = 0xAA
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_ID')
reg.raw_value, = struct.unpack('>I', payload[0:4])
fw_build_id = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return fw_build_id
@property
def get_fw_build_version(self):
"""
        Firmware build version uniquely identifies the firmware programmed into the board. The response is
        four bytes long and contains the major version number, the minor version number, and the build number.
Payload structure:
[31:24] : VERSION_MAJOR -- 8-bit unsigned integer major version number
[23:16] : VERSION_MINOR -- 8-bit unsigned integer minor version number
[15:0] : BUILD_ID -- 16-bit unsigned integer build ID number
:return: VERSION_MAJOR as uint8_t; VERSION_MINOR as uint8_t; BUILD_ID as uint16_t;
"""
addr = 0xAB
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
version_major, version_minor, build_id = struct.unpack('>BBH', payload[0:4])
return reg, version_major, version_minor, build_id
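    # A minimal sketch of reading the firmware version, assuming `um` is an instance of
    # this class and the read succeeds:
    #     _, major, minor, build = um.get_fw_build_version
    #     print(f"firmware {major}.{minor} (build {build})")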
@property
def flash_commit(self):
raise RuntimeError('flash_commit has no getter! The register flash_commit is write-only!')
@flash_commit.setter
def flash_commit(self, new_value):
addr = 0xAC
self.write_register(addr, new_value)
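    # flash_commit above and the registers that follow (through enable_ukf) are write-only
    # command triggers: reading the property raises RuntimeError, and assigning a value
    # sends a write to the corresponding command address. The value expected by the
    # firmware is not documented here; the line below is only a sketch of the property
    # interface and assumes a value of 1 is acceptable:
    #     um.zero_gyros = 1  # hypothetical command value; consult the device documentation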
@property
def reset_to_factory(self):
raise RuntimeError('reset_to_factory has no getter! The register reset_to_factory is write-only!')
@reset_to_factory.setter
def reset_to_factory(self, new_value):
addr = 0xAD
self.write_register(addr, new_value)
@property
def zero_gyros(self):
raise RuntimeError('zero_gyros has no getter! The register zero_gyros is write-only!')
@zero_gyros.setter
def zero_gyros(self, new_value):
addr = 0xAE
self.write_register(addr, new_value)
@property
def set_home_position(self):
raise RuntimeError('set_home_position has no getter! The register set_home_position is write-only!')
@set_home_position.setter
def set_home_position(self, new_value):
addr = 0xB0
self.write_register(addr, new_value)
@property
def set_mag_reference(self):
raise RuntimeError('set_mag_reference has no getter! The register set_mag_reference is write-only!')
@set_mag_reference.setter
def set_mag_reference(self, new_value):
addr = 0xB1
self.write_register(addr, new_value)
@property
def calibrate_accelerometers(self):
raise RuntimeError('calibrate_accelerometers has no getter! The register calibrate_accelerometers is write-only!')
@calibrate_accelerometers.setter
def calibrate_accelerometers(self, new_value):
addr = 0xB2
self.write_register(addr, new_value)
@property
def reset_fusion(self):
raise RuntimeError('reset_fusion has no getter! The register reset_fusion is write-only!')
@reset_fusion.setter
def reset_fusion(self, new_value):
addr = 0xB3
self.write_register(addr, new_value)
@property
def enable_zupt(self):
raise RuntimeError('enable_zupt has no getter! The register enable_zupt is write-only!')
@enable_zupt.setter
def enable_zupt(self, new_value):
addr = 0xB4
self.write_register(addr, new_value)
@property
def euler_mode(self):
raise RuntimeError('euler_mode has no getter! The register euler_mode is write-only!')
@euler_mode.setter
def euler_mode(self, new_value):
addr = 0xB5
self.write_register(addr, new_value)
@property
def quaternion_mode(self):
raise RuntimeError('quaternion_mode has no getter! The register quaternion_mode is write-only!')
@quaternion_mode.setter
def quaternion_mode(self, new_value):
addr = 0xB6
self.write_register(addr, new_value)
@property
def enable_rt_calibration(self):
raise RuntimeError('enable_rt_calibration has no getter! The register enable_rt_calibration is write-only!')
@enable_rt_calibration.setter
def enable_rt_calibration(self, new_value):
addr = 0xB7
self.write_register(addr, new_value)
@property
def en_mag_anomaly_detection(self):
raise RuntimeError('en_mag_anomaly_detection has no getter! The register en_mag_anomaly_detection is write-only!')
@en_mag_anomaly_detection.setter
def en_mag_anomaly_detection(self, new_value):
addr = 0xB8
self.write_register(addr, new_value)
@property
def run_self_tests(self):
raise RuntimeError('run_self_tests has no getter! The register run_self_tests is write-only!')
@run_self_tests.setter
def run_self_tests(self, new_value):
addr = 0xB9
self.write_register(addr, new_value)
@property
def enable_external_event(self):
raise RuntimeError('enable_external_event has no getter! The register enable_external_event is write-only!')
@enable_external_event.setter
def enable_external_event(self, new_value):
addr = 0xBA
self.write_register(addr, new_value)
@property
def enable_gnns_fusion(self):
raise RuntimeError('enable_gnns_fusion has no getter! The register enable_gnns_fusion is write-only!')
@enable_gnns_fusion.setter
def enable_gnns_fusion(self, new_value):
addr = 0xBB
self.write_register(addr, new_value)
@property
def enable_usr_euler_output(self):
raise RuntimeError('enable_usr_euler_output has no getter! The register enable_usr_euler_output is write-only!')
@enable_usr_euler_output.setter
def enable_usr_euler_output(self, new_value):
addr = 0xBC
self.write_register(addr, new_value)
@property
def enable_dead_reckoning(self):
raise RuntimeError('enable_dead_reckoning has no getter! The register enable_dead_reckoning is write-only!')
@enable_dead_reckoning.setter
def enable_dead_reckoning(self, new_value):
addr = 0xBD
self.write_register(addr, new_value)
@property
def enable_heave_sway_surge(self):
raise RuntimeError('enable_heave_sway_surge has no getter! The register enable_heave_sway_surge is write-only!')
@enable_heave_sway_surge.setter
def enable_heave_sway_surge(self, new_value):
addr = 0xBE
self.write_register(addr, new_value)
@property
def enable_ukf(self):
raise RuntimeError('enable_ukf has no getter! The register enable_ukf is write-only!')
@enable_ukf.setter
def enable_ukf(self, new_value):
addr = 0xBF
self.write_register(addr, new_value)
@property
def board_unique_id_1(self):
"""
        First 32 bits of the 64-bit board unique identifier. The unique identifier cannot be modified by
        the user.
Payload structure:
[31:0] : BOARD_UNIQUE_ID_1_BITS -- Board unique ID bits
:return: BOARD_UNIQUE_ID_1_BITS as uint32_t;
"""
addr = 0xFD
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_1_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_1_bits,
@property
def board_unique_id_2(self):
"""
        Last 32 bits of the 64-bit board unique identifier. The unique identifier cannot be modified by
        the user.
Payload structure:
[31:0] : BOARD_UNIQUE_ID_2_BITS -- Board unique ID bits
:return: BOARD_UNIQUE_ID_2_BITS as uint32_t;
"""
addr = 0xFE
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_2_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_2_bits,
@property
def protocol_version(self):
"""
String version of the protocol.
Payload structure:
[31:0] : PROTOCOL_VERSION_STR -- Protocol version string
:return: PROTOCOL_VERSION_STR as string;
"""
addr = 0xFF
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='PROTOCOL_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
protocol_version_str = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return protocol_version_str
@property
def hidden_gyro_1_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_VARIANCE as float;
"""
addr = 0x00
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_variance,
@hidden_gyro_1_variance.setter
def hidden_gyro_1_variance(self, new_value):
addr = 0x00
self.write_register(addr, new_value, hidden=True)
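    # hidden_gyro_1_variance above and the HIDDEN_* registers that follow use the same
    # access pattern, but pass hidden=True so the transfer targets the hidden register
    # space. A minimal sketch, assuming `um` is an instance of this class and the read
    # succeeds:
    #     _, variance = um.hidden_gyro_1_variance   # read from the hidden register space
    #     um.hidden_gyro_1_variance = variance      # write the same value back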
@property
def hidden_gyro_2_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_VARIANCE as float;
"""
addr = 0x01
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_variance,
@hidden_gyro_2_variance.setter
def hidden_gyro_2_variance(self, new_value):
addr = 0x01
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_VARIANCE as float;
"""
addr = 0x02
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_variance,
@hidden_accel_1_variance.setter
def hidden_accel_1_variance(self, new_value):
addr = 0x02
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_VARIANCE as float;
"""
addr = 0x03
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_variance,
@hidden_mag_1_variance.setter
def hidden_mag_1_variance(self, new_value):
addr = 0x03
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_VARIANCE as float;
"""
addr = 0x04
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_variance,
@hidden_mag_2_variance.setter
def hidden_mag_2_variance(self, new_value):
addr = 0x04
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_course_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GPS_COURSE_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_COURSE_VARIANCE as float;
"""
addr = 0x05
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_COURSE_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_course_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_course_variance,
@hidden_gps_course_variance.setter
def hidden_gps_course_variance(self, new_value):
addr = 0x05
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_position_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GPS_POSITION_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_POSITION_VARIANCE as float;
"""
addr = 0x06
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_POSITION_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_position_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_position_variance,
@hidden_gps_position_variance.setter
def hidden_gps_position_variance(self, new_value):
addr = 0x06
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_velocity_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GPS_VELOCITY_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_VELOCITY_VARIANCE as float;
"""
addr = 0x07
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_VELOCITY_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_velocity_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_velocity_variance,
@hidden_gps_velocity_variance.setter
def hidden_gps_velocity_variance(self, new_value):
addr = 0x07
self.write_register(addr, new_value, hidden=True)
@property
def hidden_static_press_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_STATIC_PRESS_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_STATIC_PRESS_VARIANCE as float;
"""
addr = 0x08
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_STATIC_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_static_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_static_press_variance,
@hidden_static_press_variance.setter
def hidden_static_press_variance(self, new_value):
addr = 0x08
self.write_register(addr, new_value, hidden=True)
@property
def hidden_diff_press_variance(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_DIFF_PRESS_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_DIFF_PRESS_VARIANCE as float;
"""
addr = 0x09
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_DIFF_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_diff_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_diff_press_variance,
@hidden_diff_press_variance.setter
def hidden_diff_press_variance(self, new_value):
addr = 0x09
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_uvw(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_UVW -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_UVW as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_UVW')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_uvw, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_uvw,
@hidden_q_uvw.setter
def hidden_q_uvw(self, new_value):
addr = 0x0A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_quaternion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_QUATERNION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_QUATERNION as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_QUATERNION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_quaternion, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_quaternion,
@hidden_q_quaternion.setter
def hidden_q_quaternion(self, new_value):
addr = 0x0B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_gps_position(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_GPS_POSITION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_GPS_POSITION as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_GPS_POSITION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_gps_position, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_gps_position,
@hidden_q_gps_position.setter
def hidden_q_gps_position(self, new_value):
addr = 0x0C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_bias(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_BIAS -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_BIAS as float;
"""
addr = 0x0D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_BIAS')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_bias, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_bias,
@hidden_q_bias.setter
def hidden_q_bias(self, new_value):
addr = 0x0D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_euler_angles(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_Q_EULER_ANGLES -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_EULER_ANGLES as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_EULER_ANGLES')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_euler_angles, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_euler_angles,
@hidden_q_euler_angles.setter
def hidden_q_euler_angles(self, new_value):
addr = 0x0E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_low_vg_accel_noise_factor(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_low_vg_accel_noise_factor, = struct.unpack('>f', payload[0:4])
return reg, hidden_low_vg_accel_noise_factor,
@hidden_low_vg_accel_noise_factor.setter
def hidden_low_vg_accel_noise_factor(self, new_value):
addr = 0x0F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_groundspeed(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GROUNDSPEED -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GROUNDSPEED as float;
"""
addr = 0x10
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GROUNDSPEED')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_groundspeed, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_groundspeed,
@hidden_lpf_tau_groundspeed.setter
def hidden_lpf_tau_groundspeed(self, new_value):
addr = 0x10
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GYRO_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GYRO_1 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_1,
@hidden_lpf_tau_gyro_1.setter
def hidden_lpf_tau_gyro_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GYRO_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GYRO_2 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_2,
@hidden_lpf_tau_gyro_2.setter
def hidden_lpf_tau_gyro_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_accel_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_ACCEL_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_ACCEL_1 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_ACCEL_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_accel_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_accel_1,
@hidden_lpf_tau_accel_1.setter
def hidden_lpf_tau_accel_1(self, new_value):
addr = 0x13
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_MAG_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_MAG_1 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_1,
@hidden_lpf_tau_mag_1.setter
def hidden_lpf_tau_mag_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_LPF_TAU_MAG_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_MAG_2 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_2,
@hidden_lpf_tau_mag_2.setter
def hidden_lpf_tau_mag_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_0 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_0,
@hidden_c_gyro_1_bias_x_pow_0.setter
def hidden_c_gyro_1_bias_x_pow_0(self, new_value):
addr = 0x16
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_1 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_1,
@hidden_c_gyro_1_bias_x_pow_1.setter
def hidden_c_gyro_1_bias_x_pow_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_2 as float;
"""
addr = 0x18
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_2,
@hidden_c_gyro_1_bias_x_pow_2.setter
def hidden_c_gyro_1_bias_x_pow_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_3 as float;
"""
addr = 0x19
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_3,
@hidden_c_gyro_1_bias_x_pow_3.setter
def hidden_c_gyro_1_bias_x_pow_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_0 as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_0,
@hidden_c_gyro_1_bias_y_pow_0.setter
def hidden_c_gyro_1_bias_y_pow_0(self, new_value):
addr = 0x1A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_1 as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_1,
@hidden_c_gyro_1_bias_y_pow_1.setter
def hidden_c_gyro_1_bias_y_pow_1(self, new_value):
addr = 0x1B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_2 as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_2,
@hidden_c_gyro_1_bias_y_pow_2.setter
def hidden_c_gyro_1_bias_y_pow_2(self, new_value):
addr = 0x1C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_3 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_3,
@hidden_c_gyro_1_bias_y_pow_3.setter
def hidden_c_gyro_1_bias_y_pow_3(self, new_value):
addr = 0x1D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_0 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_0,
@hidden_c_gyro_1_bias_z_pow_0.setter
def hidden_c_gyro_1_bias_z_pow_0(self, new_value):
addr = 0x1E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_1 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_1,
@hidden_c_gyro_1_bias_z_pow_1.setter
def hidden_c_gyro_1_bias_z_pow_1(self, new_value):
addr = 0x1F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_2 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_2,
@hidden_c_gyro_1_bias_z_pow_2.setter
def hidden_c_gyro_1_bias_z_pow_2(self, new_value):
addr = 0x20
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_3 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_3,
@hidden_c_gyro_1_bias_z_pow_3.setter
def hidden_c_gyro_1_bias_z_pow_3(self, new_value):
addr = 0x21
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_0 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_0,
@hidden_c_gyro_1_scale_x_pow_0.setter
def hidden_c_gyro_1_scale_x_pow_0(self, new_value):
addr = 0x22
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_1 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_1,
@hidden_c_gyro_1_scale_x_pow_1.setter
def hidden_c_gyro_1_scale_x_pow_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_2 as float;
"""
addr = 0x24
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_2,
@hidden_c_gyro_1_scale_x_pow_2.setter
def hidden_c_gyro_1_scale_x_pow_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_3 as float;
"""
addr = 0x25
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_3,
@hidden_c_gyro_1_scale_x_pow_3.setter
def hidden_c_gyro_1_scale_x_pow_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_0 as float;
"""
addr = 0x26
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_0,
@hidden_c_gyro_1_scale_y_pow_0.setter
def hidden_c_gyro_1_scale_y_pow_0(self, new_value):
addr = 0x26
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_1 as float;
"""
addr = 0x27
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_1,
@hidden_c_gyro_1_scale_y_pow_1.setter
def hidden_c_gyro_1_scale_y_pow_1(self, new_value):
addr = 0x27
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_2 as float;
"""
addr = 0x28
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_2,
@hidden_c_gyro_1_scale_y_pow_2.setter
def hidden_c_gyro_1_scale_y_pow_2(self, new_value):
addr = 0x28
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_3 as float;
"""
addr = 0x29
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_3,
@hidden_c_gyro_1_scale_y_pow_3.setter
def hidden_c_gyro_1_scale_y_pow_3(self, new_value):
addr = 0x29
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_0 as float;
"""
addr = 0x2A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_0,
@hidden_c_gyro_1_scale_z_pow_0.setter
def hidden_c_gyro_1_scale_z_pow_0(self, new_value):
addr = 0x2A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_1 as float;
"""
addr = 0x2B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_1,
@hidden_c_gyro_1_scale_z_pow_1.setter
def hidden_c_gyro_1_scale_z_pow_1(self, new_value):
addr = 0x2B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_2 as float;
"""
addr = 0x2C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_2,
@hidden_c_gyro_1_scale_z_pow_2.setter
def hidden_c_gyro_1_scale_z_pow_2(self, new_value):
addr = 0x2C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_3 as float;
"""
addr = 0x2D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_3,
@hidden_c_gyro_1_scale_z_pow_3.setter
def hidden_c_gyro_1_scale_z_pow_3(self, new_value):
addr = 0x2D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_1 as float;
"""
addr = 0x2E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_1,
@hidden_gyro_1_alignment1_1.setter
def hidden_gyro_1_alignment1_1(self, new_value):
addr = 0x2E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_2 as float;
"""
addr = 0x2F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_2,
@hidden_gyro_1_alignment1_2.setter
def hidden_gyro_1_alignment1_2(self, new_value):
addr = 0x2F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_3 as float;
"""
addr = 0x30
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_3,
@hidden_gyro_1_alignment1_3.setter
def hidden_gyro_1_alignment1_3(self, new_value):
addr = 0x30
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_1 as float;
"""
addr = 0x31
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_1,
@hidden_gyro_1_alignment2_1.setter
def hidden_gyro_1_alignment2_1(self, new_value):
addr = 0x31
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_2 as float;
"""
addr = 0x32
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_2,
@hidden_gyro_1_alignment2_2.setter
def hidden_gyro_1_alignment2_2(self, new_value):
addr = 0x32
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_3 as float;
"""
addr = 0x33
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_3,
@hidden_gyro_1_alignment2_3.setter
def hidden_gyro_1_alignment2_3(self, new_value):
addr = 0x33
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_1 as float;
"""
addr = 0x34
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_1,
@hidden_gyro_1_alignment3_1.setter
def hidden_gyro_1_alignment3_1(self, new_value):
addr = 0x34
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_2 as float;
"""
addr = 0x35
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_2,
@hidden_gyro_1_alignment3_2.setter
def hidden_gyro_1_alignment3_2(self, new_value):
addr = 0x35
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_3 as float;
"""
addr = 0x36
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_3,
@hidden_gyro_1_alignment3_3.setter
def hidden_gyro_1_alignment3_3(self, new_value):
addr = 0x36
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_0 as float;
"""
addr = 0x37
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_0,
@hidden_c_gyro_2_bias_x_pow_0.setter
def hidden_c_gyro_2_bias_x_pow_0(self, new_value):
addr = 0x37
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_1 as float;
"""
addr = 0x38
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_1,
@hidden_c_gyro_2_bias_x_pow_1.setter
def hidden_c_gyro_2_bias_x_pow_1(self, new_value):
addr = 0x38
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_2 as float;
"""
addr = 0x39
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_2,
@hidden_c_gyro_2_bias_x_pow_2.setter
def hidden_c_gyro_2_bias_x_pow_2(self, new_value):
addr = 0x39
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_3 as float;
"""
addr = 0x3A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_3,
@hidden_c_gyro_2_bias_x_pow_3.setter
def hidden_c_gyro_2_bias_x_pow_3(self, new_value):
addr = 0x3A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_0 as float;
"""
addr = 0x3B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_0,
@hidden_c_gyro_2_bias_y_pow_0.setter
def hidden_c_gyro_2_bias_y_pow_0(self, new_value):
addr = 0x3B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_1 as float;
"""
addr = 0x3C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_1,
@hidden_c_gyro_2_bias_y_pow_1.setter
def hidden_c_gyro_2_bias_y_pow_1(self, new_value):
addr = 0x3C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_2 as float;
"""
addr = 0x3D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_2,
@hidden_c_gyro_2_bias_y_pow_2.setter
def hidden_c_gyro_2_bias_y_pow_2(self, new_value):
addr = 0x3D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_3 as float;
"""
addr = 0x3E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_3,
@hidden_c_gyro_2_bias_y_pow_3.setter
def hidden_c_gyro_2_bias_y_pow_3(self, new_value):
addr = 0x3E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_0 as float;
"""
addr = 0x3F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_0,
@hidden_c_gyro_2_bias_z_pow_0.setter
def hidden_c_gyro_2_bias_z_pow_0(self, new_value):
addr = 0x3F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_1 as float;
"""
addr = 0x40
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_1,
@hidden_c_gyro_2_bias_z_pow_1.setter
def hidden_c_gyro_2_bias_z_pow_1(self, new_value):
addr = 0x40
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_2 as float;
"""
addr = 0x41
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_2,
@hidden_c_gyro_2_bias_z_pow_2.setter
def hidden_c_gyro_2_bias_z_pow_2(self, new_value):
addr = 0x41
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_3 as float;
"""
addr = 0x42
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_3,
@hidden_c_gyro_2_bias_z_pow_3.setter
def hidden_c_gyro_2_bias_z_pow_3(self, new_value):
addr = 0x42
self.write_register(addr, new_value, hidden=True)
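    # Interpretation sketch (assumption, not stated in the generated docstrings):
    # the *_POW_0 .. *_POW_3 registers read like per-axis polynomial coefficients,
    # constant through cubic term.  If that holds, a compensated value for some
    # independent variable t (e.g. temperature) could be evaluated by a caller as:
    #
    #     c0 = dev.hidden_c_gyro_2_bias_x_pow_0[1]
    #     c1 = dev.hidden_c_gyro_2_bias_x_pow_1[1]
    #     c2 = dev.hidden_c_gyro_2_bias_x_pow_2[1]
    #     c3 = dev.hidden_c_gyro_2_bias_x_pow_3[1]
    #     bias_x = c0 + c1 * t + c2 * t**2 + c3 * t**3
    #
    # Here `dev` is a hypothetical driver instance and [1] selects the decoded
    # float from the (register, value) tuple returned by each getter.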
@property
def hidden_c_gyro_2_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_0 as float;
"""
addr = 0x43
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_0,
@hidden_c_gyro_2_scale_x_pow_0.setter
def hidden_c_gyro_2_scale_x_pow_0(self, new_value):
addr = 0x43
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_1 as float;
"""
addr = 0x44
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_1,
@hidden_c_gyro_2_scale_x_pow_1.setter
def hidden_c_gyro_2_scale_x_pow_1(self, new_value):
addr = 0x44
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_2 as float;
"""
addr = 0x45
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_2,
@hidden_c_gyro_2_scale_x_pow_2.setter
def hidden_c_gyro_2_scale_x_pow_2(self, new_value):
addr = 0x45
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_3 as float;
"""
addr = 0x46
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_3,
@hidden_c_gyro_2_scale_x_pow_3.setter
def hidden_c_gyro_2_scale_x_pow_3(self, new_value):
addr = 0x46
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_0 as float;
"""
addr = 0x47
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_0,
@hidden_c_gyro_2_scale_y_pow_0.setter
def hidden_c_gyro_2_scale_y_pow_0(self, new_value):
addr = 0x47
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_1 as float;
"""
addr = 0x48
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_1,
@hidden_c_gyro_2_scale_y_pow_1.setter
def hidden_c_gyro_2_scale_y_pow_1(self, new_value):
addr = 0x48
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_2 as float;
"""
addr = 0x49
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_2,
@hidden_c_gyro_2_scale_y_pow_2.setter
def hidden_c_gyro_2_scale_y_pow_2(self, new_value):
addr = 0x49
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_3 as float;
"""
addr = 0x4A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_3,
@hidden_c_gyro_2_scale_y_pow_3.setter
def hidden_c_gyro_2_scale_y_pow_3(self, new_value):
addr = 0x4A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_0 as float;
"""
addr = 0x4B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_0,
@hidden_c_gyro_2_scale_z_pow_0.setter
def hidden_c_gyro_2_scale_z_pow_0(self, new_value):
addr = 0x4B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_1 as float;
"""
addr = 0x4C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_1,
@hidden_c_gyro_2_scale_z_pow_1.setter
def hidden_c_gyro_2_scale_z_pow_1(self, new_value):
addr = 0x4C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_2 as float;
"""
addr = 0x4D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_2,
@hidden_c_gyro_2_scale_z_pow_2.setter
def hidden_c_gyro_2_scale_z_pow_2(self, new_value):
addr = 0x4D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_3 as float;
"""
addr = 0x4E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_3,
@hidden_c_gyro_2_scale_z_pow_3.setter
def hidden_c_gyro_2_scale_z_pow_3(self, new_value):
addr = 0x4E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_1 as float;
"""
addr = 0x4F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_1,
@hidden_gyro_2_alignment1_1.setter
def hidden_gyro_2_alignment1_1(self, new_value):
addr = 0x4F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_2 as float;
"""
addr = 0x50
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_2,
@hidden_gyro_2_alignment1_2.setter
def hidden_gyro_2_alignment1_2(self, new_value):
addr = 0x50
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_3 as float;
"""
addr = 0x51
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_3,
@hidden_gyro_2_alignment1_3.setter
def hidden_gyro_2_alignment1_3(self, new_value):
addr = 0x51
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_1 as float;
"""
addr = 0x52
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_1,
@hidden_gyro_2_alignment2_1.setter
def hidden_gyro_2_alignment2_1(self, new_value):
addr = 0x52
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_2 as float;
"""
addr = 0x53
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_2,
@hidden_gyro_2_alignment2_2.setter
def hidden_gyro_2_alignment2_2(self, new_value):
addr = 0x53
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_3 as float;
"""
addr = 0x54
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_3,
@hidden_gyro_2_alignment2_3.setter
def hidden_gyro_2_alignment2_3(self, new_value):
addr = 0x54
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_1 as float;
"""
addr = 0x55
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_1,
@hidden_gyro_2_alignment3_1.setter
def hidden_gyro_2_alignment3_1(self, new_value):
addr = 0x55
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_2 as float;
"""
addr = 0x56
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_2,
@hidden_gyro_2_alignment3_2.setter
def hidden_gyro_2_alignment3_2(self, new_value):
addr = 0x56
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_3 as float;
"""
addr = 0x57
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_3,
@hidden_gyro_2_alignment3_3.setter
def hidden_gyro_2_alignment3_3(self, new_value):
addr = 0x57
self.write_register(addr, new_value, hidden=True)
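    # Usage sketch (hypothetical caller code, assuming a driver instance `dev`):
    # each alignment getter returns (register_object, float), so the decoded value
    # is the second element.  The nine HIDDEN_GYRO_2_ALIGNMENT* registers can be
    # assembled into a 3x3 alignment matrix, e.g.:
    #
    #     matrix = [
    #         [getattr(dev, 'hidden_gyro_2_alignment%d_%d' % (row, col))[1]
    #          for col in (1, 2, 3)]
    #         for row in (1, 2, 3)
    #     ]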
@property
def hidden_c_accel_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_0 as float;
"""
addr = 0x58
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_0,
@hidden_c_accel_1_bias_x_pow_0.setter
def hidden_c_accel_1_bias_x_pow_0(self, new_value):
addr = 0x58
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_1 as float;
"""
addr = 0x59
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_1,
@hidden_c_accel_1_bias_x_pow_1.setter
def hidden_c_accel_1_bias_x_pow_1(self, new_value):
addr = 0x59
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_2 as float;
"""
addr = 0x5A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_2,
@hidden_c_accel_1_bias_x_pow_2.setter
def hidden_c_accel_1_bias_x_pow_2(self, new_value):
addr = 0x5A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_3 as float;
"""
addr = 0x5B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_3,
@hidden_c_accel_1_bias_x_pow_3.setter
def hidden_c_accel_1_bias_x_pow_3(self, new_value):
addr = 0x5B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_0 as float;
"""
addr = 0x5C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_0,
@hidden_c_accel_1_bias_y_pow_0.setter
def hidden_c_accel_1_bias_y_pow_0(self, new_value):
addr = 0x5C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_1 as float;
"""
addr = 0x5D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_1,
@hidden_c_accel_1_bias_y_pow_1.setter
def hidden_c_accel_1_bias_y_pow_1(self, new_value):
addr = 0x5D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_2 as float;
"""
addr = 0x5E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_2,
@hidden_c_accel_1_bias_y_pow_2.setter
def hidden_c_accel_1_bias_y_pow_2(self, new_value):
addr = 0x5E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_3 as float;
"""
addr = 0x5F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_3,
@hidden_c_accel_1_bias_y_pow_3.setter
def hidden_c_accel_1_bias_y_pow_3(self, new_value):
addr = 0x5F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_0 as float;
"""
addr = 0x60
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_0,
@hidden_c_accel_1_bias_z_pow_0.setter
def hidden_c_accel_1_bias_z_pow_0(self, new_value):
addr = 0x60
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_1 as float;
"""
addr = 0x61
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_1,
@hidden_c_accel_1_bias_z_pow_1.setter
def hidden_c_accel_1_bias_z_pow_1(self, new_value):
addr = 0x61
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_2 as float;
"""
addr = 0x62
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_2,
@hidden_c_accel_1_bias_z_pow_2.setter
def hidden_c_accel_1_bias_z_pow_2(self, new_value):
addr = 0x62
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_3 as float;
"""
addr = 0x63
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_3,
@hidden_c_accel_1_bias_z_pow_3.setter
def hidden_c_accel_1_bias_z_pow_3(self, new_value):
addr = 0x63
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_0 as float;
"""
addr = 0x64
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_0,
@hidden_c_accel_1_scale_x_pow_0.setter
def hidden_c_accel_1_scale_x_pow_0(self, new_value):
addr = 0x64
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_1 as float;
"""
addr = 0x65
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_1,
@hidden_c_accel_1_scale_x_pow_1.setter
def hidden_c_accel_1_scale_x_pow_1(self, new_value):
addr = 0x65
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_2 as float;
"""
addr = 0x66
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_2,
@hidden_c_accel_1_scale_x_pow_2.setter
def hidden_c_accel_1_scale_x_pow_2(self, new_value):
addr = 0x66
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_3 as float;
"""
addr = 0x67
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_3,
@hidden_c_accel_1_scale_x_pow_3.setter
def hidden_c_accel_1_scale_x_pow_3(self, new_value):
addr = 0x67
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_0 as float;
"""
addr = 0x68
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_0,
@hidden_c_accel_1_scale_y_pow_0.setter
def hidden_c_accel_1_scale_y_pow_0(self, new_value):
addr = 0x68
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_1 as float;
"""
addr = 0x69
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_1,
@hidden_c_accel_1_scale_y_pow_1.setter
def hidden_c_accel_1_scale_y_pow_1(self, new_value):
addr = 0x69
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_2 as float;
"""
addr = 0x6A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_2,
@hidden_c_accel_1_scale_y_pow_2.setter
def hidden_c_accel_1_scale_y_pow_2(self, new_value):
addr = 0x6A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_3 as float;
"""
addr = 0x6B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_3,
@hidden_c_accel_1_scale_y_pow_3.setter
def hidden_c_accel_1_scale_y_pow_3(self, new_value):
addr = 0x6B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_0 as float;
"""
addr = 0x6C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_0,
@hidden_c_accel_1_scale_z_pow_0.setter
def hidden_c_accel_1_scale_z_pow_0(self, new_value):
addr = 0x6C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_1 as float;
"""
addr = 0x6D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_1,
@hidden_c_accel_1_scale_z_pow_1.setter
def hidden_c_accel_1_scale_z_pow_1(self, new_value):
addr = 0x6D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_2 as float;
"""
addr = 0x6E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_2,
@hidden_c_accel_1_scale_z_pow_2.setter
def hidden_c_accel_1_scale_z_pow_2(self, new_value):
addr = 0x6E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_3 as float;
"""
addr = 0x6F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_3,
@hidden_c_accel_1_scale_z_pow_3.setter
def hidden_c_accel_1_scale_z_pow_3(self, new_value):
addr = 0x6F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_1 as float;
"""
addr = 0x70
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_1,
@hidden_accel_1_alignment1_1.setter
def hidden_accel_1_alignment1_1(self, new_value):
addr = 0x70
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_2 as float;
"""
addr = 0x71
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_2,
@hidden_accel_1_alignment1_2.setter
def hidden_accel_1_alignment1_2(self, new_value):
addr = 0x71
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_3 as float;
"""
addr = 0x72
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_3,
@hidden_accel_1_alignment1_3.setter
def hidden_accel_1_alignment1_3(self, new_value):
addr = 0x72
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_1 as float;
"""
addr = 0x73
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_1,
@hidden_accel_1_alignment2_1.setter
def hidden_accel_1_alignment2_1(self, new_value):
addr = 0x73
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_2 as float;
"""
addr = 0x74
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_2,
@hidden_accel_1_alignment2_2.setter
def hidden_accel_1_alignment2_2(self, new_value):
addr = 0x74
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_3 as float;
"""
addr = 0x75
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_3,
@hidden_accel_1_alignment2_3.setter
def hidden_accel_1_alignment2_3(self, new_value):
addr = 0x75
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_1 as float;
"""
addr = 0x76
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_1,
@hidden_accel_1_alignment3_1.setter
def hidden_accel_1_alignment3_1(self, new_value):
addr = 0x76
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_2 as float;
"""
addr = 0x77
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_2,
@hidden_accel_1_alignment3_2.setter
def hidden_accel_1_alignment3_2(self, new_value):
addr = 0x77
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_3 as float;
"""
addr = 0x78
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_3,
@hidden_accel_1_alignment3_3.setter
def hidden_accel_1_alignment3_3(self, new_value):
addr = 0x78
self.write_register(addr, new_value, hidden=True)
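    # Write-path sketch (hypothetical values, assuming a driver instance `dev`):
    # the setters simply forward the new value to write_register(addr, value,
    # hidden=True), so a calibration coefficient can be updated by plain
    # attribute assignment, e.g.:
    #
    #     dev.hidden_c_accel_1_scale_x_pow_0 = 1.0  # zeroth-order scale term
    #     dev.hidden_c_accel_1_bias_x_pow_0 = 0.0   # zeroth-order bias term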
@property
def hidden_c_mag_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_0 as float;
"""
addr = 0x79
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_0,
@hidden_c_mag_1_bias_x_pow_0.setter
def hidden_c_mag_1_bias_x_pow_0(self, new_value):
addr = 0x79
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_1 as float;
"""
addr = 0x7A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_1,
@hidden_c_mag_1_bias_x_pow_1.setter
def hidden_c_mag_1_bias_x_pow_1(self, new_value):
addr = 0x7A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_2 as float;
"""
addr = 0x7B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_2,
@hidden_c_mag_1_bias_x_pow_2.setter
def hidden_c_mag_1_bias_x_pow_2(self, new_value):
addr = 0x7B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_3 as float;
"""
addr = 0x7C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_3,
@hidden_c_mag_1_bias_x_pow_3.setter
def hidden_c_mag_1_bias_x_pow_3(self, new_value):
addr = 0x7C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_0 as float;
"""
addr = 0x7D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_0,
@hidden_c_mag_1_bias_y_pow_0.setter
def hidden_c_mag_1_bias_y_pow_0(self, new_value):
addr = 0x7D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_1 as float;
"""
addr = 0x7E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_1,
@hidden_c_mag_1_bias_y_pow_1.setter
def hidden_c_mag_1_bias_y_pow_1(self, new_value):
addr = 0x7E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_2 as float;
"""
addr = 0x7F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_2,
@hidden_c_mag_1_bias_y_pow_2.setter
def hidden_c_mag_1_bias_y_pow_2(self, new_value):
addr = 0x7F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_3 as float;
"""
addr = 0x80
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_3,
@hidden_c_mag_1_bias_y_pow_3.setter
def hidden_c_mag_1_bias_y_pow_3(self, new_value):
addr = 0x80
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_0 as float;
"""
addr = 0x81
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_0,
@hidden_c_mag_1_bias_z_pow_0.setter
def hidden_c_mag_1_bias_z_pow_0(self, new_value):
addr = 0x81
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_1 as float;
"""
addr = 0x82
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_1,
@hidden_c_mag_1_bias_z_pow_1.setter
def hidden_c_mag_1_bias_z_pow_1(self, new_value):
addr = 0x82
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_2 as float;
"""
addr = 0x83
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_2,
@hidden_c_mag_1_bias_z_pow_2.setter
def hidden_c_mag_1_bias_z_pow_2(self, new_value):
addr = 0x83
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_3 as float;
"""
addr = 0x84
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_3,
@hidden_c_mag_1_bias_z_pow_3.setter
def hidden_c_mag_1_bias_z_pow_3(self, new_value):
addr = 0x84
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_0 as float;
"""
addr = 0x85
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_0,
@hidden_c_mag_1_scale_x_pow_0.setter
def hidden_c_mag_1_scale_x_pow_0(self, new_value):
addr = 0x85
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_1 as float;
"""
addr = 0x86
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_1,
@hidden_c_mag_1_scale_x_pow_1.setter
def hidden_c_mag_1_scale_x_pow_1(self, new_value):
addr = 0x86
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_2 as float;
"""
addr = 0x87
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_2,
@hidden_c_mag_1_scale_x_pow_2.setter
def hidden_c_mag_1_scale_x_pow_2(self, new_value):
addr = 0x87
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_3 as float;
"""
addr = 0x88
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_3,
@hidden_c_mag_1_scale_x_pow_3.setter
def hidden_c_mag_1_scale_x_pow_3(self, new_value):
addr = 0x88
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_0 as float;
"""
addr = 0x89
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_0,
@hidden_c_mag_1_scale_y_pow_0.setter
def hidden_c_mag_1_scale_y_pow_0(self, new_value):
addr = 0x89
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_1 as float;
"""
addr = 0x8A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_1,
@hidden_c_mag_1_scale_y_pow_1.setter
def hidden_c_mag_1_scale_y_pow_1(self, new_value):
addr = 0x8A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_2 as float;
"""
addr = 0x8B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_2,
@hidden_c_mag_1_scale_y_pow_2.setter
def hidden_c_mag_1_scale_y_pow_2(self, new_value):
addr = 0x8B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_3 as float;
"""
addr = 0x8C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_3,
@hidden_c_mag_1_scale_y_pow_3.setter
def hidden_c_mag_1_scale_y_pow_3(self, new_value):
addr = 0x8C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_0 as float;
"""
addr = 0x8D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_0,
@hidden_c_mag_1_scale_z_pow_0.setter
def hidden_c_mag_1_scale_z_pow_0(self, new_value):
addr = 0x8D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_1 as float;
"""
addr = 0x8E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_1,
@hidden_c_mag_1_scale_z_pow_1.setter
def hidden_c_mag_1_scale_z_pow_1(self, new_value):
addr = 0x8E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_2 as float;
"""
addr = 0x8F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_2,
@hidden_c_mag_1_scale_z_pow_2.setter
def hidden_c_mag_1_scale_z_pow_2(self, new_value):
addr = 0x8F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_3 as float;
"""
addr = 0x90
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_3,
@hidden_c_mag_1_scale_z_pow_3.setter
def hidden_c_mag_1_scale_z_pow_3(self, new_value):
addr = 0x90
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_1 as float;
"""
addr = 0x91
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_1,
@hidden_mag_1_alignment1_1.setter
def hidden_mag_1_alignment1_1(self, new_value):
addr = 0x91
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_2 as float;
"""
addr = 0x92
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_2,
@hidden_mag_1_alignment1_2.setter
def hidden_mag_1_alignment1_2(self, new_value):
addr = 0x92
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_3 as float;
"""
addr = 0x93
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_3,
@hidden_mag_1_alignment1_3.setter
def hidden_mag_1_alignment1_3(self, new_value):
addr = 0x93
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_1 as float;
"""
addr = 0x94
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_1,
@hidden_mag_1_alignment2_1.setter
def hidden_mag_1_alignment2_1(self, new_value):
addr = 0x94
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_2 as float;
"""
addr = 0x95
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_2,
@hidden_mag_1_alignment2_2.setter
def hidden_mag_1_alignment2_2(self, new_value):
addr = 0x95
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_3 as float;
"""
addr = 0x96
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_3,
@hidden_mag_1_alignment2_3.setter
def hidden_mag_1_alignment2_3(self, new_value):
addr = 0x96
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_1 as float;
"""
addr = 0x97
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_1,
@hidden_mag_1_alignment3_1.setter
def hidden_mag_1_alignment3_1(self, new_value):
addr = 0x97
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_2 as float;
"""
addr = 0x98
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_2,
@hidden_mag_1_alignment3_2.setter
def hidden_mag_1_alignment3_2(self, new_value):
addr = 0x98
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_3 as float;
"""
addr = 0x99
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_3,
@hidden_mag_1_alignment3_3.setter
def hidden_mag_1_alignment3_3(self, new_value):
addr = 0x99
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_x(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_X -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_X as float;
"""
addr = 0x9A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_x,
@hidden_mag_1_reference_x.setter
def hidden_mag_1_reference_x(self, new_value):
addr = 0x9A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_y(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_Y -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_Y as float;
"""
addr = 0x9B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_y,
@hidden_mag_1_reference_y.setter
def hidden_mag_1_reference_y(self, new_value):
addr = 0x9B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_z(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_Z -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_Z as float;
"""
addr = 0x9C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_z,
@hidden_mag_1_reference_z.setter
def hidden_mag_1_reference_z(self, new_value):
addr = 0x9C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_0 as float;
"""
addr = 0x9D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_0,
@hidden_c_mag_2_bias_x_pow_0.setter
def hidden_c_mag_2_bias_x_pow_0(self, new_value):
addr = 0x9D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_1 as float;
"""
addr = 0x9E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_1,
@hidden_c_mag_2_bias_x_pow_1.setter
def hidden_c_mag_2_bias_x_pow_1(self, new_value):
addr = 0x9E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_2 as float;
"""
addr = 0x9F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_2,
@hidden_c_mag_2_bias_x_pow_2.setter
def hidden_c_mag_2_bias_x_pow_2(self, new_value):
addr = 0x9F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_3 as float;
"""
addr = 0xA0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_3,
@hidden_c_mag_2_bias_x_pow_3.setter
def hidden_c_mag_2_bias_x_pow_3(self, new_value):
addr = 0xA0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_0 as float;
"""
addr = 0xA1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_0,
@hidden_c_mag_2_bias_y_pow_0.setter
def hidden_c_mag_2_bias_y_pow_0(self, new_value):
addr = 0xA1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_1 as float;
"""
addr = 0xA2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_1,
@hidden_c_mag_2_bias_y_pow_1.setter
def hidden_c_mag_2_bias_y_pow_1(self, new_value):
addr = 0xA2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_2 as float;
"""
addr = 0xA3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_2,
@hidden_c_mag_2_bias_y_pow_2.setter
def hidden_c_mag_2_bias_y_pow_2(self, new_value):
addr = 0xA3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_3 as float;
"""
addr = 0xA4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_3,
@hidden_c_mag_2_bias_y_pow_3.setter
def hidden_c_mag_2_bias_y_pow_3(self, new_value):
addr = 0xA4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_0 as float;
"""
addr = 0xA5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_0,
@hidden_c_mag_2_bias_z_pow_0.setter
def hidden_c_mag_2_bias_z_pow_0(self, new_value):
addr = 0xA5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_1 as float;
"""
addr = 0xA6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_1,
@hidden_c_mag_2_bias_z_pow_1.setter
def hidden_c_mag_2_bias_z_pow_1(self, new_value):
addr = 0xA6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_2 as float;
"""
addr = 0xA7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_2,
@hidden_c_mag_2_bias_z_pow_2.setter
def hidden_c_mag_2_bias_z_pow_2(self, new_value):
addr = 0xA7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_3 as float;
"""
addr = 0xA8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_3,
@hidden_c_mag_2_bias_z_pow_3.setter
def hidden_c_mag_2_bias_z_pow_3(self, new_value):
addr = 0xA8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_0 as float;
"""
addr = 0xA9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_0,
@hidden_c_mag_2_scale_x_pow_0.setter
def hidden_c_mag_2_scale_x_pow_0(self, new_value):
addr = 0xA9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_1 as float;
"""
addr = 0xAA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_1,
@hidden_c_mag_2_scale_x_pow_1.setter
def hidden_c_mag_2_scale_x_pow_1(self, new_value):
addr = 0xAA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_2 as float;
"""
addr = 0xAB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_2,
@hidden_c_mag_2_scale_x_pow_2.setter
def hidden_c_mag_2_scale_x_pow_2(self, new_value):
addr = 0xAB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_3 as float;
"""
addr = 0xAC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_3,
@hidden_c_mag_2_scale_x_pow_3.setter
def hidden_c_mag_2_scale_x_pow_3(self, new_value):
addr = 0xAC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_0 as float;
"""
addr = 0xAD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_0,
@hidden_c_mag_2_scale_y_pow_0.setter
def hidden_c_mag_2_scale_y_pow_0(self, new_value):
addr = 0xAD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_1 as float;
"""
addr = 0xAE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_1,
@hidden_c_mag_2_scale_y_pow_1.setter
def hidden_c_mag_2_scale_y_pow_1(self, new_value):
addr = 0xAE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_2 as float;
"""
addr = 0xAF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_2,
@hidden_c_mag_2_scale_y_pow_2.setter
def hidden_c_mag_2_scale_y_pow_2(self, new_value):
addr = 0xAF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_3 as float;
"""
addr = 0xB0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_3,
@hidden_c_mag_2_scale_y_pow_3.setter
def hidden_c_mag_2_scale_y_pow_3(self, new_value):
addr = 0xB0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_0 as float;
"""
addr = 0xB1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_0,
@hidden_c_mag_2_scale_z_pow_0.setter
def hidden_c_mag_2_scale_z_pow_0(self, new_value):
addr = 0xB1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_1 as float;
"""
addr = 0xB2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_1,
@hidden_c_mag_2_scale_z_pow_1.setter
def hidden_c_mag_2_scale_z_pow_1(self, new_value):
addr = 0xB2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_2 as float;
"""
addr = 0xB3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_2,
@hidden_c_mag_2_scale_z_pow_2.setter
def hidden_c_mag_2_scale_z_pow_2(self, new_value):
addr = 0xB3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_3 as float;
"""
addr = 0xB4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_3,
@hidden_c_mag_2_scale_z_pow_3.setter
def hidden_c_mag_2_scale_z_pow_3(self, new_value):
addr = 0xB4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_1 as float;
"""
addr = 0xB5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_1,
@hidden_mag_2_alignment1_1.setter
def hidden_mag_2_alignment1_1(self, new_value):
addr = 0xB5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_2 as float;
"""
addr = 0xB6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_2,
@hidden_mag_2_alignment1_2.setter
def hidden_mag_2_alignment1_2(self, new_value):
addr = 0xB6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_3 as float;
"""
addr = 0xB7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_3,
@hidden_mag_2_alignment1_3.setter
def hidden_mag_2_alignment1_3(self, new_value):
addr = 0xB7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_1 as float;
"""
addr = 0xB8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_1,
@hidden_mag_2_alignment2_1.setter
def hidden_mag_2_alignment2_1(self, new_value):
addr = 0xB8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_2 as float;
"""
addr = 0xB9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_2,
@hidden_mag_2_alignment2_2.setter
def hidden_mag_2_alignment2_2(self, new_value):
addr = 0xB9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_3 as float;
"""
addr = 0xBA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_3,
@hidden_mag_2_alignment2_3.setter
def hidden_mag_2_alignment2_3(self, new_value):
addr = 0xBA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_1 as float;
"""
addr = 0xBB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_1,
@hidden_mag_2_alignment3_1.setter
def hidden_mag_2_alignment3_1(self, new_value):
addr = 0xBB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_2 as float;
"""
addr = 0xBC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_2,
@hidden_mag_2_alignment3_2.setter
def hidden_mag_2_alignment3_2(self, new_value):
addr = 0xBC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_3 as float;
"""
addr = 0xBD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_3,
@hidden_mag_2_alignment3_3.setter
def hidden_mag_2_alignment3_3(self, new_value):
addr = 0xBD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_x(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_X -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_X as float;
"""
addr = 0xBE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_x,
@hidden_mag_2_reference_x.setter
def hidden_mag_2_reference_x(self, new_value):
addr = 0xBE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_y(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_Y -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_Y as float;
"""
addr = 0xBF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_y,
@hidden_mag_2_reference_y.setter
def hidden_mag_2_reference_y(self, new_value):
addr = 0xBF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_z(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_Z -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_Z as float;
"""
addr = 0xC0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_z,
@hidden_mag_2_reference_z.setter
def hidden_mag_2_reference_z(self, new_value):
addr = 0xC0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_CONVERSION as float;
"""
addr = 0xC1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_conversion,
@property
def hidden_gyro_2_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_CONVERSION as float;
"""
addr = 0xC2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_conversion,
@property
def hidden_accel_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_CONVERSION as float;
"""
addr = 0xC3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_conversion,
@property
def hidden_mag_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_CONVERSION as float;
"""
addr = 0xC4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_conversion,
@property
def hidden_mag_2_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_CONVERSION as float;
"""
addr = 0xC5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_conversion,
if __name__ == '__main__':
pass
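# Illustrative note (not part of the generated driver): every hidden register
# above carries one big-endian IEEE 754 float, so its 4-byte payload
# round-trips through the standard library, e.g.
#     struct.pack('>f', 1.5)                    -> b'?\xc0\x00\x00'
#     struct.unpack('>f', b'?\xc0\x00\x00')[0]  -> 1.5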
|
def binaryToInt(string: str, oneChar="1", zeroChar="0"):
    """Convert a binary string to an int, treating oneChar/zeroChar as the two digit symbols."""
    out = 0
    for i in range(len(string)):
        # Walk the string from its least significant (rightmost) digit.
        char = string[len(string) - 1 - i]
        if char == oneChar:
            currentDigit = 1
        elif char == zeroChar:
            currentDigit = 0
        else:
            raise ValueError("unexpected character %r in binary string" % char)
        out += (2 ** i) * currentDigit
    return out
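# Quick sanity check (an addition, not part of the original snippet): with the
# default "1"/"0" digit characters, binaryToInt should agree with Python's
# built-in parser.
assert binaryToInt("1011") == int("1011", 2)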
if __name__ == "__main__":
print(binaryToInt("1011"))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from poetry.poetry import Poetry
from poetry.utils._compat import Path
from poetry.utils.toml_file import TomlFile
fixtures_dir = Path(__file__).parent / "fixtures"
def test_poetry():
poetry = Poetry.create(str(fixtures_dir / "sample_project"))
package = poetry.package
assert package.name == "my-package"
assert package.version.text == "1.2.3"
assert package.description == "Some description."
assert package.authors == ["Sébastien Eustace <sebastien@eustace.io>"]
assert package.license.id == "MIT"
assert (
package.readme.relative_to(fixtures_dir).as_posix()
== "sample_project/README.rst"
)
assert package.homepage == "https://poetry.eustace.io"
assert package.repository_url == "https://github.com/sdispater/poetry"
assert package.keywords == ["packaging", "dependency", "poetry"]
assert package.python_versions == "~2.7 || ^3.6"
assert str(package.python_constraint) == ">=2.7,<2.8 || >=3.6,<4.0"
dependencies = {}
for dep in package.requires:
dependencies[dep.name] = dep
cleo = dependencies["cleo"]
assert cleo.pretty_constraint == "^0.6"
assert not cleo.is_optional()
pendulum = dependencies["pendulum"]
assert pendulum.pretty_constraint == "branch 2.0"
assert pendulum.is_vcs()
assert pendulum.vcs == "git"
assert pendulum.branch == "2.0"
assert pendulum.source == "https://github.com/sdispater/pendulum.git"
assert pendulum.allows_prereleases()
requests = dependencies["requests"]
assert requests.pretty_constraint == "^2.18"
assert not requests.is_vcs()
assert not requests.allows_prereleases()
assert requests.is_optional()
assert requests.extras == ["security"]
pathlib2 = dependencies["pathlib2"]
assert pathlib2.pretty_constraint == "^2.2"
assert pathlib2.python_versions == "~2.7"
assert not pathlib2.is_optional()
demo = dependencies["demo"]
assert demo.is_file()
assert not demo.is_vcs()
assert demo.name == "demo"
assert demo.pretty_constraint == "0.1.0"
demo = dependencies["my-package"]
assert not demo.is_file()
assert demo.is_directory()
assert not demo.is_vcs()
assert demo.name == "my-package"
assert demo.pretty_constraint == "0.1.2"
assert demo.package.requires[0].name == "pendulum"
assert demo.package.requires[1].name == "cachy"
assert demo.package.requires[1].extras == ["msgpack"]
simple_project = dependencies["simple-project"]
assert not simple_project.is_file()
assert simple_project.is_directory()
assert not simple_project.is_vcs()
assert simple_project.name == "simple-project"
assert simple_project.pretty_constraint == "1.2.3"
assert simple_project.package.requires == []
assert "db" in package.extras
classifiers = package.classifiers
assert classifiers == [
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
assert package.all_classifiers == [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def test_poetry_with_packages_and_includes():
poetry = Poetry.create(
str(fixtures_dir.parent / "masonry" / "builders" / "fixtures" / "with-include")
)
package = poetry.package
assert package.packages == [
{"include": "extra_dir/**/*.py"},
{"include": "my_module.py"},
{"include": "package_with_include"},
]
assert package.include == ["extra_dir/vcs_excluded.txt", "notes.txt"]
def test_check():
complete = TomlFile(fixtures_dir / "complete.toml")
content = complete.read(raw=True)["tool"]["poetry"]
assert Poetry.check(content)
|
#! python
"""
Modified version of barcode report for use on CCS inputs
"""
from pprint import pformat
import functools
import logging
import json
import os.path as op
import sys
from pbcommand.models import DataStore, FileTypes
from pbcommand.models.report import PlotGroup
from pbcommand.cli import pbparser_runner
from pbcommand.utils import setup_log
from pbcore.io import ConsensusReadSet
from pbreports.report import barcode as barcode_report
from pbreports.report.barcode import (read_inputs, get_barcode_info_parallel,
save_demuxed_dataset_reports)
from pbreports.io.barcode import get_unbarcoded_reads_info
from pbreports.io.specs import load_spec
from pbreports.plot.helper import to_plotgroup
from pbreports.plot.tools import plot_read_lengths_with_cdf
log = logging.getLogger(__name__)
__version__ = "0.2.1"
class Constants(barcode_report.Constants):
TOOL_ID = "pbreports.tasks.barcode_ccs"
TOOL_NAME = "barcode_ccs"
DRIVER_EXE = "python -m pbreports.report.barcode_ccs --resolved-tool-contract"
VERSION = __version__
DOC = __doc__
FILE_TYPE_READS_IN = FileTypes.DS_CCS
SHOW_COLUMNS = [
barcode_report.Constants.C_BIOSAMPLE,
barcode_report.Constants.C_IDX,
barcode_report.Constants.C_BARCODE,
barcode_report.Constants.C_NREADS,
barcode_report.Constants.C_NBASES,
barcode_report.Constants.C_BCQUAL,
barcode_report.Constants.C_RANK
]
SHOW_ATTRIBUTES = [
barcode_report.Constants.A_NBARCODES,
barcode_report.Constants.A_NREADS_BARCODED,
barcode_report.Constants.A_NREADS_UNBARCODED,
barcode_report.Constants.A_MEAN_READS,
barcode_report.Constants.A_MAX_READS,
barcode_report.Constants.A_MIN_READS,
barcode_report.Constants.A_MEAN_RL
]
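# Bind the CCS-specific attribute and column subsets to the shared report
# builder; callers then only supply the per-run data (read_info, biosamples,
# dataset UUIDs, ...).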
make_report = functools.partial(barcode_report._make_report_impl,
Constants.SHOW_ATTRIBUTES,
Constants.SHOW_COLUMNS,
Constants.LABEL_NONE)
def run_to_report(ds_bc_file,
barcodes_file,
reads_in_file,
base_dir=None,
datastore_json=None,
nproc=1,
test_mode=False,
min_bq_filter=Constants.MIN_BQ_FILTER):
spec = load_spec("barcode")
inputs = read_inputs(ds_bc_file, barcodes_file, reads_in_file)
read_info, barcoded_zmws, biosamples, subrpt_files, bc_dataset_uuids = get_barcode_info_parallel(
inputs.ds_files,
barcodes_file,
nproc=nproc,
subrpt_output_dir=op.join(base_dir, "sub_reports"),
isoseq_mode=False)
read_info.extend(list(get_unbarcoded_reads_info(
inputs.reads_in, barcoded_zmws)))
    if datastore_json is not None:
        if subrpt_files:
            save_demuxed_dataset_reports(
                subrpt_files, base_dir, datastore_json)
        else:
            # No per-barcode sub-reports were produced, so record an empty
            # datastore at the requested path instead of skipping the output.
            barcode_report.write_empty_datastore(datastore_json)
rpt = make_report(biosamples=biosamples,
read_info=read_info,
bc_dataset_uuids=bc_dataset_uuids,
dataset_uuids=inputs.dataset_uuids,
base_dir=base_dir,
use_spec=spec,
test_mode=test_mode,
min_bq_filter=min_bq_filter)
return spec.apply_view(rpt)
def args_runner(args):
log.info("Starting {f} version {v} report generation".format(
f=__file__, v=__version__))
report = run_to_report(args.ds_bc, args.barcodes, args.reads_in,
base_dir=op.dirname(args.report_json),
datastore_json=args.dataset_reports,
nproc=args.nproc,
test_mode=args.test_mode,
min_bq_filter=args.min_bq_filter)
log.info(pformat(report.to_dict()))
report.write_json(args.report_json)
report.tables[0].to_csv(args.report_csv)
return 0
def resolved_tool_contract_runner(rtc):
log.info("Starting {f} version {v} report generation".format(
f=__file__, v=__version__))
report = run_to_report(
ds_bc_file=rtc.task.input_files[0],
barcodes_file=rtc.task.input_files[2],
reads_in_file=rtc.task.input_files[1],
base_dir=op.dirname(rtc.task.output_files[0]),
datastore_json=rtc.task.output_files[2],
nproc=rtc.task.nproc)
log.debug(pformat(report.to_dict()))
report.write_json(rtc.task.output_files[0])
report.tables[0].to_csv(rtc.task.output_files[1])
return 0
def _get_parser():
return barcode_report.get_parser(Constants)
def main(argv=sys.argv):
return pbparser_runner(
argv=argv[1:],
parser=_get_parser(),
args_runner_func=args_runner,
contract_runner_func=resolved_tool_contract_runner,
alog=log,
setup_log_func=setup_log)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
# -*- coding: utf-8 -*-
from typing import List, Dict, AnyStr
from retry import retry
from ratelimit import limits, RateLimitException
import dataiku
from dataiku.customrecipe import get_recipe_config, get_input_names_for_role, get_output_names_for_role
from plugin_io_utils import ErrorHandlingEnum, validate_column_input
from dku_io_utils import set_column_description
from amazon_comprehend_api_client import API_EXCEPTIONS, batch_api_response_parser, get_client
from api_parallelizer import api_parallelizer
from amazon_comprehend_api_formatting import LanguageDetectionAPIFormatter
# ==============================================================================
# SETUP
# ==============================================================================
api_configuration_preset = get_recipe_config().get("api_configuration_preset")
api_quota_rate_limit = api_configuration_preset.get("api_quota_rate_limit")
api_quota_period = api_configuration_preset.get("api_quota_period")
parallel_workers = api_configuration_preset.get("parallel_workers")
batch_size = api_configuration_preset.get("batch_size")
text_column = get_recipe_config().get("text_column")
error_handling = ErrorHandlingEnum[get_recipe_config().get("error_handling")]
input_dataset_name = get_input_names_for_role("input_dataset")[0]
input_dataset = dataiku.Dataset(input_dataset_name)
input_schema = input_dataset.read_schema()
input_columns_names = [col["name"] for col in input_schema]
output_dataset_name = get_output_names_for_role("output_dataset")[0]
output_dataset = dataiku.Dataset(output_dataset_name)
validate_column_input(text_column, input_columns_names)
input_df = input_dataset.get_dataframe()
client = get_client(api_configuration_preset)
column_prefix = "lang_detect_api"
batch_kwargs = {
"api_support_batch": True,
"batch_size": batch_size,
"batch_api_response_parser": batch_api_response_parser,
}
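# These keyword arguments tell api_parallelizer to send rows to the API in
# batches of batch_size and to parse each batched response with
# batch_api_response_parser.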
# ==============================================================================
# RUN
# ==============================================================================
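# The stacked decorators below throttle the Comprehend calls: `limits` caps the
# number of calls per api_quota_period, and `retry` waits for one period and
# retries (up to 5 times) when the quota is exceeded or a transient OSError
# occurs.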
@retry((RateLimitException, OSError), delay=api_quota_period, tries=5)
@limits(calls=api_quota_rate_limit, period=api_quota_period)
def call_api_language_detection(batch: List[Dict], text_column: AnyStr) -> List[Dict]:
text_list = [str(r.get(text_column, "")).strip() for r in batch]
responses = client.batch_detect_dominant_language(TextList=text_list)
return responses
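# Note on throttling (a reading of the decorators above, not new behavior): the
# `limits` decorator raises RateLimitException once more than
# `api_quota_rate_limit` calls happen within `api_quota_period` seconds, and
# `retry` catches that (and OSError), waiting `api_quota_period` seconds between
# up to 5 attempts, so `api_parallelizer` below can fan batches out across
# `parallel_workers` threads without exceeding the API quota.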
df = api_parallelizer(
input_df=input_df,
api_call_function=call_api_language_detection,
api_exceptions=API_EXCEPTIONS,
column_prefix=column_prefix,
text_column=text_column,
parallel_workers=parallel_workers,
error_handling=error_handling,
**batch_kwargs
)
api_formatter = LanguageDetectionAPIFormatter(
input_df=input_df, column_prefix=column_prefix, error_handling=error_handling,
)
output_df = api_formatter.format_df(df)
output_dataset.write_with_schema(output_df)
set_column_description(
input_dataset=input_dataset,
output_dataset=output_dataset,
column_description_dict=api_formatter.column_description_dict,
)
|
#!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
def find_edit_distance(str1, str2):
''' computes the edit distance between two strings '''
m = len(str1)
n = len(str2)
diff = lambda c1, c2: 0 if c1 == c2 else 1
E = [[0] * (n + 1) for i in range(m + 1)]
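    # E[i][j] holds the edit distance between the first i characters of str1
    # and the first j characters of str2; row 0 and column 0 are the costs of
    # inserting or deleting every character.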
for i in range(m + 1):
E[i][0] = i
for j in range(1, n + 1):
E[0][j] = j
for i in range(1, m + 1):
for j in range(1, n + 1):
E[i][j] = min(E[i-1][j] + 1, E[i][j-1] + 1, E[i-1][j-1] + diff(str1[i-1], str2[j-1]))
return E[m][n]
def test_find_edit_distance():
s = 'sunday'
t = 'saturday'
assert(find_edit_distance(s, t) == 3)
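    # 'sunday' -> 'saturday' needs 3 edits: insert 'a', insert 't', substitute 'n' -> 'r'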
print('Tests passed!')
if __name__ == '__main__':
test_find_edit_distance()
|
#!/usr/bin/env python
# coding: utf-8
# # Cats and Dogs Classification
# Data Loading and Exploring
# In[1]:
import os
base_dir = './cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# cat training pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# dog training pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# cat validation pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# dog validation pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# In[2]:
# view file names
train_cat_fnames = os.listdir(train_cats_dir)
print(train_cat_fnames[:10])
train_dog_fnames = os.listdir(train_dogs_dir)
train_dog_fnames.sort()
print(train_dog_fnames[:10])
# In[3]:
# preview images to know what the dataset is like
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Parameters for our graph; we'll output images in a 4*4 configuration
nrows = 4
ncols = 4
# Index for iterating over images
pic_index = 0
# Set up matplotlib fig, and size it to fit 4*4 pics
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)
# 8 images for cats and dogs separately
pic_index += 8
next_cat_pix = [os.path.join(train_cats_dir, fname) for fname in train_cat_fnames[pic_index-8:pic_index]]
next_dog_pix = [os.path.join(train_dogs_dir, fname) for fname in train_dog_fnames[pic_index-8:pic_index]]
for i, img_path in enumerate(next_cat_pix + next_dog_pix):
# Set up subplot; subplot indices starts at 1
sp = plt.subplot(nrows, ncols, i+1)
sp.axis('Off')
img = mpimg.imread(img_path)
plt.imshow(img)
plt.show()
# build a small convnet from scratch to get to 72% accuracy
# In[4]:
from tensorflow.keras import layers
from tensorflow.keras import Model
# Our input feature map is 150*150*3: 150*150 for the image pixels,
# and 3 for the three color channels: R, G and B
img_input = layers.Input(shape=(150,150,3))
# First convolution extracts 16 filters that are 3*3
# Convolution is followed by max-pooling layer with a 2*2 window
x = layers.Conv2D(16,3,activation='relu')(img_input)
x = layers.MaxPooling2D(2)(x)
# Second convolution extracts 32 filters that are 3*3
# Convolution is followed by max-pooling layer with a 2*2 window
x = layers.Conv2D(32,3,activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
# Third convolution extracts 64 filters that are 3*3
# Convolution is followed by max-pooling layer with a 2*2 window
x = layers.Conv2D(64,3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
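# After three conv/pool blocks the 150*150 input has been reduced to a
# 17*17*64 feature map: each 3*3 valid convolution trims 2 pixels
# (150->148, 74->72, 36->34) and each 2*2 max-pooling halves the spatial size.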
# fully-connected layers: because we are facing a binary classification problem, we will end our network with a sigmoid activation, so that the output of our network will be a single scalar between 0 and 1.
# In[5]:
# Flatten feature map to a 1-dim tensor so we can add fully connected layers
x = layers.Flatten()(x)
# Generate a fully connected layer with ReLU activation and 512 hidden units
x = layers.Dense(512,activation='relu')(x)
# Create output layer with a single node and sigmoid activation
output = layers.Dense(1, activation='sigmoid')(x)
# Create Model
# input = input feature map
# output = output feature map
# connected layer + sigmoid output layer
model = Model(img_input,output)
# Let's summarize the model architecture
# In[6]:
model.summary()
# In[7]:
# use RMSprop instead of stochastic gradient
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])
# Data Preprocessing
# In[8]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150,150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary'
)
# Flow validation images in batches of 20 using val_datagen generator
validation_generator = val_datagen.flow_from_directory(
validation_dir,
target_size=(150,150),
batch_size=20,
class_mode='binary'
)
# Training
# Train on 2000 images for 15 epochs and validate on 1000 images
# In[ ]:
history = model.fit_generator(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=15,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=1
)
# Visualizing Intermediate Representations
# Visualize how an input gets transformed as it goes through the convnet
# In[ ]:
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
# define a new Model that takes an img as input and will output
# intermediate representations for all layers in the previous model after
# the first
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)
# prepare a random input img of a cat or dog from the training set
cat_img_files = [os.path.join(train_cats_dir, f) for f in train_cat_fnames]
dog_img_files = [os.path.join(train_dogs_dir, f) for f in train_dog_fnames]
img_path = random.choice(cat_img_files + dog_img_files)
img = load_img(img_path, target_size=(150, 150)) # this is a PIL img
x = img_to_array(img) # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)
# Rescale by 1/255
x /= 255
# Let's run our image through our network, thus obtaining all
# intermediate representations for this img.
successive_feature_maps = visualization_model.predict(x)
# These are names of the layers
layer_names = [layer.name for layer in model.layers]
# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
if len(feature_map.shape) == 4:
# Just do this for the conv/ maxpool layers, not the fully-connected layers
n_features = feature_map.shape[-1] # number of features in feature map
        # The feature map has shape (1, size, size, n_features)
        size = feature_map.shape[1]
        # Tile each of the n_features activations into one horizontal grid
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Postprocess the feature to make it visually palatable
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x /= x.std()
            x *= 64
            x += 128
            x = np.clip(x, 0, 255).astype('uint8')
            # Tile each filter into this big horizontal grid
            display_grid[:, i * size: (i + 1) * size] = x
        # Display the grid for this layer
        scale = 20. / n_features
        plt.figure(figsize=(scale * n_features, scale))
        plt.title(layer_name)
        plt.grid(False)
        plt.imshow(display_grid, aspect='auto', cmap='viridis')
# Evaluating Accuracy and Loss for the Model
# plot the training / validation accuracy and loss as collected during training
# In[ ]:
# Retrieve a list of accuracy results on training and validation data
# sets for each training epoch
acc = history.history['acc']
val_acc = history.history['val_acc']
# Retrieve a list of list results on training and validation data
# sets for each training epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
# Get number of epochs
epochs = range(len(acc))
# Plot training and validation accuracy per epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
# Plot training and validation loss per epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
# Clean Up
# In[ ]:
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pathlib
from collections import namedtuple
import torchaudio
import shutil
Speaker = namedtuple('Speaker', ['id', 'gender', 'subset'])
FileRecord = namedtuple(
'FileRecord', ['fname', 'length', 'speaker', 'book', 'text_file'])
def get_speakers(speaker_path):
all_speakers = []
with open(speaker_path) as f:
for line in f:
if line.startswith(';'):
continue
line = line.split('|')
speaker_id, gender, subset = [x.strip() for x in line[0:3]]
speaker_id = int(speaker_id)
assert subset in ['test-clean', 'train-clean-360', 'train-clean-100',
'test-other', 'dev-clean', 'train-other-500', 'dev-other'], subset
speaker = Speaker(id=speaker_id, gender=gender, subset=subset)
all_speakers.append(speaker)
return all_speakers
def get_filelength(fname):
info = torchaudio.info(fname)[0]
return info.length
def traverse_tree(root, ext='flac'):
fnames = pathlib.Path(root).rglob(f"*.{ext}")
fnames = sorted(list(fnames))
lengths = []
for file in fnames:
file = str(file.resolve())
length = get_filelength(file)
lengths.append(length)
return list(zip(fnames, lengths))
def get_speaker_fname(fname):
stemmed = fname.stem
speaker, book, seq = stemmed.split('-')
return int(speaker), int(book)
def full_records(speakers, fname2length, subset_name=None):
all_records = []
speakers = dict((speaker.id, speaker) for speaker in speakers)
for fname, length in fname2length:
speaker, book = get_speaker_fname(fname)
assert speaker in speakers, f'Unknown speaker! {speaker}'
speaker = speakers[speaker]
if subset_name is not None:
assert subset_name == speaker.subset
# hacky
text_file = fname.parent / f'{speaker.id}-{book}.trans.txt'
frecord = FileRecord(speaker=speaker, length=length,
fname=fname, book=book, text_file=text_file)
all_records.append(frecord)
return all_records
def get_histogram(records, lambda_key, lambda_value):
from collections import defaultdict
key_value = defaultdict(int)
for record in records:
key = lambda_key(record)
value = lambda_value(record)
key_value[key] += value
return key_value
def materialize(records, target_dir, tag=None, move=False):
target_dir = pathlib.Path(target_dir)
to_copy = set()
to_move = set()
for record in records:
# outline:
# target_dir / speaker / book / file
if tag is None:
target_book_dir = target_dir / \
str(record.speaker.id) / str(record.book)
else:
target_book_dir = target_dir / tag / \
str(record.speaker.id) / str(record.book)
target_book_dir.mkdir(exist_ok=True, parents=True)
if not move:
to_copy.add((record.fname, target_book_dir / record.fname.name))
else:
to_move.add((record.fname, target_book_dir / record.fname.name))
to_copy.add((record.text_file, target_book_dir / record.text_file.name))
to_copy = sorted(list(to_copy))
for src, dst in to_copy:
shutil.copy(src, dst)
if len(to_move) > 0:
to_move = sorted(list(to_move))
for src, dst in to_move:
shutil.move(src, dst)
def print_stats(records):
def lambda_speaker(r): return r.speaker.id
def lambda_time(r): return r.length / 16000.0
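    # LibriSpeech audio is sampled at 16 kHz, so dividing the sample count by
    # 16000 converts a file length into seconds.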
speaker_time = get_histogram(
records, lambda_key=lambda_speaker, lambda_value=lambda_time)
print(f'Unique speakers: {len(speaker_time)}')
times = speaker_time.values()
min_time, max_time, mean_time, total_time = min(
times), max(times), sum(times) / len(times), sum(times)
min_time, max_time, mean_time, total_time = map(
int, [min_time, max_time, mean_time, total_time])
print(
f'Min/Mean/Max/Total, seconds: {min_time}/{mean_time}/{max_time}/{total_time}')
print(f'n_utterances: {len(records)}')
|
import os
PROJECT_DIR = os.path.abspath(os.pardir)
RUN_DIR = os.path.join(PROJECT_DIR, "runs/")
DATA_DIR = os.path.join(PROJECT_DIR, "data/")
EMBEDDINGS_DIR = os.path.join(PROJECT_DIR, "embeddings/")
|
"""
# BEGIN TAG_DEMO
>>> tag('br') # <1>
'<br />'
>>> tag('p', 'hello') # <2>
'<p>hello</p>'
>>> print(tag('p', 'hello', 'world'))
<p>hello</p>
<p>world</p>
>>> tag('p', 'hello', id=33) # <3>
'<p id="33">hello</p>'
>>> print(tag('p', 'hello', 'world', cls='sidebar')) # <4>
<p class="sidebar">hello</p>
<p class="sidebar">world</p>
>>> tag(content='testing', name="img") # <5>
'<img content="testing" />'
>>> my_tag = {'name': 'img', 'title': 'Sunset Boulevard',
... 'src': 'sunset.jpg', 'cls': 'framed'}
>>> tag(**my_tag) # <6>
'<img class="framed" src="sunset.jpg" title="Sunset Boulevard" />'
# END TAG_DEMO
"""
# BEGIN TAG_FUNC
def tag(name, *content, cls=None, **attrs):
"""Generate one or more HTML tags"""
if cls is not None:
attrs['class'] = cls
if attrs:
attr_str = ''.join(' %s="%s"' % (attr, value)
for attr, value
in sorted(attrs.items()))
else:
attr_str = ''
if content:
return '\n'.join('<%s%s>%s</%s>' %
(name, attr_str, c, name) for c in content)
else:
return '<%s%s />' % (name, attr_str)
# END TAG_FUNC
|
from functools import lru_cache
class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
@lru_cache(None)
def helper(b,e):
            # helper(b, e) returns the length of the longest palindromic
            # subsequence of s[b..e]
if b > e : return 0
if b == e : return 1
if s[b] == s[e] :
return helper(b+1,e-1) + 2
return max(helper(b+1,e), helper(b,e-1))
return helper(0,len(s)-1)
s = Solution()
ans = s.longestPalindromeSubseq('bcbbd')
print(ans)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
detection_parser = detection.get_args_parser()
parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=60, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path("/checkpoint/{}/experiments".format(user))
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / "{}_init".format(uuid.uuid4().hex)
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as detection
self._setup_gpu_args()
detection.main(self.args)
def checkpoint(self):
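        # Called by submitit when the job is preempted or times out: point the
        # resubmitted job at the latest checkpoint (if one exists) and requeue
        # a fresh Trainer via DelayedSubmission.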
import os
import submitit
from pathlib import Path
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print("Process group: {} tasks, rank: {}".format(job_env.num_tasks,job_env.global_rank))
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
# cluster setup is defined by environment variables
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
)
executor.update_parameters(name="detr")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
|
"""Entity for Zigbee Home Automation."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
import functools
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.const import ATTR_NAME
from homeassistant.core import CALLBACK_TYPE, Event, callback
from homeassistant.helpers import entity
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
from .core.const import (
ATTR_MANUFACTURER,
ATTR_MODEL,
DATA_ZHA,
DATA_ZHA_BRIDGE_ID,
DOMAIN,
SIGNAL_GROUP_ENTITY_REMOVED,
SIGNAL_GROUP_MEMBERSHIP_CHANGE,
SIGNAL_REMOVE,
)
from .core.helpers import LogMixin
if TYPE_CHECKING:
from .core.channels.base import ZigbeeChannel
from .core.device import ZHADevice
_LOGGER = logging.getLogger(__name__)
ENTITY_SUFFIX = "entity_suffix"
UPDATE_GROUP_FROM_CHILD_DELAY = 0.5
class BaseZhaEntity(LogMixin, entity.Entity):
"""A base class for ZHA entities."""
unique_id_suffix: str | None = None
def __init__(self, unique_id: str, zha_device: ZHADevice, **kwargs: Any) -> None:
"""Init ZHA entity."""
self._name: str = ""
self._force_update: bool = False
self._should_poll: bool = False
self._unique_id: str = unique_id
if self.unique_id_suffix:
self._unique_id += f"-{self.unique_id_suffix}"
self._state: Any = None
self._extra_state_attributes: dict[str, Any] = {}
self._zha_device = zha_device
self._unsubs: list[Callable[[], None]] = []
self.remove_future: asyncio.Future[Any] = asyncio.Future()
@property
def name(self) -> str:
"""Return Entity's default name."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def zha_device(self) -> ZHADevice:
"""Return the zha device this entity is attached to."""
return self._zha_device
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return device specific state attributes."""
return self._extra_state_attributes
@property
def force_update(self) -> bool:
"""Force update this entity."""
return self._force_update
@property
def should_poll(self) -> bool:
"""Poll state from device."""
return self._should_poll
@property
def device_info(self) -> entity.DeviceInfo:
"""Return a device description for device registry."""
zha_device_info = self._zha_device.device_info
ieee = zha_device_info["ieee"]
return entity.DeviceInfo(
connections={(CONNECTION_ZIGBEE, ieee)},
identifiers={(DOMAIN, ieee)},
manufacturer=zha_device_info[ATTR_MANUFACTURER],
model=zha_device_info[ATTR_MODEL],
name=zha_device_info[ATTR_NAME],
via_device=(DOMAIN, self.hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]),
)
@callback
def async_state_changed(self) -> None:
"""Entity state changed."""
self.async_write_ha_state()
@callback
def async_update_state_attribute(self, key: str, value: Any) -> None:
"""Update a single device state attribute."""
self._extra_state_attributes.update({key: value})
self.async_write_ha_state()
@callback
def async_set_state(self, attr_id: int, attr_name: str, value: Any) -> None:
"""Set the entity state."""
async def async_will_remove_from_hass(self) -> None:
"""Disconnect entity object when removed."""
for unsub in self._unsubs[:]:
unsub()
self._unsubs.remove(unsub)
@callback
def async_accept_signal(
self,
channel: ZigbeeChannel | None,
signal: str,
func: Callable[..., Any],
signal_override=False,
):
"""Accept a signal from a channel."""
unsub = None
if signal_override:
unsub = async_dispatcher_connect(self.hass, signal, func)
else:
assert channel
unsub = async_dispatcher_connect(
self.hass, f"{channel.unique_id}_{signal}", func
)
self._unsubs.append(unsub)
def log(self, level: int, msg: str, *args, **kwargs):
"""Log a message."""
msg = f"%s: {msg}"
args = (self.entity_id,) + args
_LOGGER.log(level, msg, *args, **kwargs)
class ZhaEntity(BaseZhaEntity, RestoreEntity):
"""A base class for non group ZHA entities."""
def __init_subclass__(cls, id_suffix: str | None = None, **kwargs) -> None:
"""Initialize subclass.
:param id_suffix: suffix to add to the unique_id of the entity. Used for multi
entities using the same channel/cluster id for the entity.
"""
super().__init_subclass__(**kwargs)
if id_suffix:
cls.unique_id_suffix = id_suffix
def __init__(
self,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs: Any,
) -> None:
"""Init ZHA entity."""
super().__init__(unique_id, zha_device, **kwargs)
ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
ch_names = ", ".join(sorted(ch.name for ch in channels))
self._name: str = f"{zha_device.name} {ieeetail} {ch_names}"
if self.unique_id_suffix:
self._name += f" {self.unique_id_suffix}"
self.cluster_channels: dict[str, ZigbeeChannel] = {}
for channel in channels:
self.cluster_channels[channel.name] = channel
@classmethod
def create_entity(
cls,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs,
) -> ZhaEntity | None:
"""Entity Factory.
Return entity if it is a supported configuration, otherwise return None
"""
return cls(unique_id, zha_device, channels, **kwargs)
@property
def available(self) -> bool:
"""Return entity availability."""
return self._zha_device.available
async def async_added_to_hass(self) -> None:
"""Run when about to be added to hass."""
self.remove_future = asyncio.Future()
self.async_accept_signal(
None,
f"{SIGNAL_REMOVE}_{self.zha_device.ieee}",
functools.partial(self.async_remove, force_remove=True),
signal_override=True,
)
if last_state := await self.async_get_last_state():
self.async_restore_last_state(last_state)
self.async_accept_signal(
None,
f"{self.zha_device.available_signal}_entity",
self.async_state_changed,
signal_override=True,
)
self._zha_device.gateway.register_entity_reference(
self._zha_device.ieee,
self.entity_id,
self._zha_device,
self.cluster_channels,
self.device_info,
self.remove_future,
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect entity object when removed."""
await super().async_will_remove_from_hass()
self.zha_device.gateway.remove_entity_reference(self)
self.remove_future.set_result(True)
@callback
def async_restore_last_state(self, last_state) -> None:
"""Restore previous state."""
async def async_update(self) -> None:
"""Retrieve latest state."""
tasks = [
channel.async_update()
for channel in self.cluster_channels.values()
if hasattr(channel, "async_update")
]
if tasks:
await asyncio.gather(*tasks)
class ZhaGroupEntity(BaseZhaEntity):
"""A base class for ZHA group entities."""
def __init__(
self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> None:
"""Initialize a light group."""
super().__init__(unique_id, zha_device, **kwargs)
self._available = False
self._group = zha_device.gateway.groups.get(group_id)
self._name = f"{self._group.name}_zha_group_0x{group_id:04x}"
self._group_id: int = group_id
self._entity_ids: list[str] = entity_ids
self._async_unsub_state_changed: CALLBACK_TYPE | None = None
self._handled_group_membership = False
self._change_listener_debouncer: Debouncer | None = None
@property
def available(self) -> bool:
"""Return entity availability."""
return self._available
@classmethod
def create_entity(
cls, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> ZhaGroupEntity | None:
"""Group Entity Factory.
Return entity if it is a supported configuration, otherwise return None
"""
return cls(entity_ids, unique_id, group_id, zha_device, **kwargs)
async def _handle_group_membership_changed(self):
"""Handle group membership changed."""
# Make sure we don't call remove twice as members are removed
if self._handled_group_membership:
return
self._handled_group_membership = True
await self.async_remove(force_remove=True)
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
await self.async_update()
self.async_accept_signal(
None,
f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{self._group_id:04x}",
self._handle_group_membership_changed,
signal_override=True,
)
if self._change_listener_debouncer is None:
self._change_listener_debouncer = Debouncer(
self.hass,
_LOGGER,
cooldown=UPDATE_GROUP_FROM_CHILD_DELAY,
immediate=False,
function=functools.partial(self.async_update_ha_state, True),
)
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self._entity_ids, self.async_state_changed_listener
)
def send_removed_signal():
async_dispatcher_send(
self.hass, SIGNAL_GROUP_ENTITY_REMOVED, self._group_id
)
self.async_on_remove(send_removed_signal)
@callback
def async_state_changed_listener(self, event: Event):
"""Handle child updates."""
# Delay to ensure that we get updates from all members before updating the group
assert self._change_listener_debouncer
self.hass.create_task(self._change_listener_debouncer.async_call())
async def async_will_remove_from_hass(self) -> None:
"""Handle removal from Home Assistant."""
await super().async_will_remove_from_hass()
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self) -> None:
"""Update the state of the group entity."""
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
from exmail.client.api.base import EmailBaseAPI
class Department(EmailBaseAPI):
def create(self, department_data):
'''
        Create a department.
        :param department_data: data required to create the department
:return:
'''
return self._post(
'/department/create',
data=department_data
)
def update(self, department_data):
'''
        Update a department.
        :param department_data: data required to update the department
:return:
'''
return self._post(
'/department/update',
data=department_data
)
def delete(self, _id):
'''
        Delete a department.
        :param _id: department id (note: the root department cannot be deleted, nor can a department that still has sub-departments or members)
:return:
'''
return self._get(
'/department/delete',
{'id': _id}
)
def list(self, _id=1):
"""
        Get the department list.
        :param _id: parent department id (if omitted, defaults to the root department, whose id is 1)
        :return: department list data, sorted by the departments' order field in ascending order
"""
return self._get(
'/department/list',
{'id': _id}
)
def search(self, name, fuzzy=False):
'''
        Search for a department.
        :param name: department name to search for; must be a valid name
        :param fuzzy: whether to perform fuzzy matching
:return:
'''
return self._post(
'/department/search',
data={'name': name, 'fuzzy': int(fuzzy)}
)
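# A minimal usage sketch (hypothetical wiring; how the underlying EmailBaseAPI
# client is constructed depends on the exmail client configuration, and the
# field names in the payload are illustrative only):
#
#     department = Department(client)
#     department.create({'name': 'Engineering', 'parentid': 1})
#     departments = department.list(_id=1)
#     results = department.search('Engineering', fuzzy=True)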
|
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model. """
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.configuration_albert import AlbertConfig
from transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer
from transformers.modeling_utils import PreTrainedModel
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
logger = logging.getLogger(__name__)
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-pytorch_model.bin",
"albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-pytorch_model.bin",
"albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-pytorch_model.bin",
"albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-pytorch_model.bin",
"albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin",
"albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin",
"albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin",
"albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin",
}
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
print(name)
for name, array in zip(names, arrays):
original_name = name
# If saved from the TF HUB module
name = name.replace("module/", "")
# Renaming and simplifying
name = name.replace("ffn_1", "ffn")
name = name.replace("bert/", "albert/")
name = name.replace("attention_1", "attention")
name = name.replace("transform/", "")
name = name.replace("LayerNorm_1", "full_layer_layer_norm")
name = name.replace("LayerNorm", "attention/LayerNorm")
name = name.replace("transformer/", "")
# The feed forward layer had an 'intermediate' step which has been abstracted away
name = name.replace("intermediate/dense/", "")
name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
# ALBERT attention was split between self and output which have been abstracted away
name = name.replace("/output/", "/")
name = name.replace("/self/", "/")
# The pooler is a linear layer
name = name.replace("pooler/dense", "pooler")
# The classifier was simplified to predictions from cls/predictions
name = name.replace("cls/predictions", "predictions")
name = name.replace("predictions/attention", "predictions")
# Naming was changed to be more explicit
name = name.replace("embeddings/attention", "embeddings")
name = name.replace("inner_group_", "albert_layers/")
name = name.replace("group_", "albert_layer_groups/")
# Classifier
if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
name = "classifier/" + name
# No ALBERT model currently handles the next sentence prediction task
if "seq_relationship" in name:
continue
name = name.split("/")
# Ignore the gradients applied by the LAMB/ADAM optimizers.
if "adam_m" in name or "adam_v" in name or "global_step" in name:
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {} from {}".format(name, original_name))
pointer.data = torch.from_numpy(array)
return model
class AlbertEmbeddings(BertEmbeddings):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
class AlbertAttention(BertSelfAttention):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.num_attention_heads, self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
# Update hyper params and store pruned heads
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_ids, attention_mask=None, head_mask=None):
mixed_query_layer = self.query(input_ids)
mixed_key_layer = self.key(input_ids)
mixed_value_layer = self.value(input_ids)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
# Should find a better way to do this
w = (
self.dense.weight.t()
.view(self.num_attention_heads, self.attention_head_size, self.hidden_size)
.to(context_layer.dtype)
)
b = self.dense.bias.to(context_layer.dtype)
projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
projected_context_layer_dropout = self.dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,)
class AlbertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_states, attention_mask=None, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
ffn_output = self.ffn(attention_output[0])
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:] # add attentions if we output them
class AlbertLayerGroup(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(self, hidden_states, attention_mask=None, head_mask=None):
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index])
hidden_states = layer_output[0]
if self.output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if self.output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if self.output_attentions:
outputs = outputs + (layer_attentions,)
return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
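        # ALBERT shares parameters across layers: the forward pass below runs
        # num_hidden_layers steps but reuses the same num_hidden_groups layer
        # groups, so the parameter count does not grow with the depth.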
def forward(self, hidden_states, attention_mask=None, head_mask=None):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_attentions = ()
if self.output_hidden_states:
all_hidden_states = (hidden_states,)
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
)
hidden_states = layer_group_output[0]
if self.output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class AlbertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = AlbertConfig
pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "albert"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
ALBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.AlbertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups.
If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there
is a total of 4 different layers.
These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
while [2,3] correspond to the two inner groups of the second hidden layer.
        Any layer with an index other than [0,1,2,3] will result in an error.
See base class PreTrainedModel for more information about head pruning
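        For example, with two inner groups per layer group, ``heads_to_prune = {0: [0], 3: [1, 2]}``
        prunes head 0 of the first inner layer of the first group and heads 1 and 2 of the
        second inner layer of the second group.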
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
from transformers import AlbertModel, AlbertTokenizer
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertModel.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
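        # Positions to attend to get 0.0 and masked positions get -10000.0, so
        # adding this mask to the raw attention scores drives masked positions
        # to ~0 after the softmax.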
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs
class AlbertMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states + self.bias
return prediction_scores
@add_start_docstrings(
"Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)
def get_output_embeddings(self):
return self.predictions.decoder
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with
labels in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
from transformers import AlbertTokenizer, AlbertForMaskedLM
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForMaskedLM.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_outputs = outputs[0]
prediction_scores = self.predictions(sequence_outputs)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs
@add_start_docstrings(
"""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import AlbertTokenizer, AlbertForSequenceClassification
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForSequenceClassification.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
end_scores: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
Examples::
# The checkpoint albert-base-v2 is not fine-tuned for question answering. Please see the
# examples/run_squad.py example to see how to fine-tune a model to a question answering task.
from transformers import AlbertTokenizer, AlbertForQuestionAnswering
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_dict = tokenizer.encode_plus(question, text, return_tensors='pt')
start_scores, end_scores = model(**input_dict)
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
|
import datetime as dt
import os
# Log file diff checker.
# Takes a file name and returns the lines added since the previous update.
# Holds the time at which the last update was run.
# Does not measure the total time an update takes; that is left to the layer above.
# The watched file is assumed to grow only by appending lines.
class LogReader:
    # filename, last_update, tail_ix
    def __init__(self, filename):
        self.filename = filename
        self.last_update = dt.datetime.now()
        assert os.path.isfile(filename), 'The specified file does not exist'
        with open(self.filename, 'r') as f:
            lines = f.readlines()
            self.tail_ix = len(lines)  # last line number (1-based) = the next update reads from this line onward
        # TODO it would be even better to assert that the file exists (os.path.exists via import os?)
        # TODO is this the right way to log? Wouldn't a View class be better?
        # self.__printlog('Initialized successfully. Watching ' + self.filename + '.')
    # Sync: update last_update, collect the diff, and update tail_ix.
    # Returns the diff (a list of strings); an empty list if there is none.
    def added_lines(self):
        self.last_update = dt.datetime.now()
        with open(self.filename, 'r') as f:
            all_lines = f.readlines()
        added_lines = all_lines[self.tail_ix:]
        self.tail_ix = len(all_lines)
        return added_lines
    # def __printlog(self, mes):
    #     print('[' + str(dt.datetime.now())[:-7] + ']', mes)
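# Hedged usage sketch (not part of the original module): poll a log file and
# print the lines appended since the previous check. The path 'sample.log' is
# hypothetical and must already exist.
if __name__ == '__main__':
    import time
    reader = LogReader('sample.log')
    for _ in range(3):
        time.sleep(1.0)
        for added in reader.added_lines():
            print(added.rstrip())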
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-20 21:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='state',
field=models.CharField(choices=[(b'review', b'Review'), (b'archived', b'Archived'), (b'private', b'Private'), (b'published', b'Published')], default=b'private', max_length=20, verbose_name='State'),
),
]
|
import time
import torch
from torchtext.experimental.datasets import AG_NEWS
from torchtext.experimental.vectors import FastText as FastTextExperimental
from torchtext.vocab import FastText
def benchmark_experimental_vectors():
def _run_benchmark_lookup(tokens, vector):
t0 = time.monotonic()
for token in tokens:
vector[token]
print("Lookup time:", time.monotonic() - t0)
train, = AG_NEWS(data_select='train')
vocab = train.get_vocab()
tokens = []
for (label, text) in train:
for id in text.tolist():
tokens.append(vocab.itos[id])
# existing FastText construction
print("Existing FastText - Not Jit Mode")
t0 = time.monotonic()
fast_text = FastText()
print("Construction time:", time.monotonic() - t0)
_run_benchmark_lookup(tokens, fast_text)
# experimental FastText construction
print("FastText Experimental")
t0 = time.monotonic()
fast_text_experimental = FastTextExperimental(validate_file=False)
print("Construction time:", time.monotonic() - t0)
# not jit lookup
print("FastText Experimental - Not Jit Mode")
_run_benchmark_lookup(tokens, fast_text_experimental)
# jit lookup
print("FastText Experimental - Jit Mode")
jit_fast_text_experimental = torch.jit.script(fast_text_experimental)
_run_benchmark_lookup(tokens, jit_fast_text_experimental)
if __name__ == "__main__":
benchmark_experimental_vectors()
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from tacker.db.migration.models import head # noqa
from tacker.db.migration import purge_tables
from tacker._i18n import _  # assumed i18n helper; the snippet calls _() but omits its import
HEAD_FILENAME = 'HEAD'
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_db_opts, 'database')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_head_file(config)
def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
    if CONF.command.delta:
        revision = '+%s' % str(CONF.command.delta)
    else:
        revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
update_head_file(config)
def validate_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
if (os.path.isfile(head_path) and
open(head_path).read().strip() == script.get_current_head()):
return
else:
alembic_util.err(_('HEAD file does not match migration timeline head'))
def update_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
with open(head_path, 'w+') as f:
f.write(script.get_current_head())
def purge_deleted(config, cmd):
"""Remove database records that have been previously soft deleted."""
purge_tables.purge_deleted(config.tacker_config,
CONF.command.resource,
CONF.command.age,
CONF.command.granularity)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
parser = subparsers.add_parser('upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
parser = subparsers.add_parser('purge_deleted')
parser.set_defaults(func=purge_deleted)
# positional parameter
parser.add_argument(
'resource',
choices=['all', 'events', 'vnf', 'vnfd', 'vims'],
help=_('Resource name for which deleted entries are to be purged.'))
# optional parameter, can be skipped. default='90'
parser.add_argument('-a', '--age', nargs='?', default='90',
                        help=_('How long to preserve deleted data, '
                               'defaults to 90'))
# optional parameter, can be skipped. default='days'
parser.add_argument(
'-g', '--granularity', default='days',
choices=['days', 'hours', 'minutes', 'seconds'],
help=_('Granularity to use for age argument, defaults to days.'))
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'tacker.db.migration:alembic_migrations')
# attach the Tacker conf to the Alembic conf
config.tacker_config = CONF
CONF()
# TODO(gongysh) enable logging
CONF.command.func(config, CONF.command.name)
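# Hedged usage note (not part of the original module): main() is normally wired
# to a console-script entry point; a typical invocation looks like
#   tacker-db-manage --config-file /etc/tacker/tacker.conf upgrade head
# (the entry-point name and config path follow common OpenStack convention and
# are assumptions here, not taken from this file).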
|
def check_lists(l1, l2):
    # Assumed intent (the original snippet was left unfinished): report whether
    # every element of l2 also appears in l1.
    def contains(l1, l2):
        return all(x in l1 for x in l2)
    return contains(l1, l2)
|
"""Loads deepmind_lab.so."""
import imp
import pkg_resources
imp.load_dynamic(__name__, pkg_resources.resource_filename(
__name__, 'deepmind_lab.so'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-05 02:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("PartyListV2", "0002_restrictedguest"),
]
operations = [
migrations.AddField(
model_name="partyguest",
name="hasPrepartyAccess",
field=models.BooleanField(default=False),
),
]
|
import random
from urllib.request import urlopen
import sys
WORD_URL="http://learncodethehardway.org/words.txt"
WORDS=[]
PHRASES={"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef _init_(self,***)":
"class %%% has-a _init_that takes self and *** params.",
"class %%%(object):\n\tdef ***(self,@@@):
"class %%% has-a function *** that takes self and @@@ params.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function,call it with params self,@@@.",
"***.***='***'":
"From *** get the *** attribute and set it to '***'."}
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASES_FIRST=True
else:
PHRASES_FIRST=False
for word in urlopen(WORLD_URL.readlines():
WORDS.appen(str(word.strip(),encoding="utf-8"))
def convert(snippet,phrase):
class_names=[w.capitalized() for w in
random.sample(WORDS,snippet.count("%%%"))]
other_names=random.sample(WORDS,sinppet.count("***"))
results=[]
param_name=[]
for i in range(0,snippet.count("@@@")):
param-count=random.randint(1,3)
param_names.append(','.join(
radom.sample(WORDS,param_count)))
for word in class_names:
result=result.replace("%%%",word,1)
for word in other_names:
result=result.replace("@@@",word,1)
results.append(result0
return results
try:
while True:
snippes=list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase=PHRASES[snippet,phrase)
if PHRASES_FIRST:
question,answer=answer,question
print(question)
input(">")
print(f"ANSWER: {answer}\n\")
except EOFError:
print("\nBye")
|
"""Auto-generated file, do not edit by hand. HR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HR = PhoneMetadata(id='HR', country_code=385, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-7]\\d{5,8}|[89]\\d{6,11}', possible_number_pattern='\\d{6,12}'),
fixed_line=PhoneNumberDesc(national_number_pattern='1\\d{7}|(?:2[0-3]|3[1-5]|4[02-47-9]|5[1-3])\\d{6}', possible_number_pattern='\\d{6,8}', example_number='12345678'),
mobile=PhoneNumberDesc(national_number_pattern='9[1257-9]\\d{6,10}', possible_number_pattern='\\d{8,12}', example_number='912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{4,7}', possible_number_pattern='\\d{7,10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='6(?:[09]\\d{7}|[145]\\d{4,7})', possible_number_pattern='\\d{6,9}', example_number='611234'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='7[45]\\d{4,7}', possible_number_pattern='\\d{6,9}', example_number='741234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='62\\d{6,7}', possible_number_pattern='\\d{8,9}', example_number='62123456'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(1)(\\d{4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(6[09])(\\d{4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[09]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(62)(\\d{3})(\\d{3,4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['62'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='([2-5]\\d)(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['[2-5]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3})(\\d{3,4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{4})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3,4})(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3 \\4', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2,3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[145]|7'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[145]|7'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(80[01])(\\d{2})(\\d{2,3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(80[01])(\\d{3,4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule=u'0\\1')],
mobile_number_portable_region=True)
|
from django.contrib import messages
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ...admin.views import generic
from ..models import Agreement
from .forms import AgreementForm, FilterAgreementsForm
from .utils import disable_agreement, set_agreement_as_active
class AgreementAdmin(generic.AdminBaseMixin):
root_link = "misago:admin:settings:agreements:index"
model = Agreement
form_class = AgreementForm
templates_dir = "misago/admin/agreements"
message_404 = _("Requested agreement does not exist.")
def handle_form(self, form, request, target):
form.save()
if self.message_submit:
messages.success(
request, self.message_submit % {"title": target.get_final_title()}
)
class AgreementsList(AgreementAdmin, generic.ListView):
items_per_page = 30
ordering = [("-id", _("From newest")), ("id", _("From oldest"))]
filter_form = FilterAgreementsForm
selection_label = _("With agreements: 0")
empty_selection_label = _("Select agreements")
mass_actions = [
{
"action": "delete",
"name": _("Delete agreements"),
"confirmation": _("Are you sure you want to delete those agreements?"),
}
]
def get_queryset(self):
qs = super().get_queryset()
return qs.select_related()
def action_delete(self, request, items):
items.delete()
Agreement.objects.invalidate_cache()
messages.success(request, _("Selected agreements have been deleted."))
class NewAgreement(AgreementAdmin, generic.ModelFormView):
message_submit = _('New agreement "%(title)s" has been saved.')
def handle_form(self, form, request, target):
super().handle_form(form, request, target)
form.instance.set_created_by(request.user)
form.instance.save()
Agreement.objects.invalidate_cache()
class EditAgreement(AgreementAdmin, generic.ModelFormView):
message_submit = _('Agreement "%(title)s" has been edited.')
def handle_form(self, form, request, target):
super().handle_form(form, request, target)
form.instance.last_modified_on = timezone.now()
form.instance.set_last_modified_by(request.user)
form.instance.save()
Agreement.objects.invalidate_cache()
class DeleteAgreement(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
target.delete()
Agreement.objects.invalidate_cache()
message = _('Agreement "%(title)s" has been deleted.')
messages.success(request, message % {"title": target.get_final_title()})
class SetAgreementAsActive(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
set_agreement_as_active(target, commit=True)
message = _('Agreement "%(title)s" has been set as active for type "%(type)s".')
targets_names = {
"title": target.get_final_title(),
"type": target.get_type_display(),
}
messages.success(request, message % targets_names)
class DisableAgreement(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
disable_agreement(target, commit=True)
message = _('Agreement "%(title)s" has been disabled.') % {
"title": target.get_final_title()
}
messages.success(request, message)
|
"""
WSGI config for apiwrapper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiwrapper.settings')
application = get_wsgi_application()
|
from django.urls import path
from profiles_api import views
urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
]
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
class GenerateSequence(ExternalTransform):
"""
An external PTransform which provides a bounded or unbounded stream of
integers.
Note: To use this transform, you need to start the Java expansion service.
Please refer to the portability documentation on how to do that. The
expansion service address has to be provided when instantiating this
transform. During pipeline translation this transform will be replaced by
the Java SDK's GenerateSequence.
If you start Flink's job server, the expansion service will be started on
port 8097. This is also the configured default for this transform. For a
different address, please set the expansion_service parameter.
For more information see:
- https://beam.apache.org/documentation/runners/flink/
- https://beam.apache.org/roadmap/portability/
Note: Runners need to support translating Read operations in order to use
this source. At the moment only the Flink Runner supports this.
Experimental; no backwards compatibility guarantees.
"""
URN = 'beam:external:java:generate_sequence:v1'
def __init__(
self,
start,
stop=None,
elements_per_period=None,
max_read_time=None,
expansion_service=None):
super(GenerateSequence, self).__init__(
self.URN,
ImplicitSchemaPayloadBuilder({
'start': start,
'stop': stop,
'elements_per_period': elements_per_period,
'max_read_time': max_read_time,
}),
expansion_service)
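# Hedged usage sketch (not part of the original module): producing a bounded
# range of integers through a running Java expansion service. The address
# 'localhost:8097' is the Flink job server default mentioned in the docstring;
# treat the snippet as an illustration rather than a tested recipe.
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#       _ = (p
#            | GenerateSequence(start=0, stop=10,
#                               expansion_service='localhost:8097')
#            | beam.Map(print))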
|
import azureml
from azureml.core import VERSION
from azureml.core import Workspace, Experiment, Datastore, Environment
from azureml.core.runconfig import RunConfiguration
from azureml.data.datapath import DataPath, DataPathComputeBinding
from azureml.data.data_reference import DataReference
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import PythonScriptStep, EstimatorStep
from azureml.train.estimator import Estimator
import sys, getopt, os
## Get arguments
def printhelp():
print ('Arguments:')
print (' -d Data Store name')
print (' -p Data Store Path')
print (' -c Compute Target name')
print (' -v Universal Package version (for deployment and inferencing code)')
print (' -s Azure Subscription id')
print (' -a Storage Account name')
print (' -k Storage Account key')
print (' -r Resource Group name')
print (' -w Machine Learning workspace name')
datastorename=''
datastorepath=''
computetarget=''
packageversion=''
workspace_name=''
subscription_id=''
resource_group=''
storage_account=''
storage_account_key=''
try:
print('Arguments: ', sys.argv[1:])
opts, args = getopt.getopt(sys.argv[1:],"d:p:c:v:s:a:k:r:w:")
except getopt.GetoptError:
    printhelp()
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        printhelp()
        sys.exit()
elif opt == '-d':
datastorename = arg
elif opt == '-p':
datastorepath = arg
elif opt == '-c':
computetarget = arg
elif opt == '-v':
packageversion = arg
elif opt == '-s':
subscription_id = arg
elif opt == '-a':
storage_account = arg
elif opt == '-k':
storage_account_key = arg
elif opt == '-r':
resource_group = arg
elif opt == '-w':
workspace_name = arg
print("Azure ML SDK Version: ", VERSION)
#### Connect to our workspace ####
##################################
# workspace
ws = Workspace.get( name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group)
# data
ds = Datastore.register_azure_blob_container(workspace=ws,
datastore_name=datastorename,
container_name='seer-container',
account_name=storage_account,
account_key=storage_account_key,
create_if_not_exists=True)
datastore = ws.datastores[datastorename]
# compute target
try:
cpu_cluster = ComputeTarget(workspace=ws, name=computetarget)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
min_nodes=1,
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, computetarget, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
compute = ws.compute_targets[computetarget]
#### Define Pipeline! ####
##########################
# The following will be created and then run:
# 1. Pipeline Parameters
# 2. Data Process Step
# 3. Training Step
# 4. Model Registration Step
# 5. Pipeline registration
# 6. Submit the pipeline for execution
## Pipeline Parameters ##
# We need to tell the Pipeline what it needs to learn to see!
datapath = DataPath(datastore=datastore, path_on_datastore=datastorepath)
data_path_pipeline_param = (PipelineParameter(name="data",
default_value=datapath),
DataPathComputeBinding(mode='mount'))
# Configuration for data prep and training steps #
dataprepEnvironment = Environment.from_pip_requirements('dataprepenv', 'requirements-dataprepandtraining.txt')
dataprepRunConfig = RunConfiguration()
dataprepRunConfig.environment = dataprepEnvironment
## Data Process Step ##
# parse.py file parses the images in our data source #
seer_tfrecords = PipelineData(
"tfrecords_set",
datastore=datastore,
is_directory=True
)
prepStep = PythonScriptStep(
'parse.py',
source_directory='.',
name='Data Preparation',
compute_target=compute,
arguments=["--source_path", data_path_pipeline_param, "--target_path", seer_tfrecords],
runconfig=dataprepRunConfig,
inputs=[data_path_pipeline_param],
outputs=[seer_tfrecords],
allow_reuse=True # Allow reuse of the data prep step
)
## Training Step ##
# train.py does the training based on the processed data #
seer_training = PipelineData(
"train",
datastore=datastore,
is_directory=True
)
train = Estimator(source_directory='.',
compute_target=compute,
entry_script='train.py',
use_gpu=True,
pip_requirements_file='requirements-dataprepandtraining.txt')
trainStep = EstimatorStep(
name='Model Training',
estimator=train,
estimator_entry_script_arguments=["--source_path", seer_tfrecords,
"--target_path", seer_training,
"--epochs", 5, # Consider transfer learning. See line 111 in train.py file.
"--batch", 10,
"--lr", 0.001],
inputs=[seer_tfrecords],
outputs=[seer_training],
compute_target=compute
)
## Register Model Step ##
# Once training is complete, register.py registers the model with AML #
# Configuration for registration step #
registerEnvironment = Environment.from_pip_requirements('registerenv', 'requirements-registration.txt')
registerRunConfig = RunConfiguration()
registerRunConfig.environment = registerEnvironment
seer_model = PipelineData(
"model",
datastore=datastore,
is_directory=True
)
registerStep = PythonScriptStep(
'register.py',
source_directory='.',
name='Model Registration',
arguments=["--source_path", seer_training,
"--target_path", seer_model,
"--universal_package_version", packageversion],
inputs=[seer_training],
outputs=[seer_model],
compute_target=compute,
runconfig=registerRunConfig
)
## Create and publish the Pipeline ##
# We now define and publish the pipeline #
pipeline = Pipeline(workspace=ws, steps=[prepStep, trainStep, registerStep])
published_pipeline = pipeline.publish(
name="Seer Pipeline",
description="Transfer learned image classifier. Uses folders as labels.")
## Submit the pipeline to be run ##
# Finally, we submit the pipeline for execution #
pipeline_run = Experiment(ws, 'seer').submit(published_pipeline, tags={'universalPackageVersion': packageversion})
print('Run created with ID: ', pipeline_run.id)
|
import torch
import torch.nn as nn
class Normalize_layer(nn.Module):
def __init__(self, mean, std):
super(Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),
requires_grad=False)
def forward(self, input):
return input.sub(self.mean).div(self.std)
class noise_Normalize_layer(nn.Module):
def __init__(self, mean, std, input_noise=False):
super(noise_Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.input_noise = input_noise
self.alpha_i = nn.Parameter(torch.Tensor([0.25]), requires_grad=True)
def forward(self, input):
output = input.sub(self.mean).div(self.std)
input_std = output.std().item()
input_noise = output.clone().normal_(0, input_std)
return output + input_noise * self.alpha_i * self.input_noise
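# Hedged usage sketch (not part of the original module): prepend the fixed
# normalization layer to a small conv stack so raw [0, 1] images are
# standardized first. The CIFAR-10-style mean/std values are illustrative only.
if __name__ == '__main__':
    mean = [0.4914, 0.4822, 0.4465]
    std = [0.2470, 0.2435, 0.2616]
    model = nn.Sequential(Normalize_layer(mean, std),
                          nn.Conv2d(3, 16, kernel_size=3, padding=1))
    x = torch.rand(2, 3, 32, 32)  # fake batch of RGB images in [0, 1]
    print(model(x).shape)         # expected: torch.Size([2, 16, 32, 32])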
|
import logging
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from modeling.layers.epipolar import Epipolar
from modeling import registry
from core import cfg
from .basic_batch import find_tensor_peak_batch
from utils.logger import setup_logger
from utils.model_serialization import load_state_dict
# logger = logging.getLogger(__name__)
logger = setup_logger("resnet", cfg.FOLDER_NAME)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = norm_layer(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.out_channels = 512 * block.expansion
#self.fc = nn.Linear(self.out_channels, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):
if norm_layer is None:
norm_layer = nn.BatchNorm2d
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
#x = self.fc(x)
return x
@registry.BACKBONES.register('R-18')
def resnet18(cfg, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
@registry.BACKBONES.register('R-34')
def resnet34(cfg, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
return model
@registry.BACKBONES.register('R-50')
def resnet50(cfg, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
@registry.BACKBONES.register('R-101')
def resnet101(cfg, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)
return model
@registry.BACKBONES.register('R-152')
def resnet152(cfg, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)
return model
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Written by Chunyu Wang (chnuwa@microsoft.com), modified by Yihui He
# ------------------------------------------------------------------------------
class PoseResNet(nn.Module):
def __init__(self, block, layers, cfg, **kwargs):
if cfg.BACKBONE.BN_MOMENTUM < 0:
self.BN_MOMENTUM = None
else:
self.BN_MOMENTUM = cfg.BACKBONE.BN_MOMENTUM
DECONV_WITH_BIAS = False
NUM_DECONV_LAYERS = 3
NUM_DECONV_FILTERS = [256, 256, 256]
NUM_DECONV_KERNELS = [4, 4, 4]
FINAL_CONV_KERNEL = 1 #cfg.POSE_RESNET.FINAL_CONV_KERNEL
self.inplanes = 64
self.deconv_with_bias = DECONV_WITH_BIAS
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=self.BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
NUM_DECONV_LAYERS,
NUM_DECONV_FILTERS,
NUM_DECONV_KERNELS,
)
self.final_layer = nn.Conv2d(
in_channels=NUM_DECONV_FILTERS[-1],
out_channels=cfg.KEYPOINT.NUM_PTS,
kernel_size=FINAL_CONV_KERNEL,
stride=1,
padding=1 if FINAL_CONV_KERNEL == 3 else 0
)
if 'epipolarpose' in cfg.BACKBONE.BODY:
if cfg.EPIPOLAR.MERGE == 'both':
self.epipolar_sampler1 = Epipolar()
self.epipolar_sampler = Epipolar()
else:
self.epipolar_sampler = None
self.epipolar_sampler1 = None
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=self.BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
assert num_layers == len(num_filters), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=self.BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x, other_inputs=[None, None, None, None, None, None, None]):
batch_size = x.shape[0]
other_features, other_KRT, other_heatmaps, KRT, camera, other_camera, other_img = other_inputs
features, heatmaps, batch_locs, batch_scos, corr_poss, depths = [], [], [], [], [], []
# 3 x 256 x 256
x = self.conv1(x)
# 128 x 128
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
# 256 x 64 x 64
def getOtherFeat(feat, sampler=None):
# skip feature aggregation for last layer
corr_pos = None
depth = None
if other_features is None:
# normal hourglass
return feat, None, None, None
if 'epipolarpose' in cfg.BACKBONE.BODY:
ret, corr_pos, depth, sample_locs = \
sampler(feat, other_features, KRT, other_KRT, \
camera=camera, other_camera=other_camera)
return ret + feat, corr_pos, depth, sample_locs
if cfg.EPIPOLAR.MERGE == 'early':
feature = x
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)
depths.append(depth)
corr_poss.append(corr_pos)
elif cfg.EPIPOLAR.MERGE == 'both':
feature = x
x, _, _, _ = getOtherFeat(feature, sampler=self.epipolar_sampler)
x = self.layer2(x)
# 512 x 32 × 32
x = self.layer3(x)
# 1024 x 16 × 16
x = self.layer4(x)
# 2048 x 8 x 8
feature = self.deconv_layers(x)
#256 x 64 x 64
if cfg.EPIPOLAR.MERGE == 'late':
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)
depths.append(depth)
corr_poss.append(corr_pos)
elif cfg.EPIPOLAR.MERGE == 'both':
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler1)
depths.append(depth)
corr_poss.append(corr_pos)
else:
x = feature
#20 x 64 x 64
heatmaps.append(self.final_layer(x))
# The location of the current batch
for ibatch in range(batch_size):
batch_location, batch_score = find_tensor_peak_batch(heatmaps[-1][ibatch],
cfg.KEYPOINT.SIGMA,
cfg.BACKBONE.DOWNSAMPLE)
batch_locs.append(batch_location)
batch_scos.append(batch_score)
batch_locs, batch_scos = torch.stack(batch_locs), torch.stack(batch_scos)
if other_features is None:
corr_poss, depths = None, None
else:
corr_poss = corr_poss[-1]
depths = depths[-1]
return feature, heatmaps, batch_locs, batch_scos, corr_poss, depths, sample_locs, None
def init_weights(self, pretrained=None):
if pretrained is not None:
if isinstance(pretrained, str) and os.path.isfile(pretrained):
logger.info('=> loading pretrained model {}'.format(pretrained))
pretrained_state_dict = torch.load(pretrained)
else:
logger.info('=> loading pretrained model from web')
pretrained_state_dict = pretrained
logger.info('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init final conv weights from normal distribution')
            for name, m in self.final_layer.named_modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
#load_state_dict(self, pretrained_state_dict, prefix='resnet.')
#load_state_dict(self, pretrained_state_dict, prefix='backbone.')
load_state_dict(self, pretrained_state_dict, strict=False, ignored_layers=['final_layer.bias', 'final_layer.weight'], prefix=cfg.WEIGHTS_PREFIX, prefix_replace=cfg.WEIGHTS_PREFIX_REPLACE)
#self.load_state_dict(pretrained_state_dict, strict=False)
else:
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
# nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
resnet_spec = {'18': (BasicBlock, [2, 2, 2, 2]),
'34': (BasicBlock, [3, 4, 6, 3]),
'50': (Bottleneck, [3, 4, 6, 3]),
'101': (Bottleneck, [3, 4, 23, 3]),
'152': (Bottleneck, [3, 8, 36, 3])}
@registry.BACKBONES.register('poseR-18')
@registry.BACKBONES.register('poseR-34')
@registry.BACKBONES.register('poseR-50')
@registry.BACKBONES.register('poseR-101')
@registry.BACKBONES.register('poseR-152')
@registry.BACKBONES.register('epipolarposeR-18')
@registry.BACKBONES.register('epipolarposeR-34')
@registry.BACKBONES.register('epipolarposeR-50')
@registry.BACKBONES.register('epipolarposeR-101')
@registry.BACKBONES.register('epipolarposeR-152')
def get_pose_net(cfg, **kwargs):
num_layers = cfg.BACKBONE.BODY.split('-')[-1]
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, cfg, **kwargs)
if cfg.BACKBONE.PRETRAINED:
# model.init_weights(cfg.NETWORK.PRETRAINED)
if cfg.BACKBONE.PRETRAINED_WEIGHTS:
model.init_weights(cfg.BACKBONE.PRETRAINED_WEIGHTS)
else:
model.init_weights(model_zoo.load_url(model_urls['resnet'+num_layers]))
return model
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from st2client.models import core
LOG = logging.getLogger(__name__)
class KeyValuePair(core.Resource):
_alias = 'Key'
_display_name = 'Key Value Pair'
_plural = 'Keys'
_plural_display_name = 'Key Value Pairs'
_repr_attributes = ['name', 'value']
# Note: This is a temporary hack until we refactor client and make it support non id PKs
def get_id(self):
return self.name
def set_id(self, value):
pass
id = property(get_id, set_id)
|
import pandas as pd
import numpy as np
import statsmodels.api as sm
from enum import Enum
class ParValue(Enum):
TSTAT = 1
PVALUE = 2
STD = 3
class OneReg:
def __init__(self, reg, show_list=[], hide_list=[], blocks=[], bottom_blocks=[]):
self.reg = reg
if show_list == []:
self.show_list = []
for s in self.reg.params.keys():
if s not in hide_list:
self.show_list.append(s)
else:
self.show_list = show_list
self.blocks = blocks
self.bottom_block = bottom_blocks
def create_columns(self):
# first add the parameters of the reg
d = pd.Series(dtype=object)
for k in self.show_list:
if k in self.reg.params.keys():
v = self.reg.pvalues[k]
p = f'{np.round(self.reg.params[k], TableReg.round):,}'
for tr in TableReg.sign_tr:
if v <= tr:
p += '*'
# self = table_2.reg_list[0]
# update the v to be tstat or std depending on parameters
if TableReg.par_value == ParValue.TSTAT:
v = self.reg.tvalues[k]
if TableReg.par_value == ParValue.STD:
v = self.reg.bse[k]
v = r'(' + f'{np.round(v, TableReg.round):,}' + r')'
v_l = [len(x) for x in v.split('.')]
p_l = [len(x) for x in p.split('.')]
t = abs(v_l[0] - p_l[0])
t = r'\phantom{' + '*' * t + '}'
if v_l[0] > p_l[0]:
p = t + p
if v_l[0] < p_l[0]:
v = t + v
t = abs(v_l[1] - p_l[1])
t = r'\phantom{' + '*' * t + '}'
if v_l[1] > p_l[1]:
p = p+t
if v_l[1] < p_l[1]:
v = v+t
# else:
# p = r'\phantom{(}' + p + r'\phantom{)}'
d[k] = p
t = pd.Series(dtype=object)
t[''] =v
d = d.append(t)
else:
t = pd.Series(dtype=object)
t[k] = TableReg.missing_symbol
t[''] = TableReg.missing_symbol
d = d.append(t)
        # now we can add the "blocks", that is fixed effects and others
for block in self.blocks:
t = pd.Series(dtype=object)
t[TableReg.group_key] = ''
for k in block.keys():
t[k] = block[k]
d = d.append(t)
        # finally, additional info (R² and number of obs. by default, but you can add anything through bottom_blocks)
if TableReg.show_obs | TableReg.show_r2 | (len(self.bottom_block)>0):
t = pd.Series(dtype=object)
t[TableReg.group_key] = ''
t['Observations'] = f'{int(self.reg.nobs):,}'
if hasattr(self.reg,'rsquared_adj'):
t[r'$R^2$'] = np.round(self.reg.rsquared_adj,TableReg.round_r2)
else:
t[r'Pseudo $R^2$'] = np.round(self.reg.prsquared,TableReg.round_r2)
first_block = True
for block in self.bottom_block:
if first_block:
first_block = False
else:
t[TableReg.group_key] = ''
t = pd.Series(dtype=object)
for k in block.keys():
t[k] = block[k]
d = d.append(t)
return d
class TableReg:
missing_symbol = ' '
par_value = ParValue.STD
round = 4
round_r2 = 4
sign_tr = [0.1, 0.05, 0.01]
show_obs = True
show_r2 = True
variable_skip = r'\smallskip'
group_key = 'asgeg'
group_skip = r'\medskip'
equal_lines = False
def __init__(self, **option):
self.reg_list = []
self.hide_list = []
self.order = []
self.df = None
self.final_show_list = []
self.show_only_list = []
self.col_groups = []
self.rename_dict = {}
if 'hide_list' in option:
assert type(option['hide_list']) == list, "The overall hide list has to be a list"
self.hide_list = option['hide_list']
if 'show_only_list' in option:
assert type(option['show_only_list']) == list, "The show only list has to be a list"
self.show_only_list = option['show_only_list']
if 'order' in option:
assert type(option['order']) == list, "The order has to be a list"
self.order = option['order']
if 'col_groups' in option:
self.set_col_groups(option['col_groups'])
if 'rename_dict' in option:
self.set_rename_dict(option['rename_dict'])
def set_rename_dict(self, rename_dict):
assert type(rename_dict) == dict, "The rename dict must be a dictionary"
self.rename_dict = rename_dict
def set_col_groups(self, groups):
assert type(groups) == list, "The col order has to be a list of list"
for group in groups:
            assert type(group) == list, "Each col group must be a list ['name of group', first column in the group (int), last col in group (int)]"
self.col_groups = groups
def add_reg(self, reg, show_list=[], hide_list=[], blocks=[],bottom_blocks=[]):
hide_list = hide_list + self.hide_list
self.reg_list.append(OneReg(reg, show_list, hide_list, blocks, bottom_blocks))
def update_show_list(self):
if len(self.show_only_list) == 0:
show_list = []
for oneReg in self.reg_list:
show_list = list(set(show_list + oneReg.show_list))
show_list = list(np.sort(show_list))
show_list = self.order + [x for x in show_list if x not in self.order]
else:
show_list = self.show_only_list
col = []
for oneReg in self.reg_list:
oneReg.show_list = show_list
col.append(oneReg.create_columns())
        self.df = pd.concat(col, axis=1)
self.df.columns = [r'\parboxc{c}{0.6cm}{('+str(int(i+1))+')}' for i in range(self.df.shape[1])]
self.df = self.df.rename(index=self.rename_dict)
self.final_show_list = show_list
self.final_show_list = pd.Series(self.final_show_list).replace(self.rename_dict).values.tolist()
self.tex=''
def create_tex(self):
self.update_show_list()
        # writing the tex modifications to include name templates
tex = self.df.to_latex(escape=False)
cols = tex.split('\\begin{tabular}{')[1].split('}')[0]
rep = list(cols.replace('l','c'))
rep[0] = 'l'
tex = tex.replace(cols,''.join(rep))
if len(self.col_groups)>0:
# adding "group col names"
s = '\n '
s_line = '\n '
for g in self.col_groups:
s += '& \multicolumn{'+str(1+g[2]-g[1])+'}{c}{\parboxc{c}{0.6cm}{'+g[0]+'}}'
# s += '& \multicolumn{'+str(1+g[2]-g[1])+'}{c}{'+g[0]+'}'
s_line += r'\cmidrule(lr){'+str(g[1]+1)+'-'+str(g[2]+1)+'}'
s += r' \\'+'\n'
s_line += '\n'
ts = tex.split(r'\toprule')
tex = ts[0]+r'\toprule' + s +s_line+ ts[1]
ts = tex.split(r'\midrule')
tex = ts[0]+r'\midrule' + ts[1]
        # adding the skip between variables
        # first we extract the maximum length of an index label in the first column
L = 0
for x in self.df.index:
L = max(L,len(x))
L+=1
for i in range(1,len(self.final_show_list)):
a = self.final_show_list[i]
a += ' '*(L-len(a))+'&'
ts = tex.split(a)
temp = ts[0][:-4] + TableReg.variable_skip + ts[0][-4:]
tex=temp+a+ts[1]
# processing the group skip
t = None
for item in tex.split("\n"):
if TableReg.group_key in item:
t = item
# replacing specific rule
if t is not None:
self.tex = tex.replace(t, TableReg.group_skip + r'\\')
else:
self.tex = tex
def save_tex(self, save_dir):
self.create_tex()
tex = self.tex
if TableReg.equal_lines:
tex=tex.replace(r'\toprule',r'\hline')
tex=tex.replace(r'\midrule',r'\hline')
tex=tex.replace(r'\bottomrule',r'\hline')
with open(save_dir,'w') as txt:
txt.write(tex)
@staticmethod
def create_panel_of_tables(table_list, name_list, save_dir):
numbers = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split()
title_list = []
for i in range(len(table_list)):
table_list[i].create_tex()
title_list.append('Panel '+numbers[i]+': '+name_list[i])
tex = table_list[0].tex
        temp = r' \multicolumn{6}{c}{\parboxc{c}{0.7cm}{'+title_list[0]+r'}} \\'
ts = tex.split(r'\toprule')
tex = ts[0]+r'\toprule' +temp+r'\hline'+ts[1]
tex = tex.replace(r'\bottomrule','')
tex = tex.replace(r'\end{tabular}',r'asf')
tex = tex.replace('\\\\\n\nasf','\\bigskip \\\\ \n')
for i in range(1,len(table_list)):
t_tex = table_list[i].tex
temp = r' \multicolumn{6}{c}{\parboxc{c}{0.6cm}{' + title_list[i] + r'}} \\'
ts = t_tex.split(r'\toprule')
t_tex = ts[0] + r'\hline' + temp + r'\hline' + ts[1]
t = None
for item in t_tex.split("\n"):
if r'\begin{tabular}' in item:
t = item
t_tex = t_tex.replace(t,'')
if i+1 < len(table_list):
t_tex = t_tex.replace(r'\bottomrule','')
t_tex = t_tex.replace(r'\end{tabular}', r'asf')
t_tex = t_tex.replace('\\\\\n\nasf', '\\bigskip \\\\ \n')
tex +=t_tex
with open(save_dir,'w') as txt:
txt.write(tex)
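# Hedged usage sketch (not part of the original module): export two OLS fits
# side by side. The output path 'table.tex' is hypothetical, and the sketch
# assumes a pandas version compatible with the Series.append / to_latex
# behaviour relied on above.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    data = pd.DataFrame(rng.standard_normal((100, 3)), columns=['y', 'x1', 'x2'])
    reg1 = sm.OLS(data['y'], sm.add_constant(data[['x1']])).fit()
    reg2 = sm.OLS(data['y'], sm.add_constant(data[['x1', 'x2']])).fit()
    table = TableReg()
    table.add_reg(reg1)
    table.add_reg(reg2)
    table.save_tex('table.tex')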
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stim
import pytest
def test_identity():
p = stim.PauliString(3)
assert len(p) == 3
assert p[0] == p[1] == p[2] == 0
assert p.sign == +1
def test_from_str():
p = stim.PauliString("-_XYZ_ZYX")
assert len(p) == 8
assert p[0] == 0
assert p[1] == 1
assert p[2] == 2
assert p[3] == 3
assert p[4] == 0
assert p[5] == 3
assert p[6] == 2
assert p[7] == 1
assert p.sign == -1
p = stim.PauliString("")
assert len(p) == 0
assert p.sign == +1
p = stim.PauliString("X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("+X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("+iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("-iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == -1j
def test_equality():
assert not (stim.PauliString(4) == None)
assert not (stim.PauliString(4) == "other object")
assert not (stim.PauliString(4) == object())
assert stim.PauliString(4) != None
assert stim.PauliString(4) != "other object"
assert stim.PauliString(4) != object()
assert stim.PauliString(4) == stim.PauliString(4)
assert stim.PauliString(3) != stim.PauliString(4)
assert not (stim.PauliString(4) != stim.PauliString(4))
assert not (stim.PauliString(3) == stim.PauliString(4))
assert stim.PauliString("+X") == stim.PauliString("+X")
assert stim.PauliString("+X") != stim.PauliString("-X")
assert stim.PauliString("+X") != stim.PauliString("+Y")
assert stim.PauliString("+X") != stim.PauliString("-Y")
assert stim.PauliString("+X") != stim.PauliString("+iX")
assert stim.PauliString("+X") != stim.PauliString("-iX")
assert stim.PauliString("__") != stim.PauliString("_X")
assert stim.PauliString("__") != stim.PauliString("X_")
assert stim.PauliString("__") != stim.PauliString("XX")
assert stim.PauliString("__") == stim.PauliString("__")
def test_random():
p1 = stim.PauliString.random(100)
p2 = stim.PauliString.random(100)
assert p1 != p2
seen_signs = {stim.PauliString.random(1).sign for _ in range(200)}
assert seen_signs == {1, -1}
seen_signs = {stim.PauliString.random(1, allow_imaginary=True).sign for _ in range(200)}
assert seen_signs == {1, -1, 1j, -1j}
def test_str():
assert str(stim.PauliString(3)) == "+___"
assert str(stim.PauliString("XYZ")) == "+XYZ"
assert str(stim.PauliString("-XYZ")) == "-XYZ"
assert str(stim.PauliString("iXYZ")) == "+iXYZ"
assert str(stim.PauliString("-iXYZ")) == "-iXYZ"
def test_repr():
assert repr(stim.PauliString(3)) == 'stim.PauliString("+___")'
assert repr(stim.PauliString("-XYZ")) == 'stim.PauliString("-XYZ")'
vs = [
stim.PauliString(""),
stim.PauliString("ZXYZZ"),
stim.PauliString("-XYZ"),
stim.PauliString("I"),
stim.PauliString("iIXYZ"),
stim.PauliString("-iIXYZ"),
]
for v in vs:
r = repr(v)
assert eval(r, {'stim': stim}) == v
def test_commutes():
def c(a: str, b: str) -> bool:
return stim.PauliString(a).commutes(stim.PauliString(b))
assert c("", "")
assert c("X", "_")
assert c("X", "X")
assert not c("X", "Y")
assert not c("X", "Z")
assert c("XXXX", "YYYY")
assert c("XXXX", "YYYZ")
assert not c("XXXX", "XXXZ")
assert not c("XXXX", "___Z")
assert not c("XXXX", "Z___")
assert c("XXXX", "Z_Z_")
def test_product():
assert stim.PauliString("") * stim.PauliString("") == stim.PauliString("")
assert stim.PauliString("i") * stim.PauliString("i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-i") == stim.PauliString("+")
assert stim.PauliString("-i") * stim.PauliString("-i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-") == stim.PauliString("-i")
x = stim.PauliString("X")
y = stim.PauliString("Y")
z = stim.PauliString("Z")
assert x == +1 * x == x * +1 == +x
assert x * -1 == -x == -1 * x
assert (-x)[0] == 1
assert (-x).sign == -1
assert -(-x) == x
assert stim.PauliString(10) * stim.PauliString(11) == stim.PauliString(11)
assert x * z == stim.PauliString("-iY")
assert x * x == stim.PauliString(1)
assert x * y == stim.PauliString("iZ")
assert y * x == stim.PauliString("-iZ")
assert x * y == 1j * z
assert y * x == z * -1j
assert x.extended_product(y) == (1, 1j * z)
assert y.extended_product(x) == (1, -1j * z)
assert x.extended_product(x) == (1, stim.PauliString(1))
xx = stim.PauliString("+XX")
yy = stim.PauliString("+YY")
zz = stim.PauliString("+ZZ")
assert xx * zz == -yy
assert xx.extended_product(zz) == (1, -yy)
def test_inplace_product():
p = stim.PauliString("X")
alias = p
p *= 1j
assert alias == stim.PauliString("iX")
assert alias is p
p *= 1j
assert alias == stim.PauliString("-X")
p *= 1j
assert alias == stim.PauliString("-iX")
p *= 1j
assert alias == stim.PauliString("+X")
p *= stim.PauliString("Z")
assert alias == stim.PauliString("-iY")
p *= -1j
assert alias == stim.PauliString("-Y")
p *= -1j
assert alias == stim.PauliString("iY")
p *= -1j
assert alias == stim.PauliString("+Y")
p *= -1j
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-iY")
assert alias is p
def test_imaginary_phase():
p = stim.PauliString("IXYZ")
ip = stim.PauliString("iIXYZ")
assert 1j * p == p * 1j == ip == -stim.PauliString("-iIXYZ")
assert p.sign == 1
assert (-p).sign == -1
assert ip.sign == 1j
assert (-ip).sign == -1j
assert stim.PauliString("X") * stim.PauliString("Y") == 1j * stim.PauliString("Z")
assert stim.PauliString("Y") * stim.PauliString("X") == -1j * stim.PauliString("Z")
def test_get_set_sign():
p = stim.PauliString(2)
assert p.sign == +1
p.sign = -1
assert str(p) == "-__"
assert p.sign == -1
p.sign = +1
assert str(p) == "+__"
assert p.sign == +1
with pytest.raises(ValueError, match="new_sign"):
p.sign = 5
p.sign = 1j
assert str(p) == "+i__"
assert p.sign == 1j
p.sign = -1j
assert str(p) == "-i__"
assert p.sign == -1j
def test_get_set_item():
p = stim.PauliString(5)
assert list(p) == [0, 0, 0, 0, 0]
assert p[0] == 0
p[0] = 1
assert p[0] == 1
p[0] = 'Y'
assert p[0] == 2
p[0] = 'Z'
assert p[0] == 3
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 't'
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 10
assert p[1] == 0
p[1] = 2
assert p[1] == 2
def test_get_slice():
p = stim.PauliString("XXXX__YYYY__ZZZZX")
assert p[:7] == stim.PauliString("XXXX__Y")
assert p[:-3] == stim.PauliString("XXXX__YYYY__ZZ")
assert p[::2] == stim.PauliString("XX_YY_ZZX")
assert p[::-1] == stim.PauliString("XZZZZ__YYYY__XXXX")
assert p[-3:3] == stim.PauliString("")
assert p[-6:-1] == stim.PauliString("_ZZZZ")
assert p[3:5:-1] == stim.PauliString("")
assert p[5:3:-1] == stim.PauliString("__")
assert p[4:2:-1] == stim.PauliString("_X")
assert p[2:0:-1] == stim.PauliString("XX")
def test_copy():
p = stim.PauliString(3)
p2 = p.copy()
assert p == p2
assert p is not p2
p = stim.PauliString("-i_XYZ")
p2 = p.copy()
assert p == p2
assert p is not p2
def test_hash():
# stim.PauliString is mutable. It must not also be value-hashable.
# Defining __hash__ requires defining a FrozenPauliString variant instead.
with pytest.raises(TypeError, match="unhashable"):
_ = hash(stim.PauliString(1))
def test_add():
ps = stim.PauliString
assert ps(0) + ps(0) == ps(0)
assert ps(3) + ps(1000) == ps(1003)
assert ps(1000) + ps(3) == ps(1003)
assert ps("_XYZ") + ps("_ZZZ_") == ps("_XYZ_ZZZ_")
p = ps("_XYZ")
p += p
assert p == ps("_XYZ_XYZ")
for k in range(1, 8):
p += p
assert p == ps("_XYZ_XYZ" * 2**k)
p = ps("_XXX")
p += ps("Y")
assert p == ps("_XXXY")
p = ps("")
alias = p
p += ps("X")
assert alias is p
assert alias == ps("X")
p += p
assert alias is p
assert alias == ps("XX")
def test_mul_different_sizes():
ps = stim.PauliString
assert ps("") * ps("X" * 1000) == ps("X" * 1000)
assert ps("X" * 1000) * ps("") == ps("X" * 1000)
assert ps("Z" * 1000) * ps("") == ps("Z" * 1000)
p = ps("Z")
alias = p
p *= ps("ZZZ")
assert p == ps("_ZZ")
p *= ps("Z")
assert p == ps("ZZZ")
assert alias is p
def test_div():
assert stim.PauliString("+XYZ") / +1 == stim.PauliString("+XYZ")
assert stim.PauliString("+XYZ") / -1 == stim.PauliString("-XYZ")
assert stim.PauliString("+XYZ") / 1j == stim.PauliString("-iXYZ")
assert stim.PauliString("+XYZ") / -1j == stim.PauliString("iXYZ")
assert stim.PauliString("iXYZ") / 1j == stim.PauliString("XYZ")
p = stim.PauliString("__")
alias = p
assert p / -1 == stim.PauliString("-__")
assert alias == stim.PauliString("__")
p /= -1
assert alias == stim.PauliString("-__")
p /= 1j
assert alias == stim.PauliString("i__")
p /= 1j
assert alias == stim.PauliString("__")
p /= -1j
assert alias == stim.PauliString("i__")
p /= 1
assert alias == stim.PauliString("i__")
def test_mul_repeat():
ps = stim.PauliString
assert ps("") * 100 == ps("")
assert ps("X") * 100 == ps("X" * 100)
assert ps("XYZ_") * 1000 == ps("XYZ_" * 1000)
assert ps("XYZ_") * 1 == ps("XYZ_")
assert ps("XYZ_") * 0 == ps("")
assert 100 * ps("") == ps("")
assert 100 * ps("X") == ps("X" * 100)
assert 1000 * ps("XYZ_") == ps("XYZ_" * 1000)
assert 1 * ps("XYZ_") == ps("XYZ_")
assert 0 * ps("XYZ_") == ps("")
assert ps("i") * 0 == ps("+")
assert ps("i") * 1 == ps("i")
assert ps("i") * 2 == ps("-")
assert ps("i") * 3 == ps("-i")
assert ps("i") * 4 == ps("+")
assert ps("i") * 5 == ps("i")
assert ps("-i") * 0 == ps("+")
assert ps("-i") * 1 == ps("-i")
assert ps("-i") * 2 == ps("-")
assert ps("-i") * 3 == ps("i")
assert ps("-i") * 4 == ps("+")
assert ps("-i") * 5 == ps("-i")
assert ps("-") * 0 == ps("+")
assert ps("-") * 1 == ps("-")
assert ps("-") * 2 == ps("+")
assert ps("-") * 3 == ps("-")
assert ps("-") * 4 == ps("+")
assert ps("-") * 5 == ps("-")
p = ps("XYZ")
alias = p
p *= 1000
assert p == ps("XYZ" * 1000)
assert alias is p
def test_init_list():
assert stim.PauliString([]) == stim.PauliString(0)
assert stim.PauliString([0, 1, 2, 3]) == stim.PauliString("_XYZ")
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([-1])
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([4])
with pytest.raises(TypeError):
_ = stim.PauliString([2**500])
def test_init_copy():
p = stim.PauliString("_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
p = stim.PauliString("-i_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
def test_commutes_different_lengths():
x1000 = stim.PauliString("X" * 1000)
z1000 = stim.PauliString("Z" * 1000)
x1 = stim.PauliString("X")
z1 = stim.PauliString("Z")
assert x1.commutes(x1000)
assert x1000.commutes(x1)
assert z1.commutes(z1000)
assert z1000.commutes(z1)
assert not z1.commutes(x1000)
assert not x1000.commutes(z1)
assert not x1.commutes(z1000)
assert not z1000.commutes(x1)
def test_pickle():
import pickle
t = stim.PauliString.random(4)
a = pickle.dumps(t)
assert pickle.loads(a) == t
t = stim.PauliString("i_XYZ")
a = pickle.dumps(t)
assert pickle.loads(a) == t
|
#!/usr/bin/env python3
"""Node para controlar um robô de sumô
File
-------
sumo_controller/src/sumo_controller_node.py
Authors
-------
ThundeRatz Team <comp@thunderatz.org>
"""
import rospy
from std_msgs.msg import Float64
CONTROL_RATE = 60 # Hz
def main():
""" Lógica principal do node de controle
"""
rospy.init_node("sumo_controller", disable_signals=True, anonymous=True)
rospy.loginfo(f"Node de controle iniciado {rospy.get_time()}")
rate = rospy.Rate(CONTROL_RATE)
    # Initialize the sensors and motors here
while not rospy.is_shutdown():
        # Write your code to control the sumo robot here
rate.sleep()
if __name__ == "__main__":
try:
main()
except (rospy.ROSInterruptException, KeyboardInterrupt):
pass
finally:
        # Fix the topic names!!!
left_motor_pub = rospy.Publisher("topico/do/motor/esquerdo", Float64, queue_size=1)
right_motor_pub = rospy.Publisher("topico/do/motor/direito", Float64, queue_size=1)
left_motor_pub.publish(Float64(0))
right_motor_pub.publish(Float64(0))
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import os
import subprocess
import tempfile
from subprocess import CalledProcessError
from textwrap import dedent
import pytest
from pex.common import temporary_dir, touch
from pex.executor import Executor
from pex.testing import run_pex_command
from pex.tools.commands.virtualenv import Virtualenv
from pex.typing import TYPE_CHECKING
from pex.util import named_temporary_file
if TYPE_CHECKING:
from typing import Callable, Tuple, Any, Dict, Optional, Iterable
CreatePexVenv = Callable[[Tuple[str, ...]], Virtualenv]
FABRIC_VERSION = "2.5.0"
@pytest.fixture(scope="module")
def pex():
# type: () -> str
with temporary_dir() as tmpdir:
pex_path = os.path.join(tmpdir, "fabric.pex")
src_dir = os.path.join(tmpdir, "src")
touch(os.path.join(src_dir, "user/__init__.py"))
touch(os.path.join(src_dir, "user/package/__init__.py"))
# N.B.: --unzip just speeds up runs 2+ of the pex file and is otherwise not relevant to
# these tests.
run_pex_command(
args=[
"fabric=={}".format(FABRIC_VERSION),
"-c",
"fab",
"--sources-directory",
src_dir,
"-o",
pex_path,
"--unzip",
"--include-tools",
]
)
yield os.path.realpath(pex_path)
def make_env(**kwargs):
# type: (**Any) -> Dict[str, str]
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items())
return env
@pytest.fixture
def create_pex_venv(pex):
# type: (str) -> CreatePexVenv
with temporary_dir() as tmpdir:
venv_dir = os.path.join(tmpdir, "venv")
def _create_pex_venv(*options):
# type: (*str) -> Virtualenv
subprocess.check_call(
args=[pex, "venv", venv_dir] + list(options or ()), env=make_env(PEX_TOOLS="1")
)
return Virtualenv(venv_dir)
yield _create_pex_venv
def test_force(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv("--pip")
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
venv.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(CalledProcessError):
create_pex_venv()
venv_force = create_pex_venv("--force")
    # The re-created venv should not have ansicolors installed (unlike the prior venv).
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-c", "import colors"])
# The re-created venv should have no pip installed either.
with pytest.raises(Executor.NonZeroExit):
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
def execute_venv_pex_interpreter(
venv, # type: Virtualenv
code=None, # type: Optional[str]
extra_args=(), # type: Iterable[str]
**extra_env # type: Any
):
# type: (...) -> Tuple[int, str, str]
process = subprocess.Popen(
args=[venv.join_path("pex")] + list(extra_args),
env=make_env(PEX_INTERPRETER=True, **extra_env),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdout, stderr = process.communicate(input=None if code is None else code.encode())
return process.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def expected_file_path(
venv, # type: Virtualenv
package, # type: str
):
# type: (...) -> str
return os.path.realpath(
os.path.join(
venv.site_packages_dir,
os.path.sep.join(package.split(".")),
"__init__.{ext}".format(ext="pyc" if venv.interpreter.version[0] == 2 else "py"),
)
)
def parse_fabric_version_output(output):
# type: (str) -> Dict[str, str]
return dict(line.split(" ", 1) for line in output.splitlines())
def test_venv_pex(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
venv_pex = venv.join_path("pex")
fabric_output = subprocess.check_output(args=[venv_pex, "-V"])
# N.B.: `fab -V` output looks like so:
# $ fab -V
# Fabric 2.5.0
# Paramiko 2.7.2
# Invoke 1.4.1
versions = parse_fabric_version_output(fabric_output.decode("utf-8"))
assert FABRIC_VERSION == versions["Fabric"]
invoke_version = "Invoke {}".format(versions["Invoke"])
invoke_script_output = subprocess.check_output(
args=[venv_pex, "-V"], env=make_env(PEX_SCRIPT="invoke")
)
assert invoke_version == invoke_script_output.decode("utf-8").strip()
invoke_entry_point_output = subprocess.check_output(
args=[venv_pex, "-V"],
env=make_env(PEX_MODULE="invoke.main:program.run"),
)
assert invoke_version == invoke_entry_point_output.decode("utf-8").strip()
pex_extra_sys_path = ["/dev/null", "Bob"]
returncode, _, stderr = execute_venv_pex_interpreter(
venv,
code=dedent(
"""\
from __future__ import print_function
import os
import sys
def assert_equal(test_num, expected, actual):
if expected == actual:
return
print(
"[{{}}] Expected {{}} but got {{}}".format(test_num, expected, actual),
file=sys.stderr,
)
sys.exit(test_num)
assert_equal(1, {pex_extra_sys_path!r}, sys.path[-2:])
import fabric
assert_equal(2, {fabric!r}, os.path.realpath(fabric.__file__))
import user.package
assert_equal(3, {user_package!r}, os.path.realpath(user.package.__file__))
""".format(
pex_extra_sys_path=pex_extra_sys_path,
fabric=expected_file_path(venv, "fabric"),
user_package=expected_file_path(venv, "user.package"),
)
),
PEX_EXTRA_SYS_PATH=os.pathsep.join(pex_extra_sys_path),
)
assert 0 == returncode, stderr
def test_binary_path(create_pex_venv):
# type: (CreatePexVenv) -> None
code = dedent(
"""\
import errno
import subprocess
import sys
# PEXed code should be able to find all (console) scripts on the $PATH when the venv is
# created with --bin-path set, and the scripts should all run with the venv interpreter in
# order to find their code.
def try_invoke(*args):
try:
subprocess.check_call(list(args))
return 0
except OSError as e:
if e.errno == errno.ENOENT:
# This is what we expect when scripts are not set up on PATH via --bin-path.
return 1
return 2
exit_code = try_invoke("fab", "-V")
exit_code += 10 * try_invoke("inv", "-V")
exit_code += 100 * try_invoke("invoke", "-V")
sys.exit(exit_code)
"""
)
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=code, PATH=tempfile.gettempdir()
)
assert 111 == returncode, stdout + stderr
venv_bin_path = create_pex_venv("-f", "--bin-path", "prepend")
returncode, _, _ = execute_venv_pex_interpreter(
venv_bin_path, code=code, PATH=tempfile.gettempdir()
)
assert 0 == returncode
def test_venv_pex_interpreter_special_modes(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
# special mode execute module: -m module
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-m"])
assert 2 == returncode, stderr
assert "" == stdout
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-m", "fabric", "--version"]
)
assert 0 == returncode, stderr
versions = parse_fabric_version_output(stdout)
assert FABRIC_VERSION == versions["Fabric"]
# special mode execute code string: -c <str>
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-c"])
assert 2 == returncode, stderr
assert "" == stdout
fabric_file_code = "import fabric, os; print(os.path.realpath(fabric.__file__))"
expected_fabric_file_path = expected_file_path(venv, "fabric")
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-c", fabric_file_code]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute stdin: -
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=["-"]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute python file: <py file name>
with named_temporary_file(prefix="code", suffix=".py", mode="w") as fp:
fp.write(fabric_file_code)
fp.close()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=[fp.name]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
|
from cvxpy import Variable, Parameter, Minimize, Problem, OSQP, quad_form
import numpy as np
import scipy as sp
import scipy.sparse as sparse
import time
if __name__ == "__main__":
# Discrete time model of a quadcopter
Ts = 0.2
M = 2.0
Ad = sparse.csc_matrix([
[1.0, Ts],
[0, 1.0]
])
Bd = sparse.csc_matrix([
[0.0],
[Ts/M]])
    [nx, nu] = Bd.shape  # number of states and number of inputs
# Constraints
uref = 0
uinit = 0 # not used here
umin = np.array([-1000.0]) - uref
umax = np.array([1000.0]) - uref
xmin = np.array([-100.0, -100.0])
xmax = np.array([100.0, 100.0])
# Objective function
Q = sparse.diags([0.2, 0.3])
QN = sparse.diags([0.4, 0.5]) # final cost
R = 0.1*sparse.eye(1)
# Initial and reference states
x0 = np.array([0.1, 0.2]) # initial state
# Reference input and states
pref = 7.0
vref = 0
xref = np.array([pref, vref]) # reference state
# Prediction horizon
Np = 20
# Define problem
u = Variable((nu, Np))
x = Variable((nx, Np + 1))
x_init = Parameter(nx)
objective = 0
constraints = [x[:,0] == x_init]
for k in range(Np):
objective += quad_form(x[:, k] - xref, Q) + quad_form(u[:, k], R)
        constraints += [x[:, k+1] == Ad @ x[:, k] + Bd @ u[:, k]]
constraints += [xmin <= x[:, k], x[:, k] <= xmax]
constraints += [umin <= u[:, k], u[:, k] <= umax]
objective += quad_form(x[:, Np] - xref, QN)
prob = Problem(Minimize(objective), constraints)
    # Simulate in closed loop
len_sim = 15 # simulation length (s)
nsim = int(len_sim/Ts) # simulation length(timesteps)
xsim = np.zeros((nsim,nx))
usim = np.zeros((nsim,nu))
tsim = np.arange(0,nsim)*Ts
uminus1_val = uinit # initial previous measured input is the input at time instant -1.
time_start = time.time()
for i in range(nsim):
x_init.value = x0
#uminus1.value = uminus1_val
prob.solve(solver=OSQP, warm_start=True)
uMPC = u[:,0].value
usim[i,:] = uMPC
x0 = Ad.dot(x0) + Bd.dot(uMPC)
xsim[i,:] = x0
uminus1_val = uMPC # or a measurement if the input is affected by noise
time_sim = time.time() - time_start
import matplotlib.pyplot as plt
fig,axes = plt.subplots(3,1, figsize=(10,10))
axes[0].plot(tsim, xsim[:,0], "k", label='p')
axes[0].plot(tsim, xref[0]*np.ones(np.shape(tsim)), "r--", label="pref")
axes[0].set_title("Position (m)")
axes[1].plot(tsim, xsim[:,1], label="v")
axes[1].plot(tsim, xref[1]*np.ones(np.shape(tsim)), "r--", label="vref")
axes[1].set_title("Velocity (m/s)")
axes[2].plot(tsim, usim[:,0], label="u")
axes[2].plot(tsim, uref*np.ones(np.shape(tsim)), "r--", label="uref")
axes[2].set_title("Force (N)")
for ax in axes:
ax.grid(True)
ax.legend()
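    # Display the figures when the file is run as a plain script; plt.show() was not in the
    # original snippet and is harmless in interactive/notebook sessions.
    plt.show()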
|
# Task 1, Variant 14
# Write a program that reports the occupation and the pen name under which Marie Francois Arouet is hidden. After printing the information, the program must wait until the user presses Enter to exit.
# Morenko A.A.
# 07.03.2016
print("Мари Франсуа Аруэ, – великий французский писатель, поэт, драматург, философ-просветитель, более известный как Вольтер.")
Input("Нажмите Enter для выхода")
|
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_absolute_error, f1_score
import pandas as pd
from pandas.api.types import (
is_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_string_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
)
# if the number is 4, then it is possible to detect patterns when there are at least 4 times the same observation.
# If the limit is increased, the minimum number of observations also increases. This is important, because this is
# the limit at which sklearn will throw an error, which will lead to a score of 0 if we catch it
CV_ITERATIONS = 4
RANDOM_SEED = 587136
# if a numeric column has less than 15 unique values, it is inferred as categoric
# thus, the ppscore will use a classification
# this has important implications on the ppscore
# eg if you have 4 equal categories encoded 0, 1, 2, 3 and treat it as a regression
# then the baseline is 1 (median) which is okayish and a predictor will have a harder time
# to beat the baseline, thus the ppscore will be considerably lower
# if the column is encoded as category, then the baseline will be to always predict 0
# this baseline will be way easier to beat and thus result in a higher ppscore
NUMERIC_AS_CATEGORIC_BREAKPOINT = 15
def _calculate_model_cv_score_(df, target, feature, metric, model, **kwargs):
"Calculates the mean model score based on cross-validation"
# Sources about the used methods:
# https://scikit-learn.org/stable/modules/tree.html
# https://scikit-learn.org/stable/modules/cross_validation.html
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
# shuffle the rows - this is important for crossvalidation
# because the crossvalidation just takes the first n lines
# if there is a strong pattern in the rows eg 0,0,0,0,1,1,1,1
    # then this will lead to problems because the first folds see mostly 0 and the later ones mostly 1
# this approach might be wrong for timeseries because it might leak information
df = df.sample(frac=1, random_state=RANDOM_SEED, replace=False)
# preprocess target
if df[target].dtype == object:
le = preprocessing.LabelEncoder()
df[target] = le.fit_transform(df[target])
target_series = df[target]
else:
target_series = df[target]
# preprocess feature
if df[feature].dtype == object:
one_hot_encoder = preprocessing.OneHotEncoder()
sparse_matrix = one_hot_encoder.fit_transform(df[feature].values.reshape(-1, 1))
feature_df = sparse_matrix
else:
# reshaping needed because there is only 1 feature
feature_df = df[feature].values.reshape(-1, 1)
# Crossvalidation is stratifiedKFold for classification, KFold for regression
# CV on one core (n_job=1; default) has shown to be fastest
scores = cross_val_score(
model, feature_df, target_series, cv=CV_ITERATIONS, scoring=metric
)
return scores.mean()
def _normalized_mae_score(model_mae, naive_mae):
"Normalizes the model MAE score, given the baseline score"
# # Value range of MAE is [0, infinity), 0 is best
# 10, 5 >> 0 because worse than naive
# 10, 20 >> 0.5
# 5, 20 >> 0.75 = 1 - (mae/base_mae)
if model_mae > naive_mae:
return 0
else:
return 1 - (model_mae / naive_mae)
def _mae_normalizer(df, y, model_score):
"In case of MAE, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].median()
baseline_score = mean_absolute_error(df[y], df["naive"]) # true, pred
ppscore = _normalized_mae_score(abs(model_score), baseline_score)
return ppscore, baseline_score
def _normalized_f1_score(model_f1, baseline_f1):
"Normalizes the model F1 score, given the baseline score"
# # F1 ranges from 0 to 1
# # 1 is best
# 0.5, 0.7 = 0 because worse than naive
# 0.75, 0.5 > 0.5
#
if model_f1 < baseline_f1:
return 0
else:
scale_range = 1.0 - baseline_f1 # eg 0.3
f1_diff = model_f1 - baseline_f1 # eg 0.1
return f1_diff / scale_range # 0.1/0.3 = 0.33
def _f1_normalizer(df, y, model_score):
"In case of F1, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].value_counts().index[0]
baseline_score = f1_score(df[y], df["naive"], average="weighted")
ppscore = _normalized_f1_score(model_score, baseline_score)
return ppscore, baseline_score
TASKS = {
"regression": {
"metric_name": "mean absolute error",
"metric_key": "neg_mean_absolute_error",
"model": tree.DecisionTreeRegressor(),
"score_normalizer": _mae_normalizer,
},
"classification": {
"metric_name": "weighted F1",
"metric_key": "f1_weighted",
"model": tree.DecisionTreeClassifier(),
"score_normalizer": _f1_normalizer,
},
"predict_itself": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_constant": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_id": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
}
def _infer_task(df, x, y):
"Returns str with the name of the inferred task based on the columns x and y"
if x == y:
return "predict_itself"
category_count = df[y].value_counts().count()
if category_count == 1:
return "predict_constant"
if category_count == 2:
return "classification"
if category_count == len(df[y]) and (
is_string_dtype(df[y]) or is_categorical_dtype(df[y])
):
return "predict_id"
if category_count <= NUMERIC_AS_CATEGORIC_BREAKPOINT and is_numeric_dtype(df[y]):
return "classification"
if is_bool_dtype(df[y]) or is_string_dtype(df[y]) or is_categorical_dtype(df[y]):
return "classification"
if is_datetime64_any_dtype(df[y]) or is_timedelta64_dtype(df[y]):
raise Exception(
f"The target column {y} has the dtype {df[y].dtype} which is not supported. A possible solution might be to convert {y} to a string column"
)
# this check needs to be after is_bool_dtype because bool is considered numeric by pandas
if is_numeric_dtype(df[y]):
return "regression"
raise Exception(
f"Could not infer a valid task based on the target {y}. The dtype {df[y].dtype} is not yet supported"
) # pragma: no cover
def _feature_is_id(df, x):
"Returns Boolean if the feature column x is an ID"
if not (is_string_dtype(df[x]) or is_categorical_dtype(df[x])):
return False
category_count = df[x].value_counts().count()
return category_count == len(df[x])
def _maybe_sample(df, sample):
"""
Maybe samples the rows of the given df to have at most ``sample`` rows
If sample is ``None`` or falsy, there will be no sampling.
If the df has fewer rows than the sample, there will be no sampling.
Parameters
----------
df : pandas.DataFrame
Dataframe that might be sampled
sample : int or ``None``
Number of rows to be sampled
Returns
-------
pandas.DataFrame
DataFrame after potential sampling
"""
if sample and len(df) > sample:
# this is a problem if x or y have more than sample=5000 categories
# TODO: dont sample when the problem occurs and show warning
df = df.sample(sample, random_state=RANDOM_SEED, replace=False)
return df
def score(df, x, y, task=None, sample=5000):
"""
Calculate the Predictive Power Score (PPS) for "x predicts y"
The score always ranges from 0 to 1 and is data-type agnostic.
A score of 0 means that the column x cannot predict the column y better than a naive baseline model.
A score of 1 means that the column x can perfectly predict the column y given the model.
A score between 0 and 1 states the ratio of how much potential predictive power the model achieved compared to the baseline model.
Parameters
----------
df : pandas.DataFrame
Dataframe that contains the columns x and y
x : str
Name of the column x which acts as the feature
y : str
Name of the column y which acts as the target
task : str, default ``None``
Name of the prediction task, e.g. ``classification`` or ``regression``
        If the task is not specified, it is inferred based on the y column
The task determines which model and evaluation score is used for the PPS
sample : int or ``None``
Number of rows for sampling. The sampling decreases the calculation time of the PPS.
If ``None`` there will be no sampling.
Returns
-------
Dict
A dict that contains multiple fields about the resulting PPS.
The dict enables introspection into the calculations that have been performed under the hood
"""
if x == y:
task_name = "predict_itself"
else:
# TODO: log.warning when values have been dropped
df = df[[x, y]].dropna()
if len(df) == 0:
raise Exception("After dropping missing values, there are no valid rows left")
df = _maybe_sample(df, sample)
if task is None:
task_name = _infer_task(df, x, y)
else:
task_name = task
task = TASKS[task_name]
if task_name in ["predict_constant", "predict_itself"]:
model_score = 1
ppscore = 1
baseline_score = 1
elif task_name == "predict_id": # target is id
model_score = 0
ppscore = 0
baseline_score = 0
elif _feature_is_id(df, x):
model_score = 0
ppscore = 0
baseline_score = 0
else:
model_score = _calculate_model_cv_score_(
df, target=y, feature=x, metric=task["metric_key"], model=task["model"]
)
ppscore, baseline_score = task["score_normalizer"](df, y, model_score)
return {
"x": x,
"y": y,
"task": task_name,
"ppscore": ppscore,
"metric": task["metric_name"],
"baseline_score": baseline_score,
"model_score": abs(model_score), # sklearn returns negative mae
"model": task["model"],
}
# def predictors(df, y, task=None, sorted=True):
# pass
def matrix(df, output="df", **kwargs):
"""
Calculate the Predictive Power Score (PPS) matrix for all columns in the dataframe
Parameters
----------
df : pandas.DataFrame
The dataframe that contains the data
output: str - potential values: "df", "dict"
Control the type of the output. Either return a df or a dict with all the PPS dicts arranged by the target column
kwargs:
Other key-word arguments that shall be forwarded to the pps.score method
Returns
-------
pandas.DataFrame or Dict
Either returns a df or a dict with all the PPS dicts arranged by the target column. This can be influenced by the output argument
"""
data = {}
columns = list(df.columns)
for target in columns:
scores = []
for feature in columns:
# single_score = score(df, x=feature, y=target)["ppscore"]
try:
single_score = score(df, x=feature, y=target, **kwargs)["ppscore"]
            except Exception:
# TODO: log error
single_score = 0
scores.append(single_score)
data[target] = scores
if output == "df":
matrix = pd.DataFrame.from_dict(data, orient="index")
matrix.columns = columns
return matrix
else: # output == "dict"
return data
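# --- Minimal usage sketch (not part of the original module) ---
# Illustrates how `score` and `matrix` are meant to be called; the DataFrame and its
# column names below are made up for demonstration purposes only.
if __name__ == "__main__":
    _df = pd.DataFrame({"x": list(range(100)), "y": [int(v > 49) for v in range(100)]})
    # "y" is a simple threshold of "x", so the PPS of x -> y should be high.
    print(score(_df, "x", "y")["ppscore"])
    # Full PPS matrix over all column pairs.
    print(matrix(_df))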
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
import argparse
import pathlib
import sys
from direct.types import FileOrUrl, PathOrString
from direct.utils.io import check_is_valid_url
def is_file(path):
path = pathlib.Path(path)
if path.is_file():
return path
raise argparse.ArgumentTypeError(f"{path} is not a valid file or url.")
def file_or_url(path: PathOrString) -> FileOrUrl:
if check_is_valid_url(path):
return FileOrUrl(path)
path = pathlib.Path(path)
if path.is_file():
return FileOrUrl(path)
raise argparse.ArgumentTypeError(f"{path} is not a valid file or url.")
def check_train_val(key, name):
if key is not None and len(key) != 2:
sys.exit(f"--{name} has to be of the form `train_folder, validation_folder` if a validation folder is set.")
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('store.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#
# Py-Alpha-AMD Registration Framework
# Author: Johan Ofverstedt
# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information
#
# Copyright 2019 Johan Ofverstedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#
# Registration framework
#
# Import Numpy/Scipy
import numpy as np
import scipy as sp
import scipy.misc
# Import transforms
from transforms import CompositeTransform
from transforms import AffineTransform
from transforms import Rigid2DTransform
from transforms import Rotate2DTransform
from transforms import TranslationTransform
from transforms import ScalingTransform
# Import distances
from distances import QuantizedImage
from distances import alpha_amd
from distances import symmetric_amd_distance
# Import optimizers
from optimizers import GradientDescentOptimizer
# Import generators and filters
import generators
import filters
# Import misc
import math
import sys
import time
import cProfile, pstats
class Register:
def __init__(self, dim):
self.dim = dim
self.sampling_fraction = 1.0
self.step_lengths = np.array([[0.1, 1.0]])
self.iterations = 1500
self.alpha_levels = 7
self.gradient_magnitude_threshold = 0.00001
self.ref_im = None
self.flo_im = None
self.ref_mask = None
self.flo_mask = None
self.ref_weights = None
self.flo_weights = None
# Transforms
self.initial_transforms = []
self.transforms_param_scaling = []
self.output_transforms = []
self.values = []
self.value_history = []
# Resolution pyramid levels
self.pyramid_factors = []
self.pyramid_sigmas = []
self.distances = []
# Reporting/Output
self.report_func = None
self.report_freq = 25
def add_initial_transform(self, transform, param_scaling=None):
if param_scaling is None:
            param_scaling = np.ones((transform.get_param_count(),))
self.initial_transforms.append(transform)
self.transforms_param_scaling.append(param_scaling)
def add_initial_transforms(self, transforms, param_scaling=None):
for i, t in enumerate(transforms):
if param_scaling is None:
                pscaling = np.ones((t.get_param_count(),))
else:
pscaling = param_scaling[i]
self.add_initial_transform(t, pscaling)
def clear_transforms(self):
self.initial_transforms = []
self.output_transforms = []
self.transforms_param_scaling = []
self.values = []
self.value_history = []
def get_output(self, index):
return self.output_transforms[index], self.values[index]
def get_value_history(self, index, level):
return self.value_history[index][level]
def add_pyramid_level(self, factor, sigma):
self.pyramid_factors.append(factor)
self.pyramid_sigmas.append(sigma)
def add_pyramid_levels(self, factors, sigmas):
for i in range(len(factors)):
self.add_pyramid_level(factors[i], sigmas[i])
def get_pyramid_level_count(self):
return len(self.pyramid_factors)
def set_sampling_fraction(self, sampling_fraction):
self.sampling_fraction = sampling_fraction
def set_iterations(self, iterations):
self.iterations = iterations
def set_alpha_levels(self, alpha_levels):
self.alpha_levels = alpha_levels
def set_step_lengths(self, step_lengths):
        self.step_lengths = np.array(step_lengths)  # e.g. np.array([start_step_length, end_step_length])
def set_reference_image(self, image, spacing = None):
self.ref_im = image
if spacing is None:
self.ref_spacing = np.ones(image.ndim)
else:
self.ref_spacing = spacing
def set_floating_image(self, image, spacing = None):
self.flo_im = image
if spacing is None:
self.flo_spacing = np.ones(image.ndim)
else:
self.flo_spacing = spacing
def set_reference_mask(self, mask):
self.ref_mask = mask
def set_floating_mask(self, mask):
self.flo_mask = mask
def set_reference_weights(self, weights):
self.ref_weights = weights
def set_floating_weights(self, weights):
self.flo_weights = weights
def set_gradient_magnitude_threshold(self, t):
self.gradient_magnitude_threshold = t
def set_report_freq(self, freq):
self.report_freq = freq
def set_report_func(self, func):
self.report_func = func
def initialize(self, pyramid_images_output_path=None):
if len(self.pyramid_factors) == 0:
self.add_pyramid_level(1, 0.0)
if len(self.initial_transforms) == 0:
self.add_initial_transform(AffineTransform(self.dim))
### Preprocessing
pyramid_levels = len(self.pyramid_factors)
for i in range(pyramid_levels):
factor = self.pyramid_factors[i]
ref_resampled = filters.downsample(filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]), factor)
flo_resampled = filters.downsample(filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]), factor)
ref_mask_resampled = filters.downsample(self.ref_mask, factor)
flo_mask_resampled = filters.downsample(self.flo_mask, factor)
ref_resampled = filters.normalize(ref_resampled, 0.0, ref_mask_resampled)
flo_resampled = filters.normalize(flo_resampled, 0.0, flo_mask_resampled)
if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
scipy.misc.imsave('%sref_resampled_%d.png' % (pyramid_images_output_path, i+1), ref_resampled)
scipy.misc.imsave('%sflo_resampled_%d.png' % (pyramid_images_output_path, i+1), flo_resampled)
if self.ref_weights is None:
ref_weights = np.zeros(ref_resampled.shape)
ref_weights[ref_mask_resampled] = 1.0
else:
ref_weights = filters.downsample(self.ref_weights, factor)
if self.flo_weights is None:
flo_weights = np.zeros(flo_resampled.shape)
flo_weights[flo_mask_resampled] = 1.0
else:
flo_weights = filters.downsample(self.flo_weights, factor)
ref_diag = np.sqrt(np.square(np.array(ref_resampled.shape)*self.ref_spacing).sum())
flo_diag = np.sqrt(np.square(np.array(flo_resampled.shape)*self.flo_spacing).sum())
q_ref = QuantizedImage(ref_resampled, self.alpha_levels, ref_weights, self.ref_spacing*factor, remove_zero_weight_pnts = True)
q_flo = QuantizedImage(flo_resampled, self.alpha_levels, flo_weights, self.flo_spacing*factor, remove_zero_weight_pnts = True)
tf_ref = alpha_amd.AlphaAMD(q_ref, self.alpha_levels, ref_diag, self.ref_spacing*factor, ref_mask_resampled, ref_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)
tf_flo = alpha_amd.AlphaAMD(q_flo, self.alpha_levels, flo_diag, self.flo_spacing*factor, flo_mask_resampled, flo_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)
symmetric_measure = True
squared_measure = False
sym_dist = symmetric_amd_distance.SymmetricAMDDistance(symmetric_measure=symmetric_measure, squared_measure=squared_measure)
sym_dist.set_ref_image_source(q_ref)
sym_dist.set_ref_image_target(tf_ref)
sym_dist.set_flo_image_source(q_flo)
sym_dist.set_flo_image_target(tf_flo)
sym_dist.set_sampling_fraction(self.sampling_fraction)
sym_dist.initialize()
self.distances.append(sym_dist)
def run(self):
pyramid_level_count = len(self.pyramid_factors)
transform_count = len(self.initial_transforms)
for t_it in range(transform_count):
init_transform = self.initial_transforms[t_it]
param_scaling = self.transforms_param_scaling[t_it]
self.value_history.append([])
for lvl_it in range(pyramid_level_count):
opt = GradientDescentOptimizer(self.distances[lvl_it], init_transform.copy())
if self.step_lengths.ndim == 1:
opt.set_step_length(self.step_lengths[0], self.step_lengths[1])
else:
opt.set_step_length(self.step_lengths[lvl_it, 0], self.step_lengths[lvl_it, 1])
opt.set_scalings(param_scaling)
opt.set_gradient_magnitude_threshold(self.gradient_magnitude_threshold)
opt.set_report_freq(self.report_freq)
if type(self.report_func) is list or type(self.report_func) is tuple:
opt.set_report_callback(self.report_func[t_it])
else:
opt.set_report_callback(self.report_func)
if isinstance(self.iterations, int):
itercount = self.iterations
else:
assert(len(self.iterations) == pyramid_level_count)
itercount = self.iterations[lvl_it]
opt.optimize(itercount)
if lvl_it + 1 == pyramid_level_count:
self.output_transforms.append(opt.get_transform())
self.values.append(opt.get_value())
self.initial_transforms[t_it] = opt.get_transform()
else:
init_transform = opt.get_transform()
self.value_history[-1].append(opt.get_value_history())
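# --- Hedged usage sketch (not part of the original module) ---
# Assuming 2D numpy arrays `ref_im`, `flo_im` and boolean masks of the same shape,
# a typical registration run looks roughly like this:
#
#     reg = Register(2)
#     reg.set_reference_image(ref_im)
#     reg.set_floating_image(flo_im)
#     reg.set_reference_mask(ref_mask)
#     reg.set_floating_mask(flo_mask)
#     reg.add_pyramid_levels(factors=[4, 2, 1], sigmas=[5.0, 3.0, 0.0])
#     reg.add_initial_transform(AffineTransform(2))
#     reg.initialize()
#     reg.run()
#     transform, value = reg.get_output(0)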
|
from .data_loader import StackOverflowLRDataLoader
__all__ = [
'StackOverflowLRDataLoader',
]
|
import argparse
import discretisedfield as df
def convert_files(input_files, output_files):
for input_file, output_file in zip(input_files, output_files):
field = df.Field.fromfile(input_file)
field.write(output_file)
def main():
parser = argparse.ArgumentParser(
prog='ovf2vtk',
description='ovf2vtk - ovf to VTK format conversion'
)
parser.add_argument('--infile', type=argparse.FileType('r'),
help='One or more input files', nargs='+',
required=True)
parser.add_argument('--outfile', type=argparse.FileType('w'), nargs='+',
help='One or more output files, optional')
args = parser.parse_args()
if args.outfile:
if len(args.infile) == len(args.outfile):
input_files = [f.name for f in args.infile]
output_files = [f.name for f in args.outfile]
else:
print('\nError: The number of input and output '
'files does not match.')
return 0
else:
input_files = [f.name for f in args.infile]
output_files = [f'{f.split(".")[0]}.vtk' for f in input_files]
convert_files(input_files, output_files)
if __name__ == "__main__":
main()
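# Example invocation (assuming the module is exposed as the `ovf2vtk` console script):
#   ovf2vtk --infile m0.ovf m1.ovf --outfile m0.vtk m1.vtk
#   ovf2vtk --infile m0.ovf              # output name defaults to m0.vtk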
|
import os
import argparse
# import json
from wallstreet import Stock
from wallstreet_cli import xetra
from forex_python.converter import CurrencyRates
LOCAL_DB_PATH = os.path.join(os.path.dirname(__file__), "data", "db.txt")
def _currency_conversion(source_v: float, source_currency: str, target_currency: str):
"""Convert source currency to target currency
Args:
source_v (float):
source_currency (str): designation of source currency
target_currency (str): [description]
"""
c = CurrencyRates()
return c.convert(source_currency, target_currency, source_v)
def _get_stock_price(stock_name: str):
try:
return xetra.pipeline([stock_name])
except IndexError:
print("Ticker not found!")
return None
    # TODO (easy): handle other exceptions. Try using "APPL" as the argument for --stock;
    # an unknown error occurred
def _get_all_fav_stock_prices(show_command):
xetra.pipeline(_get_fav_tickers(), show_command)
# for stock in _get_fav_tickers():
# xetra.get_stock_from_dataset(stock, csv_list)
# show_stock(stock)
def _find_ticker(company_name):
"""give the company_name, finds the ticker name"""
# TODO (harder) having a function to search for tickers by just giving a company name
# probably need to make an api request to some search engine
pass
def show_stock(stock_name: str):
"""show stock price of certain stock
Args:
stock_name (str): [description]
"""
    # TODO (easy): take currency as argument and show stock prices in different currencies
price_in_usd = _get_stock_price(stock_name)
if not price_in_usd:
return
price_in_eur = _currency_conversion(price_in_usd, "USD", "EUR")
print(f"{stock_name}: {round(price_in_eur, 2)} EUR")
def _append_fav_ticker(l_of_tickers: list, db_path: str=LOCAL_DB_PATH):
"""append a list of tickers to a json file
Args:
l_of_tickers (list): list of tickers to add to favorites
db_path (str, optional): path to store the fav file. Defaults to LOCAL_DB_PATH.
"""
# create the folder if not yet initialized
if not os.path.exists(db_path):
os.makedirs(os.path.dirname(db_path), exist_ok=True)
    # read the existing favorites from the local file
    # and merge them with the new tickers
l_of_tickers = l_of_tickers + _get_fav_tickers()
file = open(db_path, "w")
file.write("{}".format(l_of_tickers))
file.close()
def _get_fav_tickers(db_path: str=LOCAL_DB_PATH):
"""read from the local json file, get all fav tickers
Returns a list of strings
"""
# return list of tickers from file
if not os.path.exists(db_path):
return []
file = open(db_path, "r")
content = file.read()
file.close()
output = content.strip("][").replace("'", "").split(", ")
return output
def main():
## TODO clean tmp files
parser = argparse.ArgumentParser(description="cli for wallstreet")
parser.add_argument("--stock", help="show stock price of ticker")
parser.add_argument("--currency", default="EUR", help="currency")
parser.add_argument("-s", default=False, action="store_true")
parser.add_argument("--add_fav", default=None, help="show stock price of ticker")
parser.add_argument("--show_fav", default=False, action="store_true")
args = parser.parse_args()
if args.stock:
show_stock(args.stock)
elif args.show_fav:
_get_all_fav_stock_prices(args.s)
elif args.add_fav:
_append_fav_ticker(args.add_fav.split(","))
if __name__ == "__main__":
main()
|
"""
The core part of the SOTA model of CPSC2019,
branched, and has different scope (in terms of dilation) in each branch
"""
from copy import deepcopy
from itertools import repeat
from collections import OrderedDict
from typing import Union, Optional, Sequence, NoReturn
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import torch
from torch import nn
from torch import Tensor
from ...cfg import CFG, DEFAULTS
from ...utils.utils_nn import compute_module_size, SizeMixin
from ...utils.misc import dict_to_str
from ...models._nets import (
Conv_Bn_Activation,
DownSample,
NonLocalBlock, SEBlock, GlobalContextBlock,
)
if DEFAULTS.torch_dtype == torch.float64:
torch.set_default_tensor_type(torch.DoubleTensor)
__all__ = [
"MultiScopicCNN",
"MultiScopicBasicBlock",
"MultiScopicBranch",
]
class MultiScopicBasicBlock(SizeMixin, nn.Sequential):
""" finished, checked,
basic building block of the CNN part of the SOTA model
from CPSC2019 challenge (entry 0416)
(conv -> activation) * N --> bn --> down_sample
"""
__DEBUG__ = False
__name__ = "MultiScopicBasicBlock"
def __init__(self,
in_channels:int,
scopes:Sequence[int],
num_filters:Union[int,Sequence[int]],
filter_lengths:Union[int,Sequence[int]],
subsample_length:int,
groups:int=1,
**config) -> NoReturn:
""" finished, checked,
Parameters
----------
in_channels: int,
number of channels in the input
scopes: sequence of int,
scopes of the convolutional layers, via `dilation`
num_filters: int or sequence of int,
number of filters of the convolutional layer(s)
filter_lengths: int or sequence of int,
filter length(s) (kernel size(s)) of the convolutional layer(s)
subsample_length: int,
subsample length (ratio) at the last layer of the block
"""
super().__init__()
self.__in_channels = in_channels
self.__scopes = scopes
self.__num_convs = len(self.__scopes)
if isinstance(num_filters, int):
self.__out_channels = list(repeat(num_filters, self.__num_convs))
else:
self.__out_channels = num_filters
assert len(self.__out_channels) == self.__num_convs, \
f"`scopes` indicates {self.__num_convs} convolutional layers, while `num_filters` indicates {len(self.__out_channels)}"
if isinstance(filter_lengths, int):
self.__filter_lengths = list(repeat(filter_lengths, self.__num_convs))
else:
self.__filter_lengths = filter_lengths
assert len(self.__filter_lengths) == self.__num_convs, \
f"`scopes` indicates {self.__num_convs} convolutional layers, while `filter_lengths` indicates {len(self.__filter_lengths)}"
self.__subsample_length = subsample_length
self.__groups = groups
self.config = CFG(deepcopy(config))
conv_in_channels = self.__in_channels
for idx in range(self.__num_convs):
self.add_module(
f"ca_{idx}",
Conv_Bn_Activation(
in_channels=conv_in_channels,
out_channels=self.__out_channels[idx],
kernel_size=self.__filter_lengths[idx],
stride=1,
dilation=self.__scopes[idx],
groups=self.__groups,
batch_norm=self.config.batch_norm,
# kw_bn=self.config.kw_bn,
activation=self.config.activation,
kw_activation=self.config.kw_activation,
kernel_initializer=self.config.kernel_initializer,
kw_initializer=self.config.kw_initializer,
bias=self.config.bias,
)
)
conv_in_channels = self.__out_channels[idx]
self.add_module(
"bn",
nn.BatchNorm1d(self.__out_channels[-1])
)
self.add_module(
"down",
DownSample(
down_scale=self.__subsample_length,
in_channels=self.__out_channels[-1],
groups=self.__groups,
# padding=
batch_norm=False,
mode=self.config.subsample_mode,
)
)
if self.config.dropout > 0:
self.add_module(
"dropout",
nn.Dropout(self.config.dropout, inplace=False)
)
def forward(self, input:Tensor) -> Tensor:
""" finished, checked,
Parameters
----------
input: Tensor,
of shape (batch_size, n_channels, seq_len)
Returns
-------
output: Tensor,
of shape (batch_size, n_channels, seq_len)
"""
output = super().forward(input)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
""" finished, checked,
Parameters
----------
seq_len: int,
length of the 1d sequence
batch_size: int, optional,
the batch size, can be None
Returns
-------
output_shape: sequence,
the output shape of this block, given `seq_len` and `batch_size`
"""
_seq_len = seq_len
for idx, module in enumerate(self):
if idx == self.__num_convs: # bn layer
continue
elif self.config.dropout > 0 and idx == len(self)-1: # dropout layer
continue
output_shape = module.compute_output_shape(_seq_len, batch_size)
_, _, _seq_len = output_shape
return output_shape
class MultiScopicBranch(SizeMixin, nn.Sequential):
""" finished, checked,
branch path of the CNN part of the SOTA model
from CPSC2019 challenge (entry 0416)
"""
__DEBUG__ = False
__name__ = "MultiScopicBranch"
def __init__(self,
in_channels:int,
scopes:Sequence[Sequence[int]],
num_filters:Union[Sequence[int],Sequence[Sequence[int]]],
filter_lengths:Union[Sequence[int],Sequence[Sequence[int]]],
subsample_lengths:Union[int,Sequence[int]],
groups:int=1,
**config) -> NoReturn:
""" finished, checked,
Parameters
----------
in_channels: int,
number of features (channels) of the input
scopes: sequence of sequences of int,
scopes (in terms of `dilation`) for the convolutional layers,
each sequence of int is for one branch
num_filters: sequence of int, or sequence of sequences of int,
number of filters for the convolutional layers,
if is sequence of int,
                then convolutional layers in one branch will have the same number of filters
filter_lengths: sequence of int, or sequence of sequences of int,
filter length (kernel size) of the convolutional layers,
if is sequence of int,
                then convolutional layers in one branch will have the same filter length
subsample_lengths: int, or sequence of int,
subsample length (stride) of the convolutional layers,
if is sequence of int,
                then convolutional layers in one branch will have the same subsample length
groups: int, default 1,
connection pattern (of channels) of the inputs and outputs
config: dict,
other hyper-parameters, including
dropout, activation choices, weight initializer, etc.
"""
super().__init__()
self.__in_channels = in_channels
self.__scopes = scopes
self.__num_blocks = len(self.__scopes)
self.__num_filters = num_filters
assert len(self.__num_filters) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `num_filters` indicates {len(self.__num_filters)}"
self.__filter_lengths = filter_lengths
assert len(self.__filter_lengths) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `filter_lengths` indicates {len(self.__filter_lengths)}"
if isinstance(subsample_lengths, int):
self.__subsample_lengths = list(repeat(subsample_lengths, self.__num_blocks))
else:
            self.__subsample_lengths = subsample_lengths
assert len(self.__subsample_lengths) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `subsample_lengths` indicates {len(self.__subsample_lengths)}"
self.__groups = groups
self.config = CFG(deepcopy(config))
block_in_channels = self.__in_channels
for idx in range(self.__num_blocks):
self.add_module(
f"block_{idx}",
MultiScopicBasicBlock(
in_channels=block_in_channels,
scopes=self.__scopes[idx],
num_filters=self.__num_filters[idx],
filter_lengths=self.__filter_lengths[idx],
subsample_length=self.__subsample_lengths[idx],
groups=self.__groups,
dropout=self.config.dropouts[idx],
**(self.config.block)
)
)
block_in_channels = self.__num_filters[idx]
def forward(self, input:Tensor) -> Tensor:
""" finished, checked,
Parameters
----------
input: Tensor,
of shape (batch_size, n_channels, seq_len)
Returns
-------
output: Tensor,
of shape (batch_size, n_channels, seq_len)
"""
output = super().forward(input)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
""" finished, checked,
Parameters
----------
seq_len: int,
length of the 1d sequence
batch_size: int, optional,
the batch size, can be None
Returns
-------
output_shape: sequence,
the output shape of this block, given `seq_len` and `batch_size`
"""
_seq_len = seq_len
for idx, module in enumerate(self):
output_shape = module.compute_output_shape(_seq_len, batch_size)
_, _, _seq_len = output_shape
return output_shape
class MultiScopicCNN(SizeMixin, nn.Module):
""" finished, checked,
CNN part of the SOTA model from CPSC2019 challenge (entry 0416)
"""
__DEBUG__ = False
__name__ = "MultiScopicCNN"
def __init__(self, in_channels:int, **config) -> NoReturn:
""" finished, checked,
Parameters
----------
in_channels: int,
number of channels in the input
config: dict,
other hyper-parameters of the Module, ref. corresponding config file
key word arguments that have to be set:
scopes: sequence of sequences of sequences of int,
scopes (in terms of dilation) of each convolution
num_filters: sequence of sequences (of int or of sequences of int),
number of filters of the convolutional layers,
with granularity to each block of each branch,
or to each convolution of each block of each branch
filter_lengths: sequence of sequences (of int or of sequences of int),
filter length(s) (kernel size(s)) of the convolutions,
with granularity to each block of each branch,
or to each convolution of each block of each branch
subsample_lengths: sequence of int or sequence of sequences of int,
subsampling length(s) (ratio(s)) of all blocks,
with granularity to each branch or to each block of each branch,
each subsamples after the last convolution of each block
dropouts: sequence of int or sequence of sequences of int,
dropout rates of all blocks,
with granularity to each branch or to each block of each branch,
each dropouts at the last of each block
groups: int,
connection pattern (of channels) of the inputs and outputs
block: dict,
other parameters that can be set for the building blocks
for a full list of configurable parameters, ref. corr. config file
"""
super().__init__()
self.__in_channels = in_channels
self.config = CFG(deepcopy(config))
self.__scopes = self.config.scopes
self.__num_branches = len(self.__scopes)
if self.__DEBUG__:
print(f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}")
self.branches = nn.ModuleDict()
for idx in range(self.__num_branches):
self.branches[f"branch_{idx}"] = \
MultiScopicBranch(
in_channels=self.__in_channels,
scopes=self.__scopes[idx],
num_filters=self.config.num_filters[idx],
filter_lengths=self.config.filter_lengths[idx],
subsample_lengths=self.config.subsample_lengths[idx],
groups=self.config.groups,
dropouts=self.config.dropouts[idx],
block=self.config.block, # a dict
)
def forward(self, input:Tensor) -> Tensor:
""" finished, checked,
Parameters
----------
input: Tensor,
of shape (batch_size, n_channels, seq_len)
Returns
-------
        output: Tensor,
            of shape (batch_size, out_channels, seq_len), where out_channels is the sum of the output channels of all branches
"""
branch_out = OrderedDict()
for idx in range(self.__num_branches):
key = f"branch_{idx}"
branch_out[key] = self.branches[key].forward(input)
output = torch.cat(
[branch_out[f"branch_{idx}"] for idx in range(self.__num_branches)],
dim=1, # along channels
)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
""" finished, checked,
Parameters
----------
seq_len: int,
length of the 1d sequence
batch_size: int, optional,
the batch size, can be None
Returns
-------
output_shape: sequence,
            the output shape of this module, given `seq_len` and `batch_size`
"""
out_channels = 0
for idx in range(self.__num_branches):
key = f"branch_{idx}"
_, _branch_oc, _seq_len = \
self.branches[key].compute_output_shape(seq_len, batch_size)
out_channels += _branch_oc
output_shape = (batch_size, out_channels, _seq_len)
return output_shape
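

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The nested structure below mirrors the granularity described in the
# `MultiScopicCNN` docstring: 3 branches, each with 3 blocks, each block with
# its own per-convolution dilations ("scopes"). All numeric values are
# hypothetical placeholders; real values should come from the corresponding
# config file referenced in the docstrings.
# ---------------------------------------------------------------------------
_illustrative_multi_scopic_config = dict(
    scopes=[                      # branch -> block -> dilation of each convolution
        [[1], [1, 1], [1, 1, 1]],
        [[2], [2, 4], [4, 8, 8]],
        [[4], [4, 8], [8, 16, 32]],
    ],
    num_filters=[                 # branch -> block -> number of filters of each convolution
        [[8], [12, 12], [16, 16, 16]],
        [[8], [12, 12], [16, 16, 16]],
        [[8], [12, 12], [16, 16, 16]],
    ],
    filter_lengths=[              # branch -> filter length (kernel size) of each block
        [11, 7, 5],
        [11, 7, 5],
        [11, 7, 5],
    ],
    subsample_lengths=[2, 2, 2],  # branch -> subsampling ratio (shared by its blocks)
    dropouts=[                    # branch -> dropout rate of each block
        [0.0, 0.2, 0.0],
        [0.0, 0.2, 0.0],
        [0.0, 0.2, 0.0],
    ],
    groups=1,
    block=dict(),                 # extra settings forwarded to every basic block
)
# With such a config, the CNN would be built and applied roughly as:
#     cnn = MultiScopicCNN(in_channels=12, **_illustrative_multi_scopic_config)
#     out = cnn(torch.randn(2, 12, 4000))  # (batch_size, sum of branch channels, reduced seq_len)
#     cnn.compute_output_shape(seq_len=4000, batch_size=2)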
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import tensor_util
dists = tf.contrib.distributions
class DistributionTest(tf.test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
dists.Normal,
dists.Bernoulli,
dists.Beta,
dists.Chi2,
dists.Exponential,
dists.Gamma,
dists.InverseGamma,
dists.Laplace,
dists.StudentT,
dists.Uniform]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, tf.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape, tf.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
tf.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = dists.Normal(mu=1., sigma=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = dists.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = dists.Normal(mu=1., sigma=2., validate_args=True)
      normal_copy = normal.copy(validate_args=False)
      base_params = normal.parameters.copy()
      copy_params = normal_copy.parameters.copy()
self.assertNotEqual(base_params.pop("validate_args"),
copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = dists.Normal(mu, sigma,
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch))
normal = dists.Normal([mu], [sigma],
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([mu], [sigma],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder(dtype=tf.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
x = tf.placeholder(dtype=tf.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
# Test case 3.
x = tf.placeholder(dtype=tf.int32)
is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
if __name__ == '__main__':
tf.test.main()
|