import json
import logging
import os
import sys

import disnake
from disnake.ext import commands
from disnake.ext.commands import Context
from helpers import json_manager, checks

if not os.path.isfile("../config.json"):
    sys.exit("'config.json' not found by owner-normal! Please add it and try again.")
else:
    with open("../config.json") as file:
        config = json.load(file)

''' Logging '''
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='../logs/discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)


class Owner(commands.Cog, name="owner-normal"):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(
        name="shutdown",
        description="Make the bot shutdown.",
    )
    @checks.is_owner()
    async def shutdown(self, context: Context):
        """
        Makes the bot shut down.
        """
        embed = disnake.Embed(
            description="Shutting down. Bye! :wave:",
            color=0x9C84EF
        )
        logger.info("Shutting down. Bye! :wave:")
        await context.send(embed=embed)
        await self.bot.close()

    @commands.command(
        name="say",
        description="The bot will say anything you want.",
    )
    @checks.is_owner()
    async def say(self, context: Context, *, message: str):
        """
        The bot will say anything you want.
        """
        logger.info(f"Saying '{message}'")
        await context.send(message)

    @commands.command(
        name="embed",
        description="The bot will say anything you want, but within embeds.",
    )
    @checks.is_owner()
    async def embed(self, context: Context, *, message: str):
        """
        The bot will say anything you want, but within embeds.
        """
        embed = disnake.Embed(
            description=message,
            color=0x9C84EF
        )
        logger.info(f"Saying '{message}'")
        await context.send(embed=embed)

    @commands.group(
        name="blacklist"
    )
    async def blacklist(self, context: Context):
        """
        Lets you add or remove a user from the list of users that cannot use the bot.
        """
        if context.invoked_subcommand is None:
            with open("../data/blacklist.json") as file:
                blacklist = json.load(file)
            embed = disnake.Embed(
                title=f"There are currently {len(blacklist['ids'])} blacklisted IDs",
                description=f"{', '.join(str(id) for id in blacklist['ids'])}",
                color=0x9C84EF
            )
            await context.send(embed=embed)

    @blacklist.command(
        name="add"
    )
    async def blacklist_add(self, context: Context, member: disnake.Member = None):
        """
        Lets you add a user to the blacklist so they cannot use the bot.
        """
        try:
            user_id = member.id
            with open("../data/blacklist.json") as file:
                blacklist = json.load(file)
            if user_id in blacklist['ids']:
                embed = disnake.Embed(
                    title="Error!",
                    description=f"**{member.name}** is already in the blacklist.",
                    color=0xE02B2B
                )
                return await context.send(embed=embed)
            json_manager.add_user_to_blacklist(user_id)
            embed = disnake.Embed(
                title="User Blacklisted",
                description=f"**{member.name}** has been successfully added to the blacklist",
                color=0x9C84EF
            )
            with open("../data/blacklist.json") as file:
                blacklist = json.load(file)
            embed.set_footer(
                text=f"There are now {len(blacklist['ids'])} users in the blacklist"
            )
            logger.info(f"{member.name} has been added to the blacklist.")
            await context.send(embed=embed)
        except:
            embed = disnake.Embed(
                title="Error!",
                description=f"An unknown error occurred when trying to add **{member.name}** to the blacklist.",
                color=0xE02B2B
            )
            await context.send(embed=embed)

    @blacklist.command(
        name="remove"
    )
    async def blacklist_remove(self, context, member: disnake.Member = None):
        """
        Lets you remove a user from the blacklist so they can use the bot again.
        """
        try:
            user_id = member.id
            json_manager.remove_user_from_blacklist(user_id)
            embed = disnake.Embed(
                title="User removed from blacklist",
                description=f"**{member.name}** has been successfully removed from the blacklist",
                color=0x9C84EF
            )
            with open("../data/blacklist.json") as file:
                blacklist = json.load(file)
            embed.set_footer(
                text=f"There are now {len(blacklist['ids'])} users in the blacklist"
            )
            logger.info(f"{member.name} has been removed from the blacklist.")
            await context.send(embed=embed)
        except:
            embed = disnake.Embed(
                title="Error!",
                description=f"**{member.name}** is not in the blacklist.",
                color=0xE02B2B
            )
            await context.send(embed=embed)


def setup(bot):
    bot.add_cog(Owner(bot))
from django.apps import AppConfig


class MarkersConfig(AppConfig):
    name = 'markers'
from os import ( startfile, getcwd ) from os.path import join from io import BytesIO from csv import ( writer, excel ) from openpyxl import ( Workbook, load_workbook ) from statistics import ( mean, variance, stdev ) from treetopper.plot import Plot from treetopper.timber import ( TimberQuick, TimberFull ) from treetopper.log import Log from treetopper.thin import ( ThinTPA, ThinBA, ThinRD ) from treetopper._exceptions import TargetDensityError from treetopper.fvs import FVS from treetopper._constants import ( math, ALL_SPECIES_NAMES, GRADE_SORT, LOG_LENGTHS, SORTED_HEADS ) from treetopper._utils import ( format_comma, format_pct, extension_check, reorder_dict, check_date, add_logs_to_table_heads ) from treetopper._import_from_sheets import import_from_sheet from treetopper._print_console import ( print_stand_species, print_stand_logs, print_stand_stats ) from treetopper._print_pdf import PDF class Stand(object): """The Stand Class represents a stand of timber that has had an inventory conducted on it. It should made up of plots (Plot Class) which contain trees (Timber Classes). The Stand class will run calculations and statistics of the current stand conditions and it will run calculations of the log merchantabilty for three metrics: logs per acre, log board feet per acre, and log cubic feet per acre, based on log grades, log length ranges and species. """ def __init__(self, name: str, plot_factor: float, acres: float = None, inventory_date: str = None): self.name = name.upper() self.plot_factor = plot_factor self.plots = [] self.plot_count = 0 self.tpa = 0 self.ba_ac = 0 self.qmd = 0 self.rd_ac = 0 self.bf_ac = 0 self.cf_ac = 0 self.avg_hgt = 0 self.hdr = 0 self.vbar = 0 self.tpa_stats = {} self.ba_ac_stats = {} self.rd_ac_stats = {} self.bf_ac_stats = {} self.cf_ac_stats = {} self.species = {} self.species_gross = {} self.species_stats = {} self.logs = {} self.table_data = [] self.summary_stand = [] self.summary_logs = {} self.summary_stats = [] self.metrics = ['tpa', 'ba_ac', 'rd_ac', 'bf_ac', 'cf_ac'] self.attrs = ['_gross', '_stats', ''] self.acres = acres if inventory_date: self.inv_date = check_date(inventory_date) else: self.inv_date = inventory_date def __getitem__(self, attribute: str): return self.__dict__[attribute] def get_stand_table_text(self): """Returns a console-formatted string of current stand conditions""" return print_stand_species(self.summary_stand) def get_logs_table_text(self): """Returns a console-formatted string of stand logs data""" return print_stand_logs(self.summary_logs) def get_stats_table_text(self): """Returns and console-formatted string of stand stand statistics""" return print_stand_stats(self.summary_stats) def get_console_report_text(self): """Returns a console-formatted string of the complete stand report""" return self._compile_report_text() def console_report(self): """Prints a console-formatted string of the complete stand report""" print(self._compile_report_text()) def get_pdf_report_bytes_io(self): pdf = self._compile_pdf_report() return BytesIO(pdf.output(dest='S').encode('latin-1')) def pdf_report(self, filename: str, directory: str = None, start_file_upon_creation: bool = False): """Exports a pdf of the complete stand report to a user specified directory or if directory is None, to the current working directory. 
Will open the created pdf report if start_file_upon_creation is True""" check = extension_check(filename, '.pdf') if directory: file = join(directory, check) else: file = join(getcwd(), check) pdf = self._compile_pdf_report() pdf.output(file, 'F') if start_file_upon_creation: startfile(file) def add_plot(self, plot: Plot): """Adds a plot to the stand's plots list and re-runs the calculations and statistics of the stand. plot argument needs to be the a Plot Class""" self.plots.append(plot) self.plot_count += 1 for met in self.metrics: self._update_metrics(met) self.qmd = math.sqrt((self.ba_ac / self.tpa) / .005454) self.vbar = self.bf_ac / self.ba_ac self._update_species(plot) self._update_logs(plot) self.table_data = self._update_table_data() self.summary_stand = self._update_summary_stand() self.summary_logs = self._update_summary_logs() self.summary_stats = self._update_summary_stats() def import_sheet_quick(self, file_path: str): """Imports tree and plot data from a CSV or XLSX file for a quick cruise and adds that data to the stand""" plots = import_from_sheet(file_path, self.name, 'q') for plot_num in plots: plot = Plot() for tree in plots[plot_num]: plot.add_tree(TimberQuick(self.plot_factor, *tree)) self.add_plot(plot) def import_sheet_full(self, file_path: str): """Imports tree and plot data from a CSV or XLSX file for a full cruise and adds that data to the stand""" plots = import_from_sheet(file_path, self.name, 'f') for plot_num in plots: plot = Plot() for tree_data in plots[plot_num]: args = tree_data[: -1] logs = tree_data[-1] tree = TimberFull(self.plot_factor, *args) for log in logs: tree.add_log(*log) plot.add_tree(tree) self.add_plot(plot) def table_to_csv(self, filename: str, directory: str = None): """Creates or appends a CSV file with tree data from self.table_data""" check = extension_check(filename, '.csv') if directory: file = join(directory, check) else: file = join(getcwd(), check) if isfile(file): allow = 'a' start = 1 else: allow = 'w' start = 0 with open(file, allow, newline='') as csv_file: csv_write = writer(csv_file, dialect=excel) for i in self.table_data[start:]: csv_write.writerow(i) def table_to_excel(self, filename: str, directory: str = None): """Creates or appends an Excel file with tree data from self.table_data""" check = extension_check(filename, '.xlsx') if directory: file = join(directory, check) else: file = join(getcwd(), check) if isfile(file): wb = load_workbook(file) ws = wb.active for i in self.table_data[1:]: ws.append(i) wb.save(file) else: wb = Workbook() ws = wb.active for i in self.table_data: ws.append(i) wb.save(file) def _update_metrics(self, metric: str): """Updates stand metrics based on the metric entered in the argument, used internally""" metric_list = [plot[metric] for plot in self.plots] stats = self._get_stats(metric_list) setattr(self, metric, stats['mean']) setattr(self, f'{metric}_stats', stats) def _update_species(self, plot): """Re-runs stand conditions calculations and statistics, used internally""" update_after = ['qmd', 'vbar', 'avg_hgt', 'hdr'] if self.plot_count == 0: return else: for species in plot.species: if species not in self.species_gross: for attr in self.attrs: if attr == '_gross': getattr(self, f'species{attr}')[species] = {met: [] for met in self.metrics} else: getattr(self, f'species{attr}')[species] = {met: 0 for met in self.metrics} for key in plot.species[species]: if key not in update_after: self.species_gross[species][key].append(plot.species[species][key]) for species in self.species_gross: for key 
in self.species_gross[species]: if key not in update_after: data = self.species_gross[species][key] if len(data) < self.plot_count: data += ([0] * (self.plot_count - len(data))) stats = self._get_stats(data) self.species[species][key] = stats['mean'] self.species_stats[species][key] = stats self.species[species]['qmd'] = math.sqrt((self.species[species]['ba_ac'] / self.species[species]['tpa']) / 0.005454) self.species[species]['vbar'] = self.species[species]['bf_ac'] / self.species[species]['ba_ac'] if species == 'totals_all': self.species[species]['avg_hgt'] = mean([p.avg_hgt for p in self.plots]) self.species[species]['hdr'] = mean([p.hdr for p in self.plots]) else: trees = [] for p in self.plots: for t in p.trees: trees.append(t) self.species[species]['avg_hgt'] = mean([t.height for t in trees if t.species == species]) self.species[species]['hdr'] = mean([t.hdr for t in trees if t.species == species]) def _update_logs(self, plot): """Re-runs stand logs calculations, used internally""" if self.plot_count == 0: return else: subs = ['lpa', 'bf_ac', 'cf_ac'] for species in plot.logs: if species not in self.logs: self.logs[species] = {} for grade in plot.logs[species]: if grade not in self.logs[species]: self.logs[species][grade] = {rng: {sub: {'gross': [], 'mean': 0} for sub in subs} for rng in LOG_LENGTHS} self.logs[species][grade]['totals_by_grade'] = {sub: {'gross': [], 'mean': 0} for sub in subs} for rng in plot.logs[species][grade]: if rng != 'display': for sub in subs: self.logs[species][grade][rng][sub]['gross'].append(plot.logs[species][grade][rng][sub]) for species in self.logs: for grade in self.logs[species]: for rng in self.logs[species][grade]: for sub in subs: gross = self.logs[species][grade][rng][sub]['gross'] if len(gross) < self.plot_count: gross += ([0] * (self.plot_count - len(gross))) self.logs[species][grade][rng][sub]['mean'] = mean(gross) def _update_table_data(self): """Converts stand data to plot/tree inventory data table layout, used internally""" heads = ['Stand', 'Plot Number', 'Tree Number', 'Species', 'DBH', 'Height', 'Stump Height', 'Log 1 Length', 'Log 1 Grade', 'Log 1 Defect', 'Between Logs Feet'] master = [] max_logs = [] for i, plot in enumerate(self.plots): for j, tree in enumerate(plot.trees): temp = [self.name, i + 1, j + 1] for key in ['species', 'dbh', 'height']: temp.append(tree[key]) len_logs = len(tree.logs) max_logs.append(len_logs) for k, lnum in enumerate(tree.logs): log = tree.logs[lnum] if lnum == 1: temp.append(log.stem_height - log.length - 1) for lkey in ['length', 'grade', 'defect']: temp.append(log[lkey]) if k < len(tree.logs) - 1: between = tree.logs[lnum+1].stem_height - log.stem_height - tree.logs[lnum+1].length - 1 if between < 0: temp.append(0) else: temp.append(between) master.append(temp) heads += add_logs_to_table_heads(max(max_logs)) len_heads = len(heads) for i in master: len_i = len(i) if len_i < len_heads: i += ['' for j in range(len_heads - len_i)] master.insert(0, heads) return master def _update_summary_stand(self): """Updates the current stand conditions list of stand.summary_stand, used internally""" heads = ['SPECIES'] + [head[1] for head in SORTED_HEADS] body_data = [] for key in self.species: if key == 'totals_all': show = 'TOTALS' else: show = key temp = [str(show)] + [format_comma(self.species[key][i[0]]) for i in SORTED_HEADS] body_data.append(temp) body_data.append(body_data.pop(0)) body_data.insert(0, heads) return body_data def _update_summary_logs(self): """Updates the stand logs summary dict, data-tables are 
broken down by metric type --> species, used internally. Example: self.summary_logs['BOARD FEET PER ACRE']['DF'] --> data table""" table_data = {} tables = [['bf_ac', 'BOARD FEET PER ACRE'], ['cf_ac', 'CUBIC FEET PER ACRE'], ['lpa', 'LOGS PER ACRE']] for table in tables: metric_key = table[0] key = table[1] table_data[key] = {} for species in self.logs: if species == 'totals_all': show = 'TOTALS' else: show = ALL_SPECIES_NAMES[species] table_data[key][show] = [['LOG GRADES'] + [rng.upper() for rng in LOG_LENGTHS] + ['TOTALS']] grade_sort = [] for grade in self.logs[species]: values = [self.logs[species][grade][rng][metric_key]['mean'] for rng in self.logs[species][grade]] if sum(values) > 0: if grade == 'totals_by_length': col_text = 'TOTALS' else: col_text = grade grade_sort.append([col_text] + [format_comma(z) for z in values]) grade_sort = sorted(grade_sort, key=lambda x: GRADE_SORT[x[0]]) for g in grade_sort: table_data[key][show].append(g) table_data[key] = reorder_dict(table_data[key]) return table_data def _update_summary_stats(self): """Updates the stand statistics dict, stats-tables are broken down by species, used internally. Example: self.summary_stats['DF'] --> stats-table""" tables = {} for spp in self.species_stats: if spp == 'totals_all': show = 'TOTALS' else: show = ALL_SPECIES_NAMES[spp] tables[show] = [['METRIC'] + [head.upper() for head in self.species_stats[spp]['tpa'] if head != 'low_avg_high'] + ['LOW', 'AVERAGE', 'HIGH']] for key in self.species_stats[spp]: temp = [key.upper()] not_enough_data = False for sub in self.species_stats[spp][key]: x = self.species_stats[spp][key][sub] if not_enough_data: if x == 'Not enough data': if sub == 'low_avg_high': for i in range(3): temp.append('-') else: temp.append('-') else: if x == 'Not enough data': temp.append(x) not_enough_data = True else: if sub == 'low_avg_high': for i in x: temp.append(format_comma(i)) elif sub == 'stderr_pct': temp.append(format_pct(x)) else: temp.append(format_comma(x)) tables[show].append(temp) return reorder_dict(tables) def _get_stats(self, data): """Runs the statistical calculations on a set of the stand conditions data, returns an updated sub dict, used internally""" m = mean(data) if len(data) >= 2: std = stdev(data) ste = std / math.sqrt(self.plot_count) low_avg_high = [max(round(m - ste, 1), 0), m, m + ste] d = {'mean': m, 'variance': variance(data), 'stdev': std, 'stderr': ste, 'stderr_pct': (ste / m) * 100, 'low_avg_high': low_avg_high} else: d = {'mean': m, 'variance': 'Not enough data', 'stdev': 'Not enough data', 'stderr': 'Not enough data', 'stderr_pct': 'Not enough data', 'low_avg_high': 'Not enough data'} return d def _compile_report_text(self): """Compiles the console-formatted report of all stand data and stats, used internally""" n = '\n' * 4 console_text = f'{print_stand_species(self.summary_stand)}{n}' console_text += f'{print_stand_logs(self.summary_logs)}{n}' console_text += f'{print_stand_stats(self.summary_stats)}' return console_text def _compile_pdf_report(self): pdf = PDF() pdf.alias_nb_pages() pdf.add_page() pdf.compile_stand_report(self) return pdf if __name__ == '__main__': import argparse import traceback import sys from os import mkdir, getcwd from os.path import join, isfile, isdir, expanduser from treetopper._utils import get_desktop_path def make_dir_and_subdir(workflow_num): desktop = get_desktop_path() tt_dir = join(desktop, 'treetopper_outputs') if not isdir(tt_dir): mkdir(tt_dir) wf_dir = join(tt_dir, f'workflow_{workflow_num}') if not isdir(wf_dir): 
mkdir(wf_dir) return wf_dir def get_package_path(filename): path = None for i in sys.path: if 'AppData' in i and i[-13:] == 'site-packages': path = i break tt_path = join(path, 'treetopper') sheet_path = join(tt_path, 'example_csv_and_xlsx') final = join(sheet_path, filename) return final parser = argparse.ArgumentParser(description='treetopper Example Workflows') parser.add_argument('workflow_number', help='Enter the number of the workflow to run.\n Valid workflow numbers: 1, 2, 3, 4, 5, 6)') args = parser.parse_args() wf = args.workflow_number while True: if wf not in ['1', '2', '3', '4', '5', '6']: print('Please enter a workflow number 1, 2, 3, 4, 5, or 6') wf = input('Workflow #: ') else: break wf = int(wf) def workflow_1(workflow_number): stand = Stand('WF1', -20) plot_factor = stand.plot_factor tree_data = [ # Plot 1 [TimberQuick(plot_factor, 'DF', 29.5, 119), TimberQuick(plot_factor, 'WH', 18.9, 102), TimberQuick(plot_factor, 'WH', 20.2, 101), TimberQuick(plot_factor, 'WH', 19.9, 100), TimberQuick(plot_factor, 'DF', 20.6, 112)], # Plot 2 [TimberQuick(plot_factor, 'DF', 25.0, 117), TimberQuick(plot_factor, 'DF', 14.3, 105), TimberQuick(plot_factor, 'DF', 20.4, 119), TimberQuick(plot_factor, 'DF', 16.0, 108), TimberQuick(plot_factor, 'RC', 20.2, 124), TimberQuick(plot_factor, 'RC', 19.5, 116), TimberQuick(plot_factor, 'RC', 23.4, 121), TimberQuick(plot_factor, 'DF', 17.8, 116), TimberQuick(plot_factor, 'DF', 22.3, 125)] ] for trees in tree_data: plot = Plot() for tree in trees: plot.add_tree(tree) stand.add_plot(plot) path = make_dir_and_subdir(workflow_number) stand.console_report() stand.table_to_csv(join(path, 'example_csv_export.csv')) thin80tpa = ThinTPA(stand, 80) thin80tpa.console_report() end_message = """**WORKFLOW 1 created a QUICK CRUISE stand from manually entered tree data. It then ran a thinning scenario with a target density of 80 Trees per Acre considering all species and diameter ranges. 
Outputs: Stand console report in terminal [print(stand_class.console_report)] ^above^ Thinning console report in terminal [print(thin_class.console_report))] ^above^ Plot data .csv "example_csv_export.csv" in desktop/treetopper_outputs/workflow_1/ """ print(f'\n\n{end_message}') def workflow_2(workflow_number): stand = Stand('WF2', 33.3) plot_factor = stand.plot_factor tree_data = [ # Plot 1 [[TimberFull(plot_factor, 'DF', 29.5, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 0], [102, 18, 'S4', 10]]], [TimberFull(plot_factor, 'WH', 18.9, 102), [[42, 40, 'S2', 0], [79, 36, 'S4', 5]]], [TimberFull(plot_factor, 'WH', 20.2, 101), [[42, 40, 'S2', 5], [83, 40, 'S4', 0]]], [TimberFull(plot_factor, 'WH', 19.9, 100), [[42, 40, 'S2', 0], [83, 40, 'S4', 15]]], [TimberFull(plot_factor, 'DF', 20.6, 112), [[42, 40, 'S2', 0], [83, 40, 'S3', 5], [100, 16, 'UT', 10]]]], # Plot 2 [[TimberFull(plot_factor, 'DF', 25.0, 117), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [100, 16, 'S4', 0]]], [TimberFull(plot_factor, 'DF', 14.3, 105), [[42, 40, 'S3', 0], [79, 36, 'S4', 0]]], [TimberFull(plot_factor, 'DF', 20.4, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 5], [100, 16, 'S4', 5]]], [TimberFull(plot_factor, 'DF', 16.0, 108), [[42, 40, 'S3', 5], [83, 40, 'S3', 10]]], [TimberFull(plot_factor, 'RC', 20.2, 124), [[42, 40, 'CR', 5], [83, 40, 'CR', 5], [104, 20, 'CR', 5]]], [TimberFull(plot_factor, 'RC', 19.5, 116), [[42, 40, 'CR', 10], [83, 40, 'CR', 5], [100, 16, 'CR', 0]]], [TimberFull(plot_factor, 'RC', 23.4, 121), [[42, 40, 'CR', 0], [83, 40, 'CR', 0], [106, 22, 'CR', 5]]], [TimberFull(plot_factor, 'DF', 17.8, 116), [[42, 40, 'S2', 0], [83, 40, 'S3', 0], [100, 16, 'S4', 10]]], [TimberFull(plot_factor, 'DF', 22.3, 125), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [108, 24, 'S4', 0]]]] ] for trees in tree_data: plot = Plot() for tree, logs in trees: for log in logs: tree.add_log(*log) plot.add_tree(tree) stand.add_plot(plot) path = make_dir_and_subdir(workflow_number) stand.console_report() stand.table_to_excel(join(path, 'example_xlsx_export.xlsx')) thin120ba = ThinBA(stand, 120, species_to_cut=['DF', 'WH']) thin120ba.console_report() end_message = """**WORKFLOW 2 created a FULL CRUISE stand from manually entered tree data. It then ran a thinning scenario with a target density of 120 Basal Area per Acre harvesting only DF and WH considering all diameter ranges. Outputs: Stand console report in terminal [print(stand_class.console_report)] ^above^ Thinning console report in terminal [print(thin_class.console_report))] ^above^ Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_2/ """ print(f'\n\n{end_message}') def workflow_3(workflow_number): path = make_dir_and_subdir(workflow_number) stand = Stand('EX4', -30) stand.import_sheet_quick(get_package_path('Example_Excel_quick.xlsx')) stand.console_report() stand.table_to_excel(join(path, 'example_xlsx_export.xlsx')) thin25rd = ThinRD(stand, 25, species_to_cut=['DF', 'WH'], min_dbh_to_cut=10, max_dbh_to_cut=18) thin25rd.console_report() end_message = """**WORKFLOW 3 created a QUICK CRUISE stand from importing plot data from an excel sheet. It then ran a thinning scenario with a target density of 25 Relative Density per Acre harvesting only DF and WH, with a minimum dbh of 10 inches and a maximum dbh of 18 inches. 
** Note this thinning density won't be able to be achieved fully because our parameters don't allow for the needed harvest density, but this is to illustrate that the thinning will let the user know how much density was taken and how much more is needed to achieve the desired density target Outputs: Stand console report in terminal [print(stand_class.console_report)] ^above^ Thinning console report in terminal [print(thin_class.console_report))] ^above^ Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_3/ """ print(f'\n\n{end_message}') def workflow_4(workflow_number): path = make_dir_and_subdir(workflow_number) stand = Stand('OK2', 46.94) stand.import_sheet_full(get_package_path('Example_CSV_full.csv')) stand.console_report() stand.table_to_excel(join(path, 'example_xlsx_export.xlsx')) try: thin100tpa = ThinTPA(stand, 100) thin100tpa.console_report() except TargetDensityError as e: print(traceback.format_exc()) end_message = """**WORKFLOW 4 created a FULL CRUISE stand from importing plot data from an csv sheet. It then ran a thinning scenario with a target density of 100 Trees per Acre considering all species and diameter ranges. ** Note this thinning density is greater than the current stand density and the Thin Class will throw a TargetDensityError exception which will explain what went wrong. Outputs: Stand console report in terminal [print(stand_class.console_report)] ^above^ Thinning console report in terminal [print(thin_class.console_report))] ^above^ Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_4/ """ print(f'\n\n{end_message}') def workflow_5(workflow_number): path = make_dir_and_subdir(workflow_number) stand = Stand('EX3', 33.3) stand.import_sheet_quick(get_package_path('Example_CSV_quick.csv')) stand.pdf_report(join(path, 'stand_report.pdf')) stand.table_to_excel(join(path, 'example_xlsx_export.xlsx')) thin140ba = ThinBA(stand, 140, species_to_cut=['DF', 'WH', 'RA'], max_dbh_to_cut=24) thin140ba.pdf_report(join(path, 'thin_report.pdf')) end_message = """**WORKFLOW 5 created a QUICK CRUISE stand from importing plot data from an csv sheet. It then ran a thinning scenario with a target density of 140 Basal Area per Acre harvesting only DF, WH and RA with a maximum diameter of 24 inches. Outputs: Stand PDF report "stand_report.pdf" from [stand_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/ Thinning PDF report "thin_report.pdf" from [thin_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/ Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_5/ """ print(f'\n\n{end_message}') def workflow_6(workflow_number): path = make_dir_and_subdir(workflow_number) stand = Stand('OK1', -30) stand.import_sheet_full(get_package_path('Example_Excel_full.xlsx')) stand.table_to_excel(join(path, 'example_xlsx_export.xlsx')) fvs = FVS() fvs.set_stand(stand, 'PN', 612, 6, 45, 'DF', 110) fvs.access_db('access_db', directory=path) fvs.sqlite_db('sqlite_db', directory=path) fvs.excel_db('excel_db', directory=path) end_message = """**WORKFLOW 6 created a FULL CRUISE stand from importing plot data from an excel sheet. It then ran the FVS module to create FVS formatted databases from the stand data. FVS is the US Forest Service's Forest Vegetation Simulator. Outputs: FVS Access database "access_db.db" from [fvs_class.access_db()] in desktop/treetopper_outputs/workflow_6/ FVS Suppose file "Suppose.loc" in desktop/treetopper_outputs/workflow_6/. 
** FVS Legacy needs a .loc file along with the database. FVS SQLite database "sqlite_db.db" from [fvs_class.sqlite_db()] in desktop/treetopper_outputs/workflow_6/ FVS Excel database "excel_db.db" from [fvs_class.excel_db()] in desktop/treetopper_outputs/workflow_6/ Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_6/ """ print(f'\n\n{end_message}') def main(workflow_number): opts = { 1: workflow_1, 2: workflow_2, 3: workflow_3, 4: workflow_4, 5: workflow_5, 6: workflow_6 } opts[workflow_number](workflow_number) print(f"\n\n{'-' * 200}\n\n") main(wf) print(f"\n\n{'-' * 200}\n\n")
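The quadratic mean diameter formula used in Stand.add_plot() can be illustrated in isolation; the basal-area and trees-per-acre values below are made up for the example.

import math

ba_ac = 150.0   # basal area per acre (sq ft/acre), illustrative value
tpa = 200.0     # trees per acre, illustrative value
qmd = math.sqrt((ba_ac / tpa) / 0.005454)   # same formula as in Stand.add_plot()
print(round(qmd, 1))  # -> 11.7 inches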
#!/usr/bin/env python
# -*- coding: utf-8 -*-


def example1():
    """Slice operator.

    seq[::stride]         # [seq[0], seq[stride], ..., seq[-1]]
    seq[low::stride]      # [seq[low], seq[low+stride], ..., seq[-1]]
    seq[:high:stride]     # [seq[0], seq[stride], ..., seq[high-1]]
    seq[low:high:stride]  # [seq[low], seq[low+stride], ..., seq[high-1]]
    """
    l = list("01234567")
    assert l[::2] == list("0246")   # start at index 0, take every 2nd item
    assert l[1::2] == list("1357")  # start at index 1, take every 2nd item
    assert l[:4:2] == list("02")    # from the start up to index 3, take every 2nd item
    assert l[2:6:2] == list("24")   # from index 2 up to index 5, take every 2nd item


example1()


def example2():
    """Reversed slice operator"""
    l = list("01234567")
    assert l[::-1] == list("76543210")  # start from the last item, reverse the sequence
    assert l[::-2] == list("7531")      # start from the last item, take every 2nd item going backwards
    assert l[-2::-2] == list("6420")    # start from index -2, take every 2nd item going backwards
    assert l[:3:-2] == list("75")       # from the end down to (but not including) index 3, every 2nd item


example2()
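The same slice rules apply to any sequence type; a small extra illustration in the same style (not part of the original file):

def example3():
    """Slice operator on strings (illustrative addition)."""
    s = "01234567"
    assert s[::3] == "036"      # every 3rd character from the start
    assert s[1:7:2] == "135"    # from index 1 up to index 6, every 2nd character
    assert s[::-3] == "741"     # every 3rd character, walking backwards from the end


example3()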
""" Common logic used by the docker state and execution module This module contains logic to accommodate docker/salt CLI usage, as well as input as formatted by states. """ import copy import logging import salt.utils.args import salt.utils.data import salt.utils.dockermod.translate from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.utils.args import get_function_argspec as _argspec from salt.utils.dockermod.translate.helpers import split as _split try: import docker except ImportError: docker = None # These next two imports are only necessary to have access to the needed # functions so that we can get argspecs for the container config, host config, # and networking config (see the get_client_args() function). try: import docker.types except ImportError: pass try: import docker.utils except ImportError: pass NOTSET = object() __virtualname__ = "docker" # Default timeout as of docker-py 1.0.0 CLIENT_TIMEOUT = 60 # Timeout for stopping the container, before a kill is invoked SHUTDOWN_TIMEOUT = 10 log = logging.getLogger(__name__) def __virtual__(): if docker is None: return False return __virtualname__ def get_client_args(limit=None): if docker is None: raise CommandExecutionError("docker Python module not imported") limit = salt.utils.args.split_input(limit or []) ret = {} if not limit or any( x in limit for x in ("create_container", "host_config", "connect_container_to_network") ): try: ret["create_container"] = _argspec(docker.APIClient.create_container).args except AttributeError: try: ret["create_container"] = _argspec(docker.Client.create_container).args except AttributeError: raise CommandExecutionError("Coult not get create_container argspec") try: ret["host_config"] = _argspec(docker.types.HostConfig.__init__).args except AttributeError: try: ret["host_config"] = _argspec(docker.utils.create_host_config).args except AttributeError: raise CommandExecutionError("Could not get create_host_config argspec") try: ret["connect_container_to_network"] = _argspec( docker.types.EndpointConfig.__init__ ).args except AttributeError: try: ret["connect_container_to_network"] = _argspec( docker.utils.utils.create_endpoint_config ).args except AttributeError: try: ret["connect_container_to_network"] = _argspec( docker.utils.create_endpoint_config ).args except AttributeError: raise CommandExecutionError( "Could not get connect_container_to_network argspec" ) for key, wrapped_func in ( ("logs", docker.api.container.ContainerApiMixin.logs), ("create_network", docker.api.network.NetworkApiMixin.create_network), ): if not limit or key in limit: try: func_ref = wrapped_func try: # functools.wraps makes things a little easier in Python 3 ret[key] = _argspec(func_ref.__wrapped__).args except AttributeError: # functools.wraps changed (unlikely), bail out ret[key] = [] except AttributeError: # Function moved, bail out ret[key] = [] if not limit or "ipam_config" in limit: try: ret["ipam_config"] = _argspec(docker.types.IPAMPool.__init__).args except AttributeError: try: ret["ipam_config"] = _argspec(docker.utils.create_ipam_pool).args except AttributeError: raise CommandExecutionError("Could not get ipam args") for item in ret: # The API version is passed automagically by the API code that imports # these classes/functions and is not an arg that we will be passing, so # remove it if present. Similarly, don't include "self" if it shows up # in the arglist. 
for argname in ("version", "self"): try: ret[item].remove(argname) except ValueError: pass # Remove any args in host or endpoint config from the create_container # arglist. This keeps us from accidentally allowing args that docker-py has # moved from the create_container function to the either the host or # endpoint config. for item in ("host_config", "connect_container_to_network"): for val in ret.get(item, []): try: ret["create_container"].remove(val) except ValueError: # Arg is not in create_container arglist pass for item in ("create_container", "host_config", "connect_container_to_network"): if limit and item not in limit: ret.pop(item, None) try: ret["logs"].remove("container") except (KeyError, ValueError, TypeError): pass return ret def translate_input( translator, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, **kwargs ): """ Translate CLI/SLS input into the format the API expects. The ``translator`` argument must be a module containing translation functions, within salt.utils.dockermod.translate. A ``skip_translate`` kwarg can be passed to control which arguments are translated. It can be either a comma-separated list or an iterable containing strings (e.g. a list or tuple), and members of that tuple will have their translation skipped. Optionally, skip_translate can be set to True to skip *all* translation. """ kwargs = copy.deepcopy(salt.utils.args.clean_kwargs(**kwargs)) invalid = {} collisions = [] if skip_translate is True: # Skip all translation return kwargs else: if not skip_translate: skip_translate = () else: try: skip_translate = _split(skip_translate) except AttributeError: pass if not hasattr(skip_translate, "__iter__"): log.error("skip_translate is not an iterable, ignoring") skip_translate = () try: # Using list(kwargs) here because if there are any invalid arguments we # will be popping them from the kwargs. for key in list(kwargs): real_key = translator.ALIASES.get(key, key) if real_key in skip_translate: continue # ipam_pools is designed to be passed as a list of actual # dictionaries, but if each of the dictionaries passed has a single # element, it will be incorrectly repacked. 
if key != "ipam_pools" and salt.utils.data.is_dictlist(kwargs[key]): kwargs[key] = salt.utils.data.repack_dictlist(kwargs[key]) try: kwargs[key] = getattr(translator, real_key)( kwargs[key], validate_ip_addrs=validate_ip_addrs, skip_translate=skip_translate, ) except AttributeError: log.debug("No translation function for argument '%s'", key) continue except SaltInvocationError as exc: kwargs.pop(key) invalid[key] = exc.strerror try: translator._merge_keys(kwargs) except AttributeError: pass # Convert CLI versions of commands to their docker-py counterparts for key in translator.ALIASES: if key in kwargs: new_key = translator.ALIASES[key] value = kwargs.pop(key) if new_key in kwargs: collisions.append(new_key) else: kwargs[new_key] = value try: translator._post_processing(kwargs, skip_translate, invalid) except AttributeError: pass except Exception as exc: # pylint: disable=broad-except error_message = exc.__str__() log.error("Error translating input: '%s'", error_message, exc_info=True) else: error_message = None error_data = {} if error_message is not None: error_data["error_message"] = error_message if invalid: error_data["invalid"] = invalid if collisions and not ignore_collisions: for item in collisions: error_data.setdefault("collisions", []).append( "'{}' is an alias for '{}', they cannot both be used".format( translator.ALIASES_REVMAP[item], item ) ) if error_data: raise CommandExecutionError("Failed to translate input", info=error_data) return kwargs def create_ipam_config(*pools, **kwargs): """ Builds an IP address management (IPAM) config dictionary """ kwargs = salt.utils.args.clean_kwargs(**kwargs) try: # docker-py 2.0 and newer pool_args = salt.utils.args.get_function_argspec( docker.types.IPAMPool.__init__ ).args create_pool = docker.types.IPAMPool create_config = docker.types.IPAMConfig except AttributeError: # docker-py < 2.0 pool_args = salt.utils.args.get_function_argspec( docker.utils.create_ipam_pool ).args create_pool = docker.utils.create_ipam_pool create_config = docker.utils.create_ipam_config for primary_key, alias_key in (("driver", "ipam_driver"), ("options", "ipam_opts")): if alias_key in kwargs: alias_val = kwargs.pop(alias_key) if primary_key in kwargs: log.warning( "docker.create_ipam_config: Both '%s' and '%s' " "passed. Ignoring '%s'", alias_key, primary_key, alias_key, ) else: kwargs[primary_key] = alias_val if salt.utils.data.is_dictlist(kwargs.get("options")): kwargs["options"] = salt.utils.data.repack_dictlist(kwargs["options"]) # Get all of the IPAM pool args that were passed as individual kwargs # instead of in the *pools tuple pool_kwargs = {} for key in list(kwargs): if key in pool_args: pool_kwargs[key] = kwargs.pop(key) pool_configs = [] if pool_kwargs: pool_configs.append(create_pool(**pool_kwargs)) pool_configs.extend([create_pool(**pool) for pool in pools]) if pool_configs: # Sanity check the IPAM pools. docker-py's type/function for creating # an IPAM pool will allow you to create a pool with a gateway, IP # range, or map of aux addresses, even when no subnet is passed. # However, attempting to use this IPAM pool when creating the network # will cause the Docker Engine to throw an error. 
if any("Subnet" not in pool for pool in pool_configs): raise SaltInvocationError("A subnet is required in each IPAM pool") else: kwargs["pool_configs"] = pool_configs ret = create_config(**kwargs) pool_dicts = ret.get("Config") if pool_dicts: # When you inspect a network with custom IPAM configuration, only # arguments which were explictly passed are reflected. By contrast, # docker-py will include keys for arguments which were not passed in # but set the value to None. Thus, for ease of comparison, the below # loop will remove all keys with a value of None from the generated # pool configs. for idx, _ in enumerate(pool_dicts): for key in list(pool_dicts[idx]): if pool_dicts[idx][key] is None: del pool_dicts[idx][key] return ret
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani

import torch

from ..utils.utils import xywh2xyxy, bbox_iou


def clip_boxes(boxes):
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=1)
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=1)


def postprocess_pr(pr_outs, conf_thres=0.001, overlap_thres=0.5):
    """Restructure YOLOv3Tiny tensors into lists, then filter out non-maximal (redundant)
    annotations from the predictions."""
    # pr_outs = [[bs, grid_positions, 85], [bs, 4*grid_positions, 85]]
    # when its two components are concatenated, we get a tensor [bs, 5*grid_positions, 85], whose `bs` "slices"
    # have to be "stripped" to remove redundant components.
    # Strip each slice (corresponding to a single image in the batch) to get sequences of (possibly) different
    # lengths: the natural data structure to collect these sequences is a list.
    pr_outs = [p.view(p.size(0), -1, p.size(-1)) for p in pr_outs]
    pr_outs = torch.cat(pr_outs, 1).detach().cpu()
    pr_labels = [None] * len(pr_outs)
    for img_id, pr in enumerate(pr_outs):
        # filter out irrelevant predictions
        pr_cls_prob, pr_cls_id = pr[:, 5:].max(1)
        pr[:, 4] *= pr_cls_prob
        i = (pr[:, 4] > conf_thres) & torch.isfinite(pr).all(1)
        pr = pr[i]
        if len(pr) == 0:
            continue
        pr_cls_prob = pr_cls_prob[i]
        pr_cls_id = pr_cls_id[i].unsqueeze(1).float()
        pr[:, :4] = xywh2xyxy(pr[:, :4])
        pr = torch.cat((pr[:, :5], pr_cls_prob.unsqueeze(1), pr_cls_id), 1)
        pr = pr[(-pr[:, 4]).argsort()]
        detections = []
        for c in pr[:, -1].unique():
            pr_anno_c = pr[pr[:, -1] == c]
            n = len(pr_anno_c)
            if n == 1:
                detections.append(pr_anno_c)
                continue
            elif n > 100:
                pr_anno_c = pr_anno_c[:100]
            while len(pr_anno_c) > 0:
                if len(pr_anno_c) == 1:
                    detections.append(pr_anno_c)
                    break
                redundant = bbox_iou(pr_anno_c[0], pr_anno_c) > overlap_thres
                weights = pr_anno_c[redundant, 4:5]
                pr_anno_c[0, :4] = (weights * pr_anno_c[redundant, 0:4]).sum(0) / weights.sum()
                detections.append(pr_anno_c[0:1])  # keep leading dimension 1 for 1D tensor
                pr_anno_c = pr_anno_c[~redundant]
        if len(detections) > 0:
            detections = torch.cat(detections)
            clip_boxes(detections[:, :4])
            pr_labels[img_id] = detections[(-detections[:, 4]).argsort()]
    return pr_labels


def postprocess_gt(gt_labels):
    gt_labels = gt_labels.detach().cpu()
    bs = gt_labels[0, 0].to(torch.int)
    gt_labels = [gt_labels[gt_labels[:, 1] == i, 2:] for i in range(bs)]
    return gt_labels
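A hedged smoke-test sketch for postprocess_pr(); the batch size, grid size, and random values are assumptions chosen only to match the shape comment at the top of the function.

import torch

bs, g = 2, 13 * 13                                    # assumed batch and grid sizes
dummy_outs = [torch.rand(bs, g, 85), torch.rand(bs, 4 * g, 85)]
labels = postprocess_pr(dummy_outs, conf_thres=0.25, overlap_thres=0.5)
# labels is a list of length bs; each entry is None or a tensor of detections
# sorted by descending confidence, with boxes clipped to [0, 1].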
""" This is an implementation of Function Secret Sharing Useful papers are: - Function Secret Sharing- Improvements and Extensions, Boyle 2017 Link: https://eprint.iacr.org/2018/707.pdf - Secure Computation with Preprocessing via Function Secret Sharing, Boyle 2019 Link: https://eprint.iacr.org/2019/1095 Note that the protocols are quite different in aspect from those papers """ import hashlib import torch as th import syft as sy λ = 110 # 6 # 110 or 63 # security parameter n = 32 # 8 # 32 # bit precision dtype = th.int32 no_wrap = {"no_wrap": True} def initialize_crypto_plans(worker): """ This is called manually for the moment, to build the plan used to perform Function Secret Sharing on a specific worker. """ eq_plan_1 = sy.Plan( forward_func=lambda x, y: mask_builder(x, y, "eq"), owner=worker, tags=["#fss_eq_plan_1"], is_built=True, ) worker.register_obj(eq_plan_1) eq_plan_2 = sy.Plan( forward_func=eq_eval_plan, owner=worker, tags=["#fss_eq_plan_2"], is_built=True ) worker.register_obj(eq_plan_2) comp_plan_1 = sy.Plan( forward_func=lambda x, y: mask_builder(x, y, "comp"), owner=worker, tags=["#fss_comp_plan_1"], is_built=True, ) worker.register_obj(comp_plan_1) comp_plan_2 = sy.Plan( forward_func=comp_eval_plan, owner=worker, tags=["#fss_comp_plan_2"], is_built=True ) worker.register_obj(comp_plan_2) xor_add_plan = sy.Plan( forward_func=xor_add_convert_1, owner=worker, tags=["#xor_add_1"], is_built=True ) worker.register_obj(xor_add_plan) xor_add_plan = sy.Plan( forward_func=xor_add_convert_2, owner=worker, tags=["#xor_add_2"], is_built=True ) worker.register_obj(xor_add_plan) def request_run_plan(worker, plan_tag, location, return_value, args=(), kwargs={}): response_ids = (sy.ID_PROVIDER.pop(),) args = (args, response_ids) response = worker.send_command( cmd_name="run", target=plan_tag, recipient=location, return_ids=response_ids, return_value=return_value, kwargs_=kwargs, args_=args, ) return response def fss_op(x1, x2, type_op="eq"): """ Define the workflow for a binary operation using Function Secret Sharing Currently supported operand are = & <=, respectively corresponding to type_op = 'eq' and 'comp' Args: x1: first AST x2: second AST type_op: type of operation to perform, should be 'eq' or 'comp' Returns: shares of the comparison """ me = sy.local_worker locations = x1.locations shares = [] for location in locations: args = (x1.child[location.id], x2.child[location.id]) share = request_run_plan( me, f"#fss_{type_op}_plan_1", location, return_value=True, args=args ) shares.append(share) mask_value = sum(shares) % 2 ** n shares = [] for i, location in enumerate(locations): args = (th.IntTensor([i]), mask_value) share = request_run_plan( me, f"#fss_{type_op}_plan_2", location, return_value=False, args=args ) shares.append(share) if type_op == "comp": prev_shares = shares shares = [] for prev_share, location in zip(prev_shares, locations): share = request_run_plan( me, "#xor_add_1", location, return_value=True, args=(prev_share,) ) shares.append(share) masked_value = shares[0] ^ shares[1] # TODO case >2 workers ? 
shares = {} for i, prev_share, location in zip(range(len(locations)), prev_shares, locations): share = request_run_plan( me, "#xor_add_2", location, return_value=False, args=(th.IntTensor([i]), masked_value), ) shares[location.id] = share else: shares = {loc.id: share for loc, share in zip(locations, shares)} response = sy.AdditiveSharingTensor(shares, **x1.get_class_attributes()) return response # share level def mask_builder(x1, x2, type_op): x = x1 - x2 # Keep the primitive in store as we use it after alpha, s_0, *CW = x1.owner.crypto_store.get_keys( f"fss_{type_op}", n_instances=x1.numel(), remove=False ) return x + alpha.reshape(x.shape) # share level def eq_eval_plan(b, x_masked): alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys( type_op="fss_eq", n_instances=x_masked.numel(), remove=True ) result_share = DPF.eval(b, x_masked, s_0, *CW) return result_share # share level def comp_eval_plan(b, x_masked): alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys( type_op="fss_comp", n_instances=x_masked.numel(), remove=True ) result_share = DIF.eval(b, x_masked, s_0, *CW) return result_share def xor_add_convert_1(x): xor_share, add_share = x.owner.crypto_store.get_keys( type_op="xor_add_couple", n_instances=x.numel(), remove=False ) return x ^ xor_share.reshape(x.shape) def xor_add_convert_2(b, x): xor_share, add_share = x.owner.crypto_store.get_keys( type_op="xor_add_couple", n_instances=x.numel(), remove=True ) return add_share.reshape(x.shape) * (1 - 2 * x) + x * b def eq(x1, x2): return fss_op(x1, x2, "eq") def le(x1, x2): return fss_op(x1, x2, "comp") class DPF: """Distributed Point Function - used for equality""" def __init__(self): pass @staticmethod def keygen(n_values=1): beta = th.tensor([1], dtype=dtype) alpha = th.randint(0, 2 ** n, (n_values,)) α = bit_decomposition(alpha) s, t, CW = ( Array(n + 1, 2, λ, n_values), Array(n + 1, 2, n_values), Array(n, 2 * (λ + 1), n_values), ) s[0] = randbit(size=(2, λ, n_values)) t[0] = th.tensor([[0, 1]] * n_values, dtype=th.uint8).t() for i in range(0, n): g0 = G(s[i, 0]) g1 = G(s[i, 1]) # Re-use useless randomness sL_0, _, sR_0, _ = split(g0, [λ, 1, λ, 1]) sL_1, _, sR_1, _ = split(g1, [λ, 1, λ, 1]) s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i]) cw_i = TruthTableDPF(s_rand, α[i]) CW[i] = cw_i ^ g0 ^ g1 for b in (0, 1): τ = [g0, g1][b] ^ (t[i, b] * CW[i]) τ = τ.reshape(2, λ + 1, n_values) # filtered_τ = τ[𝛼[i]] OLD α_i = α[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long() filtered_τ = th.gather(τ, 0, α_i).squeeze(0) s[i + 1, b], t[i + 1, b] = split(filtered_τ, [λ, 1]) CW_n = (-1) ** t[n, 1].to(dtype) * (beta - Convert(s[n, 0]) + Convert(s[n, 1])) return (alpha,) + s[0].unbind() + (CW, CW_n) @staticmethod def eval(b, x, *k_b): original_shape = x.shape x = x.reshape(-1) n_values = x.shape[0] x = bit_decomposition(x) s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values) s[0] = k_b[0] # here k[1:] is (CW, CW_n) CW = k_b[1].unbind() + (k_b[2],) t[0] = b for i in range(0, n): τ = G(s[i]) ^ (t[i] * CW[i]) τ = τ.reshape(2, λ + 1, n_values) x_i = x[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long() filtered_τ = th.gather(τ, 0, x_i).squeeze(0) s[i + 1], t[i + 1] = split(filtered_τ, [λ, 1]) flat_result = (-1) ** b * (Convert(s[n]) + t[n].squeeze() * CW[n]) return flat_result.reshape(original_shape) class DIF: """Distributed Interval Function - used for comparison <=""" def __init__(self): pass @staticmethod def keygen(n_values=1): alpha = th.randint(0, 2 ** n, (n_values,)) α = bit_decomposition(alpha) s, t, CW 
= ( Array(n + 1, 2, λ, n_values), Array(n + 1, 2, n_values), Array(n, 2 + 2 * (λ + 1), n_values), ) s[0] = randbit(size=(2, λ, n_values)) t[0] = th.tensor([[0, 1]] * n_values, dtype=th.uint8).t() for i in range(0, n): h0 = H(s[i, 0]) h1 = H(s[i, 1]) # Re-use useless randomness _, _, sL_0, _, sR_0, _ = split(h0, [1, 1, λ, 1, λ, 1]) _, _, sL_1, _, sR_1, _ = split(h1, [1, 1, λ, 1, λ, 1]) s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i]) cw_i = TruthTableDIF(s_rand, α[i]) CW[i] = cw_i ^ h0 ^ h1 for b in (0, 1): τ = [h0, h1][b] ^ (t[i, b] * CW[i]) τ = τ.reshape(2, λ + 2, n_values) # filtered_τ = τ[𝛼[i]] OLD α_i = α[i].unsqueeze(0).expand(λ + 2, n_values).unsqueeze(0).long() filtered_τ = th.gather(τ, 0, α_i).squeeze(0) σ_leaf, s[i + 1, b], t[i + 1, b] = split(filtered_τ, [1, λ, 1]) return (alpha,) + s[0].unbind() + (CW,) @staticmethod def eval(b, x, *k_b): original_shape = x.shape x = x.reshape(-1) n_values = x.shape[0] x = bit_decomposition(x) FnOutput = Array(n + 1, n_values) s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values) s[0] = k_b[0] CW = k_b[1].unbind() t[0] = b for i in range(0, n): τ = H(s[i]) ^ (t[i] * CW[i]) τ = τ.reshape(2, λ + 2, n_values) x_i = x[i].unsqueeze(0).expand(λ + 2, n_values).unsqueeze(0).long() filtered_τ = th.gather(τ, 0, x_i).squeeze(0) σ_leaf, s[i + 1], t[i + 1] = split(filtered_τ, [1, λ, 1]) FnOutput[i] = σ_leaf # Last tour, the other σ is also a leaf: FnOutput[n] = t[n] flat_result = FnOutput.sum(axis=0) % 2 return flat_result.reshape(original_shape) # PRG def G(seed): assert seed.shape[0] == λ seed_t = seed.t().tolist() gen_list = [] for seed_bit in seed_t: enc_str = str(seed_bit).encode() h = hashlib.sha3_256(enc_str) r = h.digest() binary_str = bin(int.from_bytes(r, byteorder="big"))[2 : 2 + (2 * (λ + 1))] gen_list.append(list(map(int, binary_str))) return th.tensor(gen_list, dtype=th.uint8).t() def H(seed): assert seed.shape[0] == λ seed_t = seed.t().tolist() gen_list = [] for seed_bit in seed_t: enc_str = str(seed_bit).encode() h = hashlib.sha3_256(enc_str) r = h.digest() binary_str = bin(int.from_bytes(r, byteorder="big"))[2 : 2 + 2 + (2 * (λ + 1))] gen_list.append(list(map(int, binary_str))) return th.tensor(gen_list, dtype=th.uint8).t() def Convert(bits): bit_pow_lambda = th.flip(2 ** th.arange(λ), (0,)).unsqueeze(-1).to(th.long) return (bits.to(th.long) * bit_pow_lambda).sum(dim=0).to(dtype) def Array(*shape): return th.empty(shape, dtype=th.uint8) bit_pow_n = th.flip(2 ** th.arange(n), (0,)) def bit_decomposition(x): x = x.unsqueeze(-1) z = bit_pow_n & x z = z.t() return (z > 0).to(th.uint8) def randbit(size): return th.randint(2, size=size) def concat(*args, **kwargs): return th.cat(args, **kwargs) def split(x, idx): return th.split(x, idx) def TruthTableDPF(s, α_i): one = th.ones((1, s.shape[1])).to(th.uint8) s_one = concat(s, one) Table = th.zeros((2, λ + 1, len(α_i)), dtype=th.uint8) for j, el in enumerate(α_i): Table[el.item(), :, j] = s_one[:, j] return Table.reshape(-1, Table.shape[2]) def TruthTableDIF(s, α_i): leafTable = th.zeros((2, 1, len(α_i)), dtype=th.uint8) # TODO optimize: just put alpha on first line leaf_value = α_i for j, el in enumerate(α_i): leafTable[(1 - el).item(), 0, j] = leaf_value[j] one = th.ones((1, s.shape[1])).to(th.uint8) s_one = concat(s, one) nextTable = th.zeros((2, λ + 1, len(α_i)), dtype=th.uint8) for j, el in enumerate(α_i): nextTable[el.item(), :, j] = s_one[:, j] Table = concat(leafTable, nextTable, axis=1) Table = Table.reshape(-1, Table.shape[2]) return Table
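A hedged correctness sketch for the DPF primitive, assuming each party b holds (s0_b, CW, CW_n) as in eq_eval_plan(); the int64 cast before the modulo and the expected output are assumptions based on reading the code, not a verified test.

alpha, s0_0, s0_1, CW, CW_n = DPF.keygen(n_values=4)
x = alpha.clone()
x[0] += 1                                    # make the first query point differ from alpha
share_0 = DPF.eval(0, x, s0_0, CW, CW_n)     # party 0's share of f_alpha(x)
share_1 = DPF.eval(1, x, s0_1, CW, CW_n)     # party 1's share of f_alpha(x)
result = (share_0.to(th.int64) + share_1.to(th.int64)) % 2 ** n
# Expected: beta (= 1) where x == alpha and 0 elsewhere, i.e. roughly [0, 1, 1, 1] here.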
#!/usr/bin/env python3
"""
Test for local-subnet identifier
"""

import unittest

import netifaces

from base_test import PschedTestBase

from pscheduler.limitprocessor.identifier.localsubnet import *


DATA = {
}


class TestLimitprocessorIdentifierLocalSubnet(PschedTestBase):
    """
    Test the Identifier
    """

    def test_data_is_valid(self):
        """Limit Processor / Identifier Local Subnet / Data Validation"""
        self.assertEqual(data_is_valid(DATA), (True, "OK"))
        self.assertEqual(data_is_valid({"abc": 123}),
                         (False, 'Data is not an object or not empty.'))

    def test_identifier(self):
        """Limit Processor / Identifier Local Subnet / Identifier"""

        test_ifaces = {
            "lo0": {
                netifaces.AF_INET: [
                    {'addr': '127.0.0.1', 'netmask': '255.0.0.0', 'peer': '127.0.0.1'}
                ],
                netifaces.AF_INET6: [
                    {'addr': '::1',
                     'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128',
                     'peer': '::1', 'flags': 0},
                    {'addr': 'fe80::1%lo0',
                     'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 0}
                ]
            }
        }

        ident = IdentifierLocalSubnet(DATA, test_ifaces=test_ifaces)

        self.assertEqual(ident.evaluate({"requester": "127.0.0.5"}), True)
        self.assertEqual(ident.evaluate({"requester": "fe80::1"}), True)
        self.assertEqual(ident.evaluate({"requester": "192.0.2.9"}), False)
        self.assertEqual(ident.evaluate({"requester": "2001:db8::1"}), False)


if __name__ == '__main__':
    unittest.main()
from PuzzleLib.Cuda.Kernels.RadixSort import backendTest


def unittest():
    from PuzzleLib.Hip import Backend
    backendTest(Backend)


if __name__ == "__main__":
    unittest()
import os
from setuptools import setup

README = """
See the README on `GitHub <https://github.com/uw-it-aca/app_name>`_.
"""

# The VERSION file is created by travis-ci, based on the tag name
version_path = "app_name/VERSION"
print(os.path.join(os.path.dirname(__file__), version_path))
VERSION = open(os.path.join(os.path.dirname(__file__), version_path)).read()
VERSION = VERSION.replace("\n", "")

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

url = "https://github.com/uw-it-aca/app_name"
setup(
    name="app_name",
    version=VERSION,
    packages=["app_name"],
    author="UW-IT AXDD",
    author_email="aca-it@uw.edu",
    include_package_data=True,
    install_requires=[
        'django~=3.2',
        "django-webpack-loader",
    ],
    license="Apache License, Version 2.0",
    description="A tool for visually displaying UW course prerequisites",
    long_description=README,
    url=url,
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
    ],
)
from abc import abstractmethod

from ml import LabelStudioMLBase


class LabelStudioMLBaseHelper(LabelStudioMLBase):

    @abstractmethod
    def prepare_tasks(self, tasks, workdir=None, **kwargs):
        pass

    @abstractmethod
    def convert_predictions(self, predictions, **kwargs):
        pass

    @abstractmethod
    def predict2(self, X, y=None, **kwargs):
        pass

    @abstractmethod
    def fit2(self, X, y, **kwargs):
        pass

    def predict(self, tasks, **kwargs):
        X, y = self.prepare_tasks(tasks, **kwargs)
        predictions = self.predict2(X, y, **kwargs)
        result = self.convert_predictions(predictions, **kwargs)
        return result

    def fit(self, completions, workdir=None, **kwargs):
        X, y = self.prepare_tasks(completions, workdir=workdir, **kwargs)
        return self.fit2(X, y, **kwargs)

    def _has_annotation(self, task):
        return 'completions' in task


class LabelStudioMLChoices(LabelStudioMLBaseHelper):

    def __init__(self, **kwargs):
        super(LabelStudioMLChoices, self).__init__(**kwargs)
        assert len(self.parsed_label_config) == 1
        self.from_name, self.info = list(self.parsed_label_config.items())[0]
        assert self.info['type'] == 'Choices'
        assert len(self.info['to_name']) == 1
        assert len(self.info['inputs']) == 1
        self.to_name = self.info['to_name'][0]
        self.value = self.info['inputs'][0]['value']

    def prepare_tasks(self, tasks, workdir=None, **kwargs):
        X, y = [], []
        for task in tasks:
            X.append(task['data'][self.value])
            if self._has_annotation(task):
                choices = task['completions'][0]['result'][0]['value']['choices']
                y.append(choices)
            else:
                y.append(None)
        return X, y

    def convert_predictions(self, predictions, **kwargs):
        list_choices, scores = predictions
        results = []
        for choices, score in zip(list_choices, scores):
            result = [{
                'from_name': self.from_name,
                'to_name': self.to_name,
                'type': 'choices',
                'value': {'choices': choices}
            }]
            results.append({'result': result, 'score': score})
        return results
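A minimal sketch of a concrete backend built on LabelStudioMLChoices, assuming scikit-learn is available; the TF-IDF/logistic-regression model and the flattening of each choices list to its first label are illustrative assumptions, not part of the original module.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline


class SimpleTextChoices(LabelStudioMLChoices):

    def fit2(self, X, y, **kwargs):
        # Keep only annotated tasks and use the first choice as the label (assumption).
        pairs = [(x, choices[0]) for x, choices in zip(X, y) if choices]
        texts, labels = zip(*pairs)
        self._model = make_pipeline(TfidfVectorizer(), LogisticRegression())
        self._model.fit(texts, labels)
        return {}

    def predict2(self, X, y=None, **kwargs):
        probs = self._model.predict_proba(X)
        labels = self._model.classes_[probs.argmax(axis=1)]
        scores = probs.max(axis=1)
        # convert_predictions() expects (list of choices lists, scores).
        return [[label] for label in labels], scores.tolist()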
import requests import urllib import time import hashlib import hmac import itertools try: from urllib.parse import urlencode except ImportError: from urllib import urlencode from .api import Base from .errors import ApiError, ArgumentError def check_values(value, arg, arg_value): if type(value) == type: if type(arg_value) != value: raise ArgumentError(u"Type of argument {} is invalid. It should be {}".format(arg, value)) elif arg_value not in value: raise ArgumentError(u"Value of argument {} is invalid. It should be one of {}".format(arg, value)) def check_args(kwargs, required_parameters, optional_parameters={}): args = kwargs.keys() required_args = required_parameters.keys() optional_args = optional_parameters.keys() missing_args = list(set(required_args) - set(args)) if len(missing_args) > 0: raise ArgumentError(u"Parameter {} is required".format(missing_args)) for arg_name, arg_value in kwargs.items(): if arg_name in optional_args: optional_value = optional_parameters[arg_name] check_values(optional_value, arg_name, arg_value) elif arg_name in required_args: required_value = required_parameters[arg_name] check_values(required_value, arg_name, arg_value) class TradeApi(Base): def __init__(self, identifier=None, secret=None): self.id = identifier self.secret = secret self.path = "/tapi/v3/" self.available_pairs = ["BRLBTC", "BRLLTC", "BRLBCH", "BRLXRP", "BRLETH", "BRLUSDC", "BRLMBPRK01", "BRLMBPRK02", "BRLMBPRK03", "BRLMBPRK04", "BRLMBCONS01"] Base.__init__(self) def list_system_messages(self, level="INFO"): """https://www.mercadobitcoin.com.br/trade-api/#list_system_messages""" payload = { "level": level } check_args(payload, { "level": ["INFO", "WARNING", "ERROR"] }) return self.__check_response(self.__post_tapi("list_system_messages", payload)) def get_account_info(self): """https://www.mercadobitcoin.com.br/trade-api/#get_account_info""" return self.__check_response(self.__post_tapi("get_account_info")) def get_order(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#get_order""" check_args(kwargs, { "coin_pair": self.available_pairs, "order_id": int }) return self.__check_response(self.__post_tapi("get_order", kwargs)) def list_orders(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#list_orders""" check_args(kwargs, { "coin_pair": self.available_pairs }, { "order_type": [1, 2], "status_list": str, "has_fills": [True, False], "from_id": int, "to_id": int, "from_timestamp": str, "to_timestamp": str }) return self.__check_response(self.__post_tapi("list_orders", kwargs )) def list_orderbook(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#list_orderbook""" check_args(kwargs, { "coin_pair": self.available_pairs }, { "full": [True, False] }) return self.__check_response(self.__post_tapi("list_orderbook", kwargs )) def place_buy_order(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#place_buy_order""" check_args(kwargs, { "coin_pair": self.available_pairs, "quantity": str, "limit_price": str }) return self.__check_response(self.__post_tapi("place_buy_order", kwargs )) def place_sell_order(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#place_sell_order""" check_args(kwargs, { "coin_pair": self.available_pairs, "quantity": str, "limit_price": str }) return self.__check_response(self.__post_tapi("place_sell_order", kwargs )) def cancel_order(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#cancel_order""" check_args(kwargs, { "coin_pair": self.available_pairs, "order_id": int }) return 
self.__check_response(self.__post_tapi("cancel_order", kwargs )) def get_withdrawal(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#get_withdrawal""" check_args(kwargs, { "coin": self.available_pairs, "withdrawal_id": int }) return self.__check_response(self.__post_tapi("get_withdrawal", kwargs )) def withdraw_coin_brl(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#withdraw_coin""" check_args(kwargs, { "coin": ["BRL"], "quantity": str, "account_ref": str }, { "description": str }) return self.__check_response(self.__post_tapi("withdraw_coin", kwargs )) def withdraw_coin(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#withdraw_coin""" check_args(kwargs, { "coin": ["BTC", "LTC", "BCH", "ETH"], "quantity": str, "address": str, "tx_fee": str }, { "description": str }) return self.__check_response(self.__post_tapi("withdraw_coin", kwargs )) def withdraw_coin_xrp(self, **kwargs): """https://www.mercadobitcoin.com.br/trade-api/#withdraw_coin""" check_args(kwargs, { "coin": ["XRP"], "quantity": str, "address": str, "tx_fee": str, "destination_tag": int }, { "description": str }) return self.__check_response(self.__post_tapi("withdraw_coin", kwargs )) def __check_response(self, response): if response["status_code"] == 100: return response["response_data"] else: raise ApiError(response["error_message"], response["status_code"]) def __post_tapi(self, method, params={}): payload = { "tapi_method": method, "tapi_nonce": str(int(time.time()*1000000))} payload.update(params) headers = { "Content-Type": "application/x-www-form-urlencoded", "TAPI-ID": self.id, "TAPI-MAC": self.__signature(payload) } response = requests.post("https://{}{}".format(self.host, self.path), headers=headers, data=payload) return response.json() def __signature(self, payload): signature = hmac.new(self.secret, digestmod=hashlib.sha512) params = self.path + '?' + urlencode(payload) signature.update(params.encode('utf-8')) return signature.hexdigest()
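For orientation, a hedged usage sketch of the TradeApi client above; the credentials and order values are placeholders, and the import path is whatever the surrounding package exposes. The secret is supplied as bytes because __signature hands it straight to hmac.new, which requires a bytes key on Python 3.

# Placeholder credentials; TradeApi, ApiError and ArgumentError come from the module above.
tapi = TradeApi(identifier="your-tapi-id", secret=b"your-tapi-secret")

try:
    account = tapi.get_account_info()
    book = tapi.list_orderbook(coin_pair="BRLBTC", full=True)
    order = tapi.place_buy_order(coin_pair="BRLBTC",
                                 quantity="0.001",
                                 limit_price="150000.00")
except ArgumentError as exc:
    print("Invalid request parameters:", exc)   # raised by check_args before any HTTP call
except ApiError as exc:
    print("Trade API error:", exc)              # non-100 status_code returned by the API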
from umonitor import __version__ def test_version(): assert __version__ == '0.1.5'
#!/usr/bin/env python3 import sys, utils, random # import the modules we will need utils.check_version((3,7)) # make sure we are running at least Python 3.7 utils.clear() # clear the screen print('Greetings!') # prints out "Greetings!" in the terminal. colors = ['red','orange','yellow','green','blue','violet','purple'] # creates a list of colors which will be saved for future use. play_again = '' # creates a variable called "play_again" that is just an empty string at the moment best_count = sys.maxsize # the biggest number, which makes it so that the first time they play the game, they will get their best guess so far. while (play_again != 'n' and play_again != 'no'): # will repeat the game, as long as the player has not responded negatively to playing again. match_color = random.choice(colors) # the program picks a random color from the list we created earlier so the game is different every time. count = 0 # starts a counter at 0 that will be used to check how many attempts the user had to go through in order to guess the correct color color = '' # creates the variable color, which will soon be replaced by the user's input. while (color != match_color): # will run this loop while the color does not match the randomly selected color color = input("\nWhat is my favorite color? ") # \n is a special code that adds a new line; this also takes an input from the user after printing "What is my favorite color?" in the window. color = color.lower().strip() # this line takes the user's guessed color and strips it of spaces as well as downcasing all letters count += 1 # this adds one to the count variable, tracking that the user just made a guess. if (color == match_color): # checks if the guessed color matches the randomly selected color. print('Correct!') # if so the program will print "Correct!" else: # if the above check does not return true, the program will run what falls under this line. print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count)) # the program prints the text within the quotes while replacing {guesses} with the variable saved in count print('\nYou guessed it in {0} tries!'.format(count)) # the program prints the text within the quotes and replaces {0} with the variable stored in count if (count < best_count): # checks if the player used fewer guesses than their best run of this game so far. print('This was your best guess so far!') # if the above check returns true, then the program prints the text within the quotes. best_count = count # if the above check returns true, the current count for this game replaces best_count as the new record. play_again = input("\nWould you like to play again? ").lower().strip() # checks if the player would like to play again, and strips and downcases the input to save as the play_again input print('Thanks for playing!') # once the player has ended the game by responding with "n" or "no" the program prints the text within quotes on this line.
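The game imports a local utils module that is not shown here. The following is only a plausible stand-in consistent with the two calls the script makes, check_version((3, 7)) and clear(); the real helper may look different.

# Hypothetical stand-in for the local "utils" module used by the game above.
import os
import sys


def check_version(required):
    """Exit with a message if the interpreter is older than `required`, e.g. (3, 7)."""
    if sys.version_info < required:
        sys.exit("This program requires Python {}.{} or newer.".format(*required))


def clear():
    """Clear the terminal screen on Windows ('cls') or POSIX ('clear')."""
    os.system('cls' if os.name == 'nt' else 'clear')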
#!/usr/bin/env python3 # Copyright (c) 2017-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoinexodusd node under test""" import contextlib import decimal import errno from enum import Enum import http.client import json import logging import os import re import subprocess import tempfile import time import urllib.parse from .authproxy import JSONRPCException from .util import ( append_config, delete_cookie_file, get_rpc_proxy, rpc_url, wait_until, p2p_port, ) # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOINEXODUSD_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode(): """A class for representing a bitcoinexodusd node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, *, rpchost, timewait, bitcoinexodusd, bitcoinexodus_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False): self.index = i self.datadir = datadir self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.rpchost = rpchost self.rpc_timeout = timewait self.binary = bitcoinexodusd self.coverage_dir = coverage_dir if extra_conf != None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. 
# Note that common args are set in the config file (see initialize_datadir) self.extra_args = extra_args self.args = [ self.binary, "-datadir=" + self.datadir, "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i ] self.cli = TestNodeCLI(bitcoinexodus_cli, self.datadir) self.use_cli = use_cli self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) self.cleanup_on_exit = True # Whether to kill the node when this object goes away self.p2ps = [] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" PRIV_KEYS = [ # adress , privkey ('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), ('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), ('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), ('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), ('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), ('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), ('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), ('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), ('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), ] return PRIV_KEYS[self.index] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return "[node %d] %s" % (self.index, msg) def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoinexodusd processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shutdown when # this destructor is called. 
print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(self.cli, name) else: assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") return getattr(self.rpc, name) def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoinexodusd is started if stderr is None: stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoinexodusd, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs) self.running = True self.log.debug("bitcoinexodusd started, waiting for RPC to come up") def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoinexodusd process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( 'bitcoinexodusd exited with status {} during initialization'.format(self.process.returncode))) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoinexodusd still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error("Unable to connect to bitcoinexodusd") def get_wallet_rpc(self, wallet_name): if self.use_cli: return self.cli("-rpcwallet={}".format(wallet_name)) else: assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected") wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) return self.rpc / wallet_path def stop_node(self, expected_stderr=''): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr)) self.stdout.close() self.stderr.close() del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. 
This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( "Node returned non-zero exit code (%d) when stopping" % return_code) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOINEXODUSD_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) @contextlib.contextmanager def assert_debug_log(self, expected_msgs): debug_log = os.path.join(self.datadir, 'regtest', 'debug.log') with open(debug_log, encoding='utf-8') as dl: dl.seek(0, 2) prev_size = dl.tell() try: yield finally: with open(debug_log, encoding='utf-8') as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for expected_msg in expected_msgs: if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log)) def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to bitcoinexodusd expected_msg: regex that stderr should match when bitcoinexodusd fails Will throw if bitcoinexodusd starts without an error. Will throw if an expected_msg is provided and it does not match bitcoinexodusd's stdout.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) self.wait_for_rpc_connection() self.stop_node() self.wait_until_stopped() except FailedToStartError as e: self.log.debug('bitcoinexodusd failed to start: %s', e) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) else: if expected_msg is None: assert_msg = "bitcoinexodusd should have exited with an error" else: assert_msg = "bitcoinexodusd should have exited with expected error " + expected_msg self._raise_assertion_error(assert_msg) def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. This causes bitcoinexodusd to shutdown, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) self.wait_until_stopped() def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add a p2p connection to the node. 
This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(**kwargs)() self.p2ps.append(p2p_conn) if wait_for_verack: p2p_conn.wait_for_verack() return p2p_conn @property def p2p(self): """Return the first p2p connection Convenience property - most tests only use a single p2p connection to each node, so this saves having to write node.p2ps[0] many times.""" assert self.p2ps, self._node_msg("No p2p connection") return self.p2ps[0] def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) class TestNodeCLI(): """Interface to bitcoinexodus-cli for an individual node""" def __init__(self, binary, datadir): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoinexoduscli') def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoinexodus-cli command-line options cli = TestNodeCLI(self.binary, self.datadir) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): """Run bitcoinexodus-cli command. Deserializes returned string as python object.""" pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoinexodus-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running bitcoinexodus-cli command: %s" % command) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except JSONDecodeError: return cli_stdout.rstrip("\n")
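A hedged sketch of driving TestNode and TestNodeCLI by hand. In practice these objects are created, and their data directories prepared, by the surrounding test framework, so the binary paths, datadir and expected log text below are assumptions.

# Placeholders: paths and the expected debug.log line are assumptions; the datadir
# is normally prepared (config file plus stdout/stderr subdirectories) by the framework.
node = TestNode(
    0, "/tmp/testnode0",
    rpchost=None, timewait=60,
    bitcoinexodusd="/path/to/bitcoinexodusd",
    bitcoinexodus_cli="/path/to/bitcoinexodus-cli",
    mocktime=0, coverage_dir=None,
    extra_args=[],
)

node.start()                        # launch the daemon with self.args + extra_args
node.wait_for_rpc_connection()      # poll until getblockcount() answers over RPC

with node.assert_debug_log(["init message"]):   # expected log text is an assumption
    print(node.getblockcount())     # unknown attributes dispatch to the RPC proxy

node.stop_node()
node.wait_until_stopped()

# The same RPC can be reached through bitcoinexodus-cli:
cli = TestNodeCLI("/path/to/bitcoinexodus-cli", "/tmp/testnode0")
print(cli.getblockcount())          # attribute access builds and runs a CLI command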
import warnings import numpy as np from skimage import img_as_float from skimage.util.dtype import dtype_range, dtype_limits from skimage._shared.utils import deprecated __all__ = ['histogram', 'cumulative_distribution', 'equalize', 'rescale_intensity', 'adjust_gamma', 'adjust_log', 'adjust_sigmoid'] def histogram(image, nbins=256): """Return histogram of image. Unlike `numpy.histogram`, this function returns the centers of bins and does not rebin integer arrays. For integer arrays, each integer value has its own bin, which improves speed and intensity-resolution. The histogram is computed on the flattened image: for color images, the function should be used separately on each channel to obtain a histogram for each color channel. Parameters ---------- image : array Input image. nbins : int Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- hist : array The values of the histogram. bin_centers : array The values at the center of the bins. Examples -------- >>> from skimage import data, exposure, util >>> image = util.img_as_float(data.camera()) >>> np.histogram(image, bins=2) (array([107432, 154712]), array([ 0. , 0.5, 1. ])) >>> exposure.histogram(image, nbins=2) (array([107432, 154712]), array([ 0.25, 0.75])) """ sh = image.shape if len(sh) == 3 and sh[-1] < 4: warnings.warn("This might be a color image. The histogram will be " "computed on the flattened image. You can instead " "apply this function to each color channel.") # For integer types, histogramming with bincount is more efficient. if np.issubdtype(image.dtype, np.integer): offset = 0 if np.min(image) < 0: offset = np.min(image) hist = np.bincount(image.ravel() - offset) bin_centers = np.arange(len(hist)) + offset # clip histogram to start with a non-zero bin idx = np.nonzero(hist)[0][0] return hist[idx:], bin_centers[idx:] else: hist, bin_edges = np.histogram(image.flat, nbins) bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2. return hist, bin_centers def cumulative_distribution(image, nbins=256): """Return cumulative distribution function (cdf) for the given image. Parameters ---------- image : array Image array. nbins : int Number of bins for image histogram. Returns ------- img_cdf : array Values of cumulative distribution function. bin_centers : array Centers of bins. References ---------- .. [1] http://en.wikipedia.org/wiki/Cumulative_distribution_function """ hist, bin_centers = histogram(image, nbins) img_cdf = hist.cumsum() img_cdf = img_cdf / float(img_cdf[-1]) return img_cdf, bin_centers @deprecated('equalize_hist') def equalize(image, nbins=256): return equalize_hist(image, nbins) def equalize_hist(image, nbins=256): """Return image after histogram equalization. Parameters ---------- image : array Image array. nbins : int Number of bins for image histogram. Returns ------- out : float array Image array after histogram equalization. Notes ----- This function is adapted from [1]_ with the author's permission. References ---------- .. [1] http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html .. [2] http://en.wikipedia.org/wiki/Histogram_equalization """ image = img_as_float(image) cdf, bin_centers = cumulative_distribution(image, nbins) out = np.interp(image.flat, bin_centers, cdf) return out.reshape(image.shape) def rescale_intensity(image, in_range=None, out_range=None): """Return image after stretching or shrinking its intensity levels. 
The image intensities are uniformly rescaled such that the minimum and maximum values given by `in_range` match those given by `out_range`. Parameters ---------- image : array Image array. in_range : 2-tuple (float, float) Min and max *allowed* intensity values of input image. If None, the *allowed* min/max values are set to the *actual* min/max values in the input image. out_range : 2-tuple (float, float) Min and max intensity values of output image. If None, use the min/max intensities of the image data type. See `skimage.util.dtype` for details. Returns ------- out : array Image array after rescaling its intensity. This image is the same dtype as the input image. Examples -------- By default, intensities are stretched to the limits allowed by the dtype: >>> image = np.array([51, 102, 153], dtype=np.uint8) >>> rescale_intensity(image) array([ 0, 127, 255], dtype=uint8) It's easy to accidentally convert an image dtype from uint8 to float: >>> 1.0 * image array([ 51., 102., 153.]) Use `rescale_intensity` to rescale to the proper range for float dtypes: >>> image_float = 1.0 * image >>> rescale_intensity(image_float) array([ 0. , 0.5, 1. ]) To maintain the low contrast of the original, use the `in_range` parameter: >>> rescale_intensity(image_float, in_range=(0, 255)) array([ 0.2, 0.4, 0.6]) If the min/max value of `in_range` is more/less than the min/max image intensity, then the intensity levels are clipped: >>> rescale_intensity(image_float, in_range=(0, 102)) array([ 0.5, 1. , 1. ]) If you have an image with signed integers but want to rescale the image to just the positive range, use the `out_range` parameter: >>> image = np.array([-10, 0, 10], dtype=np.int8) >>> rescale_intensity(image, out_range=(0, 127)) array([ 0, 63, 127], dtype=int8) """ dtype = image.dtype.type if in_range is None: imin = np.min(image) imax = np.max(image) else: imin, imax = in_range if out_range is None: omin, omax = dtype_range[dtype] if imin >= 0: omin = 0 else: omin, omax = out_range image = np.clip(image, imin, imax) image = (image - imin) / float(imax - imin) return dtype(image * (omax - omin) + omin) def _assert_non_negative(image): if np.any(image < 0): raise ValueError('Image Correction methods work correctly only on ' 'images with non-negative values. Use ' 'skimage.exposure.rescale_intensity.') def adjust_gamma(image, gamma=1, gain=1): """Performs Gamma Correction on the input image. Also known as Power Law Transform. This function transforms the input image pixelwise according to the equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1. Parameters ---------- image : ndarray Input image. gamma : float Non negative real number. Default value is 1. gain : float The constant multiplier. Default value is 1. Returns ------- out : ndarray Gamma corrected output image. Notes ----- For gamma greater than 1, the histogram will shift towards left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards right and the output image will be brighter than the input image. References ---------- .. [1] http://en.wikipedia.org/wiki/Gamma_correction """ _assert_non_negative(image) dtype = image.dtype.type if gamma < 0: return "Gamma should be a non-negative real number" scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0]) out = ((image / scale) ** gamma) * scale * gain return dtype(out) def adjust_log(image, gain=1, inv=False): """Performs Logarithmic correction on the input image. 
This function transforms the input image pixelwise according to the equation ``O = gain*log(1 + I)`` after scaling each pixel to the range 0 to 1. For inverse logarithmic correction, the equation is ``O = gain*(2**I - 1)``. Parameters ---------- image : ndarray Input image. gain : float The constant multiplier. Default value is 1. inv : float If True, it performs inverse logarithmic correction, else correction will be logarithmic. Defaults to False. Returns ------- out : ndarray Logarithm corrected output image. References ---------- .. [1] http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf """ _assert_non_negative(image) dtype = image.dtype.type scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0]) if inv: out = (2 ** (image / scale) - 1) * scale * gain return dtype(out) out = np.log2(1 + image / scale) * scale * gain return dtype(out) def adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False): """Performs Sigmoid Correction on the input image. Also known as Contrast Adjustment. This function transforms the input image pixelwise according to the equation ``O = 1/(1 + exp*(gain*(cutoff - I)))`` after scaling each pixel to the range 0 to 1. Parameters ---------- image : ndarray Input image. cutoff : float Cutoff of the sigmoid function that shifts the characteristic curve in horizontal direction. Default value is 0.5. gain : float The constant multiplier in exponential's power of sigmoid function. Default value is 10. inv : bool If True, returns the negative sigmoid correction. Defaults to False. Returns ------- out : ndarray Sigmoid corrected output image. References ---------- .. [1] Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions", http://www.cis.rit.edu/fairchild/PDFs/PAP07.pdf """ _assert_non_negative(image) dtype = image.dtype.type scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0]) if inv: out = (1 - 1 / (1 + np.exp(gain * (cutoff - image/scale)))) * scale return dtype(out) out = (1 / (1 + np.exp(gain * (cutoff - image/scale)))) * scale return dtype(out)
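A short, illustrative run of the exposure helpers above on a synthetic uint8 image; in a normal script they would be imported from skimage.exposure rather than called from the module source.

# Tiny synthetic image to exercise the helpers defined above.
import numpy as np

image = np.array([[51, 102], [153, 204]], dtype=np.uint8)

hist, bin_centers = histogram(image)         # one bin per integer value for integer input
stretched = rescale_intensity(image)         # stretch to the full uint8 range, 0..255
low_contrast = rescale_intensity(1.0 * image, in_range=(0, 255))  # keep the original contrast in [0, 1]
equalized = equalize_hist(image)             # float image with a flattened histogram
darker = adjust_gamma(image, gamma=2)        # gamma > 1 darkens, gamma < 1 brightens
log_corrected = adjust_log(image, gain=1)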
# Copyright 2020-2022 OpenDR European Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import shutil import torch from opendr.perception.object_tracking_2d import ObjectTracking2DDeepSortLearner from opendr.perception.object_tracking_2d import ( Market1501Dataset, Market1501DatasetIterator, ) from opendr.perception.object_tracking_2d import ( MotDataset, RawMotWithDetectionsDatasetIterator, ) import os DEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) def rmfile(path): try: os.remove(path) except OSError as e: print("Error: %s - %s." % (e.filename, e.strerror)) def rmdir(_dir): try: shutil.rmtree(_dir) except OSError as e: print("Error: %s - %s." % (e.filename, e.strerror)) class TestObjectTracking2DDeepSortLearner(unittest.TestCase): @classmethod def setUpClass(cls): cls.temp_dir = os.path.join("tests", "sources", "tools", "perception", "object_tracking_2d", "deep_sort", "deep_sort_temp") cls.train_split_paths = { "nano_mot20": os.path.join( ".", "src", "opendr", "perception", "object_tracking_2d", "datasets", "splits", "nano_mot20.train" ) } cls.model_names = [ "deep_sort", ] cls.mot_dataset_path = MotDataset.download_nano_mot20( os.path.join(cls.temp_dir, "mot_dataset"), True ).path cls.market1501_dataset_path = Market1501Dataset.download_nano_market1501( os.path.join(cls.temp_dir, "market1501_dataset"), True ).path print("Dataset downloaded", file=sys.stderr) for model_name in cls.model_names: ObjectTracking2DDeepSortLearner.download( model_name, cls.temp_dir ) print("Models downloaded", file=sys.stderr) @classmethod def tearDownClass(cls): # Clean up downloaded files rmdir(os.path.join(cls.temp_dir)) def test_fit(self): def test_model(name): dataset = Market1501Dataset(self.market1501_dataset_path) learner = ObjectTracking2DDeepSortLearner( temp_path=self.temp_dir, device=DEVICE, ) starting_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone() learner.fit( dataset, epochs=2, val_epochs=2, verbose=True, ) new_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone() self.assertFalse(torch.equal(starting_param, new_param)) print("Fit", name, "ok", file=sys.stderr) for name in self.model_names: test_model(name) def test_fit_iterator(self): def test_model(name): dataset = Market1501DatasetIterator( os.path.join(self.market1501_dataset_path, "bounding_box_train"), ) eval_dataset = Market1501DatasetIterator( os.path.join(self.market1501_dataset_path, "bounding_box_test"), ) learner = ObjectTracking2DDeepSortLearner( checkpoint_after_iter=3, temp_path=self.temp_dir, device=DEVICE, ) starting_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone() learner.fit( dataset, epochs=2, val_dataset=eval_dataset, val_epochs=2, verbose=True, ) new_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone() self.assertFalse(torch.equal(starting_param, new_param)) print("Fit iterator", name, "ok", 
file=sys.stderr) for name in self.model_names: test_model(name) def test_eval(self): def test_model(name): model_path = os.path.join(self.temp_dir, name) train_split_paths = { "nano_mot20": os.path.join( ".", "src", "opendr", "perception", "object_tracking_2d", "datasets", "splits", "nano_mot20.train" ) } dataset = RawMotWithDetectionsDatasetIterator( self.mot_dataset_path, train_split_paths ) learner = ObjectTracking2DDeepSortLearner( temp_path=self.temp_dir, device=DEVICE, ) learner.load(model_path, verbose=True) result = learner.eval(dataset) self.assertGreater(len(result["mota"]), 0) for name in self.model_names: test_model(name) def test_infer(self): def test_model(name): model_path = os.path.join(self.temp_dir, name) train_split_paths = { "nano_mot20": os.path.join( ".", "src", "opendr", "perception", "object_tracking_2d", "datasets", "splits", "nano_mot20.train" ) } dataset = RawMotWithDetectionsDatasetIterator( self.mot_dataset_path, train_split_paths ) learner = ObjectTracking2DDeepSortLearner( temp_path=self.temp_dir, device=DEVICE, ) learner.load(model_path, verbose=True) result = learner.infer(dataset[0][0], 1) self.assertTrue(len(result) > 0) learner.reset() result = learner.infer([ dataset[0][0], dataset[1][0], ]) self.assertTrue(len(result) == 2) self.assertTrue(len(result[0]) > 0) for name in self.model_names: test_model(name) def test_save(self): def test_model(name): model_path = os.path.join(self.temp_dir, "test_save_" + name) save_path = os.path.join(model_path, "save") learner = ObjectTracking2DDeepSortLearner( temp_path=self.temp_dir, device=DEVICE, ) learner.save(save_path, True) starting_param_1 = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone() learner2 = ObjectTracking2DDeepSortLearner( temp_path=self.temp_dir, device=DEVICE, ) learner2.load(save_path) new_param = list(learner2.tracker.deepsort.extractor.net.parameters())[0].clone() self.assertTrue(torch.equal(starting_param_1, new_param)) for name in self.model_names: test_model(name) def test_optimize(self): def test_model(name): model_path = os.path.join(self.temp_dir, name) train_split_paths = { "nano_mot20": os.path.join( ".", "src", "opendr", "perception", "object_tracking_2d", "datasets", "splits", "nano_mot20.train" ) } dataset = RawMotWithDetectionsDatasetIterator( self.mot_dataset_path, train_split_paths ) learner = ObjectTracking2DDeepSortLearner( temp_path=self.temp_dir, device=DEVICE, ) learner.load(model_path, verbose=True) learner.optimize() result = learner.eval(dataset) self.assertGreater(len(result["mota"]), 0) for name in self.model_names: test_model(name) if __name__ == "__main__": unittest.main()
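Outside unittest, the same learner API the tests exercise can be driven as a small stand-alone script; the sketch below mirrors the calls made above, with the temporary directory path as a placeholder.

# Minimal stand-alone sketch mirroring the unit tests above; temp_dir is a placeholder.
import os

from opendr.perception.object_tracking_2d import (
    ObjectTracking2DDeepSortLearner,
    MotDataset,
    RawMotWithDetectionsDatasetIterator,
)

temp_dir = "./deep_sort_temp"
train_split_paths = {
    "nano_mot20": os.path.join(
        ".", "src", "opendr", "perception", "object_tracking_2d",
        "datasets", "splits", "nano_mot20.train"
    )
}

# Download a tiny MOT20 sample and a pretrained model, as the tests do.
mot_path = MotDataset.download_nano_mot20(os.path.join(temp_dir, "mot_dataset"), True).path
ObjectTracking2DDeepSortLearner.download("deep_sort", temp_dir)

dataset = RawMotWithDetectionsDatasetIterator(mot_path, train_split_paths)

learner = ObjectTracking2DDeepSortLearner(temp_path=temp_dir, device="cpu")
learner.load(os.path.join(temp_dir, "deep_sort"), verbose=True)

tracks = learner.infer(dataset[0][0], 1)     # track objects in the first frame
print(len(tracks), "tracks")
print(learner.eval(dataset)["mota"])         # MOTA summary over the split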
# coding: utf-8 """Python data types for IB Flex format XML data. These class definitions are introspected by ibflex.parser to type-convert IB data. They're dataclasses, made immutable by passing `Frozen=True` to the class decorator. Class attributes are annotated with PEP 484 type hints. Except for the top-level XML elements, i.e. <FlexQueryResponse>, <FlexStatements>, and <FlexStatement>, the Flex format cleanly differentiates between data-bearing elements and container elements. Data elements hold their values in XML element attributes; container elements are sequences of child elements (usually data elements, but sometimes other containers). XML element attributes are represented by class attributes hinted with the Python type to which their values should be converted. Almost all are marked `Optional`, since Flex report configuration allows any of them to be included or omitted individually. Default value is `None` for a single value, or an empty tuple for a sequence. Specifically defined enums are an exception; the parser handles missing values for them, so you shouldn't specify a default value. The enums therefore need to come first in the class definition to avoid offending dataclass. Some data elements have XML attributes whose values are sequences delimited by commas or semicolons. These are represented as by class attributes hinted as a variable-length `Tuple` of their sequence item type (`str` or an Enum type). XML container elements are represented as variable-length `Tuple` of contained child type. TODO - need types for: FdicInsuredDepositsByBank ComplexPositions HKIPOSubscriptionActivity PendingExcercises FxTransactions UnbookedTrades RoutingCommissions IBGNoteTransactions Adjustments SoftDollars CFDCharges SLBOpenContracts HKIPOOpenSubscriptions """ # PEP 563 compliance # https://www.python.org/dev/peps/pep-0563/#resolving-type-hints-at-runtime from __future__ import annotations __all__ = [ "FlexElement", "FlexQueryResponse", "FlexStatement", "AccountInformation", "ChangeInNAV", "MTMPerformanceSummaryUnderlying", "EquitySummaryByReportDateInBase", "MTDYTDPerformanceSummaryUnderlying", "CashReportCurrency", "FIFOPerformanceSummaryUnderlying", "NetStockPosition", "UnsettledTransfer", "UnbundledCommissionDetail", "StatementOfFundsLine", "ChangeInPositionValue", "OpenPosition", "FxLot", "Trade", "TradeConfirm", "OptionEAE", "TradeTransfer", "TierInterestDetail", "HardToBorrowDetail", "InterestAccrualsCurrency", "SLBActivity", "Transfer", "CorporateAction", "CashTransaction", "ChangeInDividendAccrual", "OpenDividendAccrual", "SecurityInfo", "ConversionRate", "PriorPeriodPosition", "ClientFee", "ClientFeesDetail", "SalesTax", "DebitCardActivity", "SymbolSummary", "Order" ] import datetime import decimal from dataclasses import dataclass, astuple from typing import Tuple, Optional from ibflex import enums @dataclass(frozen=True) class FlexElement: """ Base class for data element types """ def __iter__(self): return iter(astuple(self)) def items(self): for attr, val in self.__dict__.items(): yield attr, val @dataclass(frozen=True) class FlexQueryResponse(FlexElement): """ Root element """ queryName: str type: str FlexStatements: Tuple["FlexStatement", ...] 
def __repr__(self): repr = ( f"{type(self).__name__}(" f"queryName={self.queryName!r}, " f"type={self.type!r}, " f"len(FlexStatements)={len(self.FlexStatements)}" ")" ) return repr @dataclass(frozen=True) class FlexStatement(FlexElement): """ Wrapped in <FlexStatements> """ accountId: str fromDate: datetime.date toDate: datetime.date period: str whenGenerated: datetime.datetime AccountInformation: Optional["_AccountInformation"] = None ChangeInNAV: Optional["_ChangeInNAV"] = None CashReport: Tuple["CashReportCurrency", ...] = () MTDYTDPerformanceSummary: Tuple["MTDYTDPerformanceSummaryUnderlying", ...] = () MTMPerformanceSummaryInBase: Tuple["MTMPerformanceSummaryUnderlying", ...] = () EquitySummaryInBase: Tuple["EquitySummaryByReportDateInBase", ...] = () FIFOPerformanceSummaryInBase: Tuple["FIFOPerformanceSummaryUnderlying", ...] = () FdicInsuredDepositsByBank: Tuple = () # TODO StmtFunds: Tuple["StatementOfFundsLine", ...] = () ChangeInPositionValues: Tuple["ChangeInPositionValue", ...] = () OpenPositions: Tuple["OpenPosition", ...] = () NetStockPositionSummary: Tuple["NetStockPosition", ...] = () ComplexPositions: Tuple = () # TODO FxPositions: Tuple["FxLot", ...] = () # N.B. FXLot wrapped in FxLots Trades: Tuple["Trade", ...] = () HKIPOSubscriptionActivity: Tuple = () # TODO TradeConfirms: Tuple["TradeConfirm", ...] = () TransactionTaxes: Tuple = () OptionEAE: Tuple["_OptionEAE", ...] = () # Not a typo - they really spell it "Excercises" PendingExcercises: Tuple = () # TODO TradeTransfers: Tuple["TradeTransfer", ...] = () FxTransactions: Tuple = () # TODO UnbookedTrades: Tuple = () # TODO RoutingCommissions: Tuple = () # TODO IBGNoteTransactions: Tuple = () # TODO UnsettledTransfers: Tuple["UnsettledTransfer", ...] = () UnbundledCommissionDetails: Tuple["UnbundledCommissionDetail", ...] = () Adjustments: Tuple = () # TODO PriorPeriodPositions: Tuple["PriorPeriodPosition", ...] = () CorporateActions: Tuple["CorporateAction", ...] = () ClientFees: Tuple["ClientFee", ...] = () ClientFeesDetail: Tuple["_ClientFeesDetail", ...] = () DebitCardActivities: Tuple["DebitCardActivity", ...] = () SoftDollars: Tuple = () # TODO CashTransactions: Tuple["CashTransaction", ...] = () SalesTaxes: Tuple["SalesTax", ...] = () CFDCharges: Tuple = () # TODO InterestAccruals: Tuple["InterestAccrualsCurrency", ...] = () TierInterestDetails: Tuple["TierInterestDetail", ...] = () HardToBorrowDetails: Tuple["HardToBorrowDetail", ...] = () HardToBorrowMarkupDetails: Tuple = () SLBOpenContracts: Tuple = () # TODO SLBActivities: Tuple["SLBActivity", ...] = () SLBFees: Tuple["SLBFee", ...] = () Transfers: Tuple["Transfer", ...] = () ChangeInDividendAccruals: Tuple["_ChangeInDividendAccrual", ...] = () OpenDividendAccruals: Tuple["OpenDividendAccrual", ...] = () SecuritiesInfo: Tuple["SecurityInfo", ...] = () ConversionRates: Tuple["ConversionRate", ...] 
= () HKIPOOpenSubscriptions: Tuple = () # TODO CommissionCredits: Tuple = () # TODO StockGrantActivities: Tuple = () # TODO def __repr__(self): repr = ( f"{type(self).__name__}(" f"accountId={self.accountId!r}, " f"fromDate={self.fromDate!r}, " f"toDate={self.toDate!r}, " f"period={self.period!r}, " f"whenGenerated={self.whenGenerated!r}" ) sequences = ( (k, getattr(self, k)) for k, v in self.__annotations__.items() if hasattr(v, "__origin__") and v.__origin__ is tuple ) nonempty_sequences = ", ".join( f"len({name})={len(value)}" for (name, value) in sequences if value ) if nonempty_sequences: repr += ", " for seq in nonempty_sequences: repr += seq repr += ")" return repr @dataclass(frozen=True) class AccountInformation(FlexElement): """ Child of <FlexStatement> """ accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None name: Optional[str] = None accountType: Optional[str] = None customerType: Optional[str] = None accountCapabilities: Tuple[str, ...] = () tradingPermissions: Tuple[str, ...] = () registeredRepName: Optional[str] = None registeredRepPhone: Optional[str] = None dateOpened: Optional[datetime.date] = None dateFunded: Optional[datetime.date] = None dateClosed: Optional[datetime.date] = None street: Optional[str] = None street2: Optional[str] = None city: Optional[str] = None state: Optional[str] = None country: Optional[str] = None postalCode: Optional[str] = None streetResidentialAddress: Optional[str] = None street2ResidentialAddress: Optional[str] = None cityResidentialAddress: Optional[str] = None stateResidentialAddress: Optional[str] = None countryResidentialAddress: Optional[str] = None postalCodeResidentialAddress: Optional[str] = None masterName: Optional[str] = None ibEntity: Optional[str] = None primaryEmail: Optional[str] = None accountRepName: Optional[str] = None accountRepPhone: Optional[str] = None # Type alias to work around https://github.com/python/mypy/issues/1775 _AccountInformation = AccountInformation @dataclass(frozen=True) class ChangeInNAV(FlexElement): """ Child of <FlexStatement> """ accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None fromDate: Optional[datetime.date] = None toDate: Optional[datetime.date] = None startingValue: Optional[decimal.Decimal] = None mtm: Optional[decimal.Decimal] = None realized: Optional[decimal.Decimal] = None changeInUnrealized: Optional[decimal.Decimal] = None costAdjustments: Optional[decimal.Decimal] = None transferredPnlAdjustments: Optional[decimal.Decimal] = None depositsWithdrawals: Optional[decimal.Decimal] = None internalCashTransfers: Optional[decimal.Decimal] = None assetTransfers: Optional[decimal.Decimal] = None debitCardActivity: Optional[decimal.Decimal] = None billPay: Optional[decimal.Decimal] = None dividends: Optional[decimal.Decimal] = None withholdingTax: Optional[decimal.Decimal] = None withholding871m: Optional[decimal.Decimal] = None withholdingTaxCollected: Optional[decimal.Decimal] = None changeInDividendAccruals: Optional[decimal.Decimal] = None interest: Optional[decimal.Decimal] = None changeInInterestAccruals: Optional[decimal.Decimal] = None advisorFees: Optional[decimal.Decimal] = None brokerFees: Optional[decimal.Decimal] = None changeInBrokerFeeAccruals: Optional[decimal.Decimal] = None clientFees: Optional[decimal.Decimal] = None otherFees: Optional[decimal.Decimal] = None feesReceivables: Optional[decimal.Decimal] = None commissions: Optional[decimal.Decimal] = None 
commissionReceivables: Optional[decimal.Decimal] = None forexCommissions: Optional[decimal.Decimal] = None transactionTax: Optional[decimal.Decimal] = None taxReceivables: Optional[decimal.Decimal] = None salesTax: Optional[decimal.Decimal] = None softDollars: Optional[decimal.Decimal] = None netFxTrading: Optional[decimal.Decimal] = None fxTranslation: Optional[decimal.Decimal] = None linkingAdjustments: Optional[decimal.Decimal] = None other: Optional[decimal.Decimal] = None endingValue: Optional[decimal.Decimal] = None twr: Optional[decimal.Decimal] = None corporateActionProceeds: Optional[decimal.Decimal] = None commissionCreditsRedemption: Optional[decimal.Decimal] = None grantActivity: Optional[decimal.Decimal] = None excessFundSweep: Optional[decimal.Decimal] = None billableSalesTax: Optional[decimal.Decimal] = None # Type alias to work around https://github.com/python/mypy/issues/1775 _ChangeInNAV = ChangeInNAV @dataclass(frozen=True) class MTMPerformanceSummaryUnderlying(FlexElement): """ Wrapped in <MTMPerformanceSummaryInBase> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None sedol: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None reportDate: Optional[datetime.date] = None prevCloseQuantity: Optional[decimal.Decimal] = None prevClosePrice: Optional[decimal.Decimal] = None closeQuantity: Optional[decimal.Decimal] = None closePrice: Optional[decimal.Decimal] = None transactionMtm: Optional[decimal.Decimal] = None priorOpenMtm: Optional[decimal.Decimal] = None commissions: Optional[decimal.Decimal] = None other: Optional[decimal.Decimal] = None total: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () corpActionMtm: Optional[decimal.Decimal] = None dividends: Optional[decimal.Decimal] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None otherWithAccruals: Optional[decimal.Decimal] = None totalWithAccruals: Optional[decimal.Decimal] = None @dataclass(frozen=True) class EquitySummaryByReportDateInBase(FlexElement): """ Wrapped in <EquitySummaryInBase> """ accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None reportDate: Optional[datetime.date] = None cash: Optional[decimal.Decimal] = None cashLong: Optional[decimal.Decimal] = None cashShort: Optional[decimal.Decimal] = None slbCashCollateral: Optional[decimal.Decimal] = None slbCashCollateralLong: Optional[decimal.Decimal] = None slbCashCollateralShort: Optional[decimal.Decimal] = None stock: Optional[decimal.Decimal] = None stockLong: Optional[decimal.Decimal] = None stockShort: Optional[decimal.Decimal] = None slbDirectSecuritiesBorrowed: Optional[decimal.Decimal] = None slbDirectSecuritiesBorrowedLong: Optional[decimal.Decimal] = None slbDirectSecuritiesBorrowedShort: Optional[decimal.Decimal] = None slbDirectSecuritiesLent: Optional[decimal.Decimal] = None slbDirectSecuritiesLentLong: Optional[decimal.Decimal] = None slbDirectSecuritiesLentShort: Optional[decimal.Decimal] = None options: Optional[decimal.Decimal] = None optionsLong: Optional[decimal.Decimal] = None optionsShort: Optional[decimal.Decimal] = None bonds: Optional[decimal.Decimal] = None bondsLong: Optional[decimal.Decimal] = None bondsShort: Optional[decimal.Decimal] = None bondInterestAccrualsComponent: Optional[decimal.Decimal] = None bondInterestAccrualsComponentLong: Optional[decimal.Decimal] = None bondInterestAccrualsComponentShort: Optional[decimal.Decimal] = None notes: Optional[decimal.Decimal] = None notesLong: Optional[decimal.Decimal] = None notesShort: Optional[decimal.Decimal] = None interestAccruals: Optional[decimal.Decimal] = None interestAccrualsLong: Optional[decimal.Decimal] = None interestAccrualsShort: Optional[decimal.Decimal] = None softDollars: Optional[decimal.Decimal] = None softDollarsLong: Optional[decimal.Decimal] = None softDollarsShort: Optional[decimal.Decimal] = None dividendAccruals: Optional[decimal.Decimal] = None dividendAccrualsLong: Optional[decimal.Decimal] = None dividendAccrualsShort: Optional[decimal.Decimal] = None total: Optional[decimal.Decimal] = None totalLong: Optional[decimal.Decimal] = None totalShort: Optional[decimal.Decimal] = None commodities: Optional[decimal.Decimal] = None commoditiesLong: Optional[decimal.Decimal] = None commoditiesShort: Optional[decimal.Decimal] = None funds: Optional[decimal.Decimal] = None fundsLong: Optional[decimal.Decimal] = None fundsShort: Optional[decimal.Decimal] = None forexCfdUnrealizedPl: Optional[decimal.Decimal] = None forexCfdUnrealizedPlLong: Optional[decimal.Decimal] = None forexCfdUnrealizedPlShort: Optional[decimal.Decimal] = None brokerInterestAccrualsComponent: Optional[decimal.Decimal] = None brokerCashComponent: Optional[decimal.Decimal] = None brokerFeesAccrualsComponent: Optional[decimal.Decimal] = None brokerFeesAccrualsComponentLong: Optional[decimal.Decimal] = None brokerFeesAccrualsComponentShort: Optional[decimal.Decimal] = None cfdUnrealizedPl: Optional[decimal.Decimal] = None fdicInsuredBankSweepAccount: Optional[decimal.Decimal] = None fdicInsuredBankSweepAccountLong: 
Optional[decimal.Decimal] = None fdicInsuredBankSweepAccountShort: Optional[decimal.Decimal] = None fdicInsuredBankSweepAccountCashComponent: Optional[decimal.Decimal] = None fdicInsuredBankSweepAccountCashComponentLong: Optional[decimal.Decimal] = None fdicInsuredBankSweepAccountCashComponentShort: Optional[decimal.Decimal] = None fdicInsuredAccountInterestAccruals: Optional[decimal.Decimal] = None fdicInsuredAccountInterestAccrualsLong: Optional[decimal.Decimal] = None fdicInsuredAccountInterestAccrualsShort: Optional[decimal.Decimal] = None fdicInsuredAccountInterestAccrualsComponent: Optional[decimal.Decimal] = None fdicInsuredAccountInterestAccrualsComponentLong: Optional[decimal.Decimal] = None fdicInsuredAccountInterestAccrualsComponentShort: Optional[decimal.Decimal] = None brokerCashComponentLong: Optional[decimal.Decimal] = None brokerCashComponentShort: Optional[decimal.Decimal] = None brokerInterestAccrualsComponentLong: Optional[decimal.Decimal] = None brokerInterestAccrualsComponentShort: Optional[decimal.Decimal] = None cfdUnrealizedPlLong: Optional[decimal.Decimal] = None cfdUnrealizedPlShort: Optional[decimal.Decimal] = None ipoSubscription: Optional[decimal.Decimal] = None ipoSubscriptionLong: Optional[decimal.Decimal] = None ipoSubscriptionShort: Optional[decimal.Decimal] = None physDel: Optional[decimal.Decimal] = None physDelLong: Optional[decimal.Decimal] = None physDelShort: Optional[decimal.Decimal] = None @dataclass(frozen=True) class MTDYTDPerformanceSummaryUnderlying(FlexElement): """ Wrapped in <MTDYTDPerformanceSummary> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None mtmMTD: Optional[decimal.Decimal] = None mtmYTD: Optional[decimal.Decimal] = None realSTMTD: Optional[decimal.Decimal] = None realSTYTD: Optional[decimal.Decimal] = None realLTMTD: Optional[decimal.Decimal] = None realLTYTD: Optional[decimal.Decimal] = None securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None realizedPnlMTD: Optional[decimal.Decimal] = None realizedCapitalGainsPnlMTD: Optional[decimal.Decimal] = None realizedFxPnlMTD: Optional[decimal.Decimal] = None realizedPnlYTD: Optional[decimal.Decimal] = None realizedCapitalGainsPnlYTD: Optional[decimal.Decimal] = None realizedFxPnlYTD: Optional[decimal.Decimal] = None brokerFees: Optional[decimal.Decimal] = None brokerFeesSec: Optional[decimal.Decimal] = None brokerFeesCom: Optional[decimal.Decimal] = None brokerFeesMTD: Optional[decimal.Decimal] = None brokerFeesYTD: Optional[decimal.Decimal] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class CashReportCurrency(FlexElement): """ Wrapped in <CashReport> """ accountId: Optional[str] = None currency: Optional[str] = None fromDate: 
Optional[datetime.date] = None toDate: Optional[datetime.date] = None startingCash: Optional[decimal.Decimal] = None startingCashSec: Optional[decimal.Decimal] = None startingCashCom: Optional[decimal.Decimal] = None clientFees: Optional[decimal.Decimal] = None clientFeesSec: Optional[decimal.Decimal] = None clientFeesCom: Optional[decimal.Decimal] = None commissions: Optional[decimal.Decimal] = None commissionsSec: Optional[decimal.Decimal] = None commissionsCom: Optional[decimal.Decimal] = None billableCommissions: Optional[decimal.Decimal] = None billableCommissionsSec: Optional[decimal.Decimal] = None billableCommissionsCom: Optional[decimal.Decimal] = None depositWithdrawals: Optional[decimal.Decimal] = None depositWithdrawalsSec: Optional[decimal.Decimal] = None depositWithdrawalsCom: Optional[decimal.Decimal] = None deposits: Optional[decimal.Decimal] = None depositsSec: Optional[decimal.Decimal] = None depositsCom: Optional[decimal.Decimal] = None withdrawals: Optional[decimal.Decimal] = None withdrawalsSec: Optional[decimal.Decimal] = None withdrawalsCom: Optional[decimal.Decimal] = None accountTransfers: Optional[decimal.Decimal] = None accountTransfersSec: Optional[decimal.Decimal] = None accountTransfersCom: Optional[decimal.Decimal] = None internalTransfers: Optional[decimal.Decimal] = None internalTransfersSec: Optional[decimal.Decimal] = None internalTransfersCom: Optional[decimal.Decimal] = None dividends: Optional[decimal.Decimal] = None dividendsSec: Optional[decimal.Decimal] = None dividendsCom: Optional[decimal.Decimal] = None brokerFees: Optional[decimal.Decimal] = None brokerFeesSec: Optional[decimal.Decimal] = None brokerFeesCom: Optional[decimal.Decimal] = None brokerFeesMTD: Optional[decimal.Decimal] = None brokerFeesYTD: Optional[decimal.Decimal] = None brokerInterest: Optional[decimal.Decimal] = None brokerInterestSec: Optional[decimal.Decimal] = None brokerInterestCom: Optional[decimal.Decimal] = None bondInterest: Optional[decimal.Decimal] = None bondInterestSec: Optional[decimal.Decimal] = None bondInterestCom: Optional[decimal.Decimal] = None cashSettlingMtm: Optional[decimal.Decimal] = None cashSettlingMtmSec: Optional[decimal.Decimal] = None cashSettlingMtmCom: Optional[decimal.Decimal] = None cfdCharges: Optional[decimal.Decimal] = None cfdChargesSec: Optional[decimal.Decimal] = None cfdChargesCom: Optional[decimal.Decimal] = None netTradesSales: Optional[decimal.Decimal] = None netTradesSalesSec: Optional[decimal.Decimal] = None netTradesSalesCom: Optional[decimal.Decimal] = None netTradesPurchases: Optional[decimal.Decimal] = None netTradesPurchasesSec: Optional[decimal.Decimal] = None netTradesPurchasesCom: Optional[decimal.Decimal] = None feesReceivables: Optional[decimal.Decimal] = None feesReceivablesSec: Optional[decimal.Decimal] = None feesReceivablesCom: Optional[decimal.Decimal] = None paymentInLieu: Optional[decimal.Decimal] = None paymentInLieuSec: Optional[decimal.Decimal] = None paymentInLieuCom: Optional[decimal.Decimal] = None transactionTax: Optional[decimal.Decimal] = None transactionTaxSec: Optional[decimal.Decimal] = None transactionTaxCom: Optional[decimal.Decimal] = None withholdingTax: Optional[decimal.Decimal] = None withholdingTaxSec: Optional[decimal.Decimal] = None withholdingTaxCom: Optional[decimal.Decimal] = None fxTranslationGainLoss: Optional[decimal.Decimal] = None fxTranslationGainLossSec: Optional[decimal.Decimal] = None fxTranslationGainLossCom: Optional[decimal.Decimal] = None otherFees: Optional[decimal.Decimal] = None 
otherFeesSec: Optional[decimal.Decimal] = None otherFeesCom: Optional[decimal.Decimal] = None endingCash: Optional[decimal.Decimal] = None endingCashSec: Optional[decimal.Decimal] = None endingCashCom: Optional[decimal.Decimal] = None endingSettledCash: Optional[decimal.Decimal] = None endingSettledCashSec: Optional[decimal.Decimal] = None endingSettledCashCom: Optional[decimal.Decimal] = None clientFeesMTD: Optional[decimal.Decimal] = None clientFeesYTD: Optional[decimal.Decimal] = None commissionsMTD: Optional[decimal.Decimal] = None commissionsYTD: Optional[decimal.Decimal] = None billableCommissionsMTD: Optional[decimal.Decimal] = None billableCommissionsYTD: Optional[decimal.Decimal] = None depositWithdrawalsMTD: Optional[decimal.Decimal] = None depositWithdrawalsYTD: Optional[decimal.Decimal] = None depositsMTD: Optional[decimal.Decimal] = None depositsYTD: Optional[decimal.Decimal] = None withdrawalsMTD: Optional[decimal.Decimal] = None withdrawalsYTD: Optional[decimal.Decimal] = None accountTransfersMTD: Optional[decimal.Decimal] = None accountTransfersYTD: Optional[decimal.Decimal] = None internalTransfersMTD: Optional[decimal.Decimal] = None internalTransfersYTD: Optional[decimal.Decimal] = None excessFundSweep: Optional[decimal.Decimal] = None excessFundSweepSec: Optional[decimal.Decimal] = None excessFundSweepCom: Optional[decimal.Decimal] = None excessFundSweepMTD: Optional[decimal.Decimal] = None excessFundSweepYTD: Optional[decimal.Decimal] = None dividendsMTD: Optional[decimal.Decimal] = None dividendsYTD: Optional[decimal.Decimal] = None insuredDepositInterestMTD: Optional[decimal.Decimal] = None insuredDepositInterestYTD: Optional[decimal.Decimal] = None brokerInterestMTD: Optional[decimal.Decimal] = None brokerInterestYTD: Optional[decimal.Decimal] = None bondInterestMTD: Optional[decimal.Decimal] = None bondInterestYTD: Optional[decimal.Decimal] = None cashSettlingMtmMTD: Optional[decimal.Decimal] = None cashSettlingMtmYTD: Optional[decimal.Decimal] = None realizedVmMTD: Optional[decimal.Decimal] = None realizedVmYTD: Optional[decimal.Decimal] = None cfdChargesMTD: Optional[decimal.Decimal] = None cfdChargesYTD: Optional[decimal.Decimal] = None netTradesSalesMTD: Optional[decimal.Decimal] = None netTradesSalesYTD: Optional[decimal.Decimal] = None advisorFeesMTD: Optional[decimal.Decimal] = None advisorFeesYTD: Optional[decimal.Decimal] = None feesReceivablesMTD: Optional[decimal.Decimal] = None feesReceivablesYTD: Optional[decimal.Decimal] = None netTradesPurchasesMTD: Optional[decimal.Decimal] = None netTradesPurchasesYTD: Optional[decimal.Decimal] = None paymentInLieuMTD: Optional[decimal.Decimal] = None paymentInLieuYTD: Optional[decimal.Decimal] = None transactionTaxMTD: Optional[decimal.Decimal] = None transactionTaxYTD: Optional[decimal.Decimal] = None taxReceivablesMTD: Optional[decimal.Decimal] = None taxReceivablesYTD: Optional[decimal.Decimal] = None withholdingTaxMTD: Optional[decimal.Decimal] = None withholdingTaxYTD: Optional[decimal.Decimal] = None withholding871mMTD: Optional[decimal.Decimal] = None withholding871mYTD: Optional[decimal.Decimal] = None withholdingCollectedTaxMTD: Optional[decimal.Decimal] = None withholdingCollectedTaxYTD: Optional[decimal.Decimal] = None salesTaxMTD: Optional[decimal.Decimal] = None salesTaxYTD: Optional[decimal.Decimal] = None otherFeesMTD: Optional[decimal.Decimal] = None otherFeesYTD: Optional[decimal.Decimal] = None acctAlias: Optional[str] = None model: Optional[str] = None avgCreditBalance: Optional[decimal.Decimal] 
= None avgCreditBalanceSec: Optional[decimal.Decimal] = None avgCreditBalanceCom: Optional[decimal.Decimal] = None avgDebitBalance: Optional[decimal.Decimal] = None avgDebitBalanceSec: Optional[decimal.Decimal] = None avgDebitBalanceCom: Optional[decimal.Decimal] = None linkingAdjustments: Optional[decimal.Decimal] = None linkingAdjustmentsSec: Optional[decimal.Decimal] = None linkingAdjustmentsCom: Optional[decimal.Decimal] = None insuredDepositInterest: Optional[decimal.Decimal] = None insuredDepositInterestSec: Optional[decimal.Decimal] = None insuredDepositInterestCom: Optional[decimal.Decimal] = None realizedVm: Optional[decimal.Decimal] = None realizedVmSec: Optional[decimal.Decimal] = None realizedVmCom: Optional[decimal.Decimal] = None advisorFees: Optional[decimal.Decimal] = None advisorFeesSec: Optional[decimal.Decimal] = None advisorFeesCom: Optional[decimal.Decimal] = None taxReceivables: Optional[decimal.Decimal] = None taxReceivablesSec: Optional[decimal.Decimal] = None taxReceivablesCom: Optional[decimal.Decimal] = None withholding871m: Optional[decimal.Decimal] = None withholding871mSec: Optional[decimal.Decimal] = None withholding871mCom: Optional[decimal.Decimal] = None withholdingCollectedTax: Optional[decimal.Decimal] = None withholdingCollectedTaxSec: Optional[decimal.Decimal] = None withholdingCollectedTaxCom: Optional[decimal.Decimal] = None salesTax: Optional[decimal.Decimal] = None salesTaxSec: Optional[decimal.Decimal] = None salesTaxCom: Optional[decimal.Decimal] = None other: Optional[decimal.Decimal] = None otherSec: Optional[decimal.Decimal] = None otherCom: Optional[decimal.Decimal] = None levelOfDetail: Optional[str] = None debitCardActivity: Optional[decimal.Decimal] = None debitCardActivitySec: Optional[decimal.Decimal] = None debitCardActivityCom: Optional[decimal.Decimal] = None debitCardActivityMTD: Optional[decimal.Decimal] = None debitCardActivityYTD: Optional[decimal.Decimal] = None billPay: Optional[decimal.Decimal] = None billPaySec: Optional[decimal.Decimal] = None billPayCom: Optional[decimal.Decimal] = None billPayMTD: Optional[decimal.Decimal] = None billPayYTD: Optional[decimal.Decimal] = None realizedForexVm: Optional[decimal.Decimal] = None realizedForexVmSec: Optional[decimal.Decimal] = None realizedForexVmCom: Optional[decimal.Decimal] = None realizedForexVmMTD: Optional[decimal.Decimal] = None realizedForexVmYTD: Optional[decimal.Decimal] = None ipoSubscription: Optional[decimal.Decimal] = None ipoSubscriptionSec: Optional[decimal.Decimal] = None ipoSubscriptionCom: Optional[decimal.Decimal] = None ipoSubscriptionMTD: Optional[decimal.Decimal] = None ipoSubscriptionYTD: Optional[decimal.Decimal] = None billableSalesTax: Optional[decimal.Decimal] = None billableSalesTaxSec: Optional[decimal.Decimal] = None billableSalesTaxCom: Optional[decimal.Decimal] = None billableSalesTaxMTD: Optional[decimal.Decimal] = None billableSalesTaxYTD: Optional[decimal.Decimal] = None commissionCreditsRedemption: Optional[decimal.Decimal] = None commissionCreditsRedemptionSec: Optional[decimal.Decimal] = None commissionCreditsRedemptionCom: Optional[decimal.Decimal] = None commissionCreditsRedemptionMTD: Optional[decimal.Decimal] = None commissionCreditsRedemptionYTD: Optional[decimal.Decimal] = None @dataclass(frozen=True) class StatementOfFundsLine(FlexElement): """ Wrapped in <StmtFunds> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None balance: Optional[decimal.Decimal] = None debit: Optional[decimal.Decimal] = None 
    credit: Optional[decimal.Decimal] = None
    currency: Optional[str] = None
    tradeID: Optional[str] = None
    # Despite the name, `date` actually contains date/time data.
    date: Optional[datetime.datetime] = None
    reportDate: Optional[datetime.date] = None
    activityDescription: Optional[str] = None
    amount: Optional[decimal.Decimal] = None
    buySell: Optional[str] = None
    acctAlias: Optional[str] = None
    model: Optional[str] = None
    symbol: Optional[str] = None
    description: Optional[str] = None
    conid: Optional[str] = None
    securityID: Optional[str] = None
    securityIDType: Optional[str] = None
    cusip: Optional[str] = None
    isin: Optional[str] = None
    underlyingConid: Optional[str] = None
    underlyingSymbol: Optional[str] = None
    issuer: Optional[str] = None
    multiplier: Optional[decimal.Decimal] = None
    strike: Optional[decimal.Decimal] = None
    expiry: Optional[datetime.date] = None
    putCall: Optional[enums.PutCall] = None
    principalAdjustFactor: Optional[decimal.Decimal] = None
    fxRateToBase: Optional[decimal.Decimal] = None
    listingExchange: Optional[str] = None
    underlyingSecurityID: Optional[str] = None
    underlyingListingExchange: Optional[str] = None
    settleDate: Optional[datetime.date] = None
    activityCode: Optional[str] = None  # FIXME
    orderID: Optional[str] = None
    tradeQuantity: Optional[decimal.Decimal] = None
    tradePrice: Optional[decimal.Decimal] = None
    tradeGross: Optional[decimal.Decimal] = None
    tradeCommission: Optional[decimal.Decimal] = None
    tradeTax: Optional[decimal.Decimal] = None
    tradeCode: Optional[str] = None
    levelOfDetail: Optional[str] = None
    transactionID: Optional[str] = None
    serialNumber: Optional[str] = None
    deliveryType: Optional[str] = None
    commodityType: Optional[str] = None
    fineness: Optional[decimal.Decimal] = None
    weight: Optional[str] = None


@dataclass(frozen=True)
class ChangeInPositionValue(FlexElement):
    """ Wrapped in <ChangeInPositionValues> """
    assetCategory: Optional[enums.AssetClass] = None
    currency: Optional[str] = None
    priorPeriodValue: Optional[decimal.Decimal] = None
    transactions: Optional[decimal.Decimal] = None
    mtmPriorPeriodPositions: Optional[decimal.Decimal] = None
    mtmTransactions: Optional[decimal.Decimal] = None
    corporateActions: Optional[decimal.Decimal] = None
    accountTransfers: Optional[decimal.Decimal] = None
    fxTranslationPnl: Optional[decimal.Decimal] = None
    futurePriceAdjustments: Optional[decimal.Decimal] = None
    settledCash: Optional[decimal.Decimal] = None
    endOfPeriodValue: Optional[decimal.Decimal] = None
    accountId: Optional[str] = None
    acctAlias: Optional[str] = None
    model: Optional[str] = None
    other: Optional[decimal.Decimal] = None
    linkingAdjustments: Optional[decimal.Decimal] = None


@dataclass(frozen=True)
class OpenPosition(FlexElement):
    """ Wrapped in <OpenPositions> """
    side: Optional[enums.LongShort] = None
    assetCategory: Optional[enums.AssetClass] = None
    accountId: Optional[str] = None
    currency: Optional[str] = None
    fxRateToBase: Optional[decimal.Decimal] = None
    reportDate: Optional[datetime.date] = None
    symbol: Optional[str] = None
    description: Optional[str] = None
    conid: Optional[str] = None
    securityID: Optional[str] = None
    cusip: Optional[str] = None
    isin: Optional[str] = None
    multiplier: Optional[decimal.Decimal] = None
    position: Optional[decimal.Decimal] = None
    markPrice: Optional[decimal.Decimal] = None
    positionValue: Optional[decimal.Decimal] = None
    openPrice: Optional[decimal.Decimal] = None
    costBasisPrice: Optional[decimal.Decimal] = None
    costBasisMoney: Optional[decimal.Decimal] = None
    fifoPnlUnrealized: Optional[decimal.Decimal] = None
levelOfDetail: Optional[str] = None openDateTime: Optional[datetime.datetime] = None holdingPeriodDateTime: Optional[datetime.datetime] = None securityIDType: Optional[str] = None issuer: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None code: Tuple[enums.Code, ...] = () originatingOrderID: Optional[str] = None originatingTransactionID: Optional[str] = None accruedInt: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None sedol: Optional[str] = None percentOfNAV: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None listingExchange: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None positionValueInBase: Optional[decimal.Decimal] = None unrealizedCapitalGainsPnl: Optional[decimal.Decimal] = None unrealizedlFxPnl: Optional[decimal.Decimal] = None vestingDate: Optional[datetime.date] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class FxLot(FlexElement): """ Wrapped in <FxLots>, which in turn is wrapped in <FxPositions> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None reportDate: Optional[datetime.date] = None functionalCurrency: Optional[str] = None fxCurrency: Optional[str] = None quantity: Optional[decimal.Decimal] = None costPrice: Optional[decimal.Decimal] = None costBasis: Optional[decimal.Decimal] = None closePrice: Optional[decimal.Decimal] = None value: Optional[decimal.Decimal] = None unrealizedPL: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] = () lotDescription: Optional[str] = None lotOpenDateTime: Optional[datetime.datetime] = None levelOfDetail: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None @dataclass(frozen=True) class Trade(FlexElement): """ Wrapped in <Trades> """ transactionType: Optional[enums.TradeType] = None openCloseIndicator: Optional[enums.OpenClose] = None buySell: Optional[enums.BuySell] = None orderType: Optional[enums.OrderType] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None tradeID: Optional[str] = None reportDate: Optional[datetime.date] = None tradeDate: Optional[datetime.date] = None tradeTime: Optional[datetime.time] = None settleDateTarget: Optional[datetime.date] = None exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None tradeMoney: Optional[decimal.Decimal] = None taxes: Optional[decimal.Decimal] = None ibCommission: Optional[decimal.Decimal] = None ibCommissionCurrency: Optional[str] = None netCash: Optional[decimal.Decimal] = None netCashInBase: Optional[decimal.Decimal] = None closePrice: Optional[decimal.Decimal] = None notes: Tuple[enums.Code, ...] 
= () # separator = ";" cost: Optional[decimal.Decimal] = None mtmPnl: Optional[decimal.Decimal] = None origTradePrice: Optional[decimal.Decimal] = None origTradeDate: Optional[datetime.date] = None origTradeID: Optional[str] = None origOrderID: Optional[str] = None openDateTime: Optional[datetime.datetime] = None fifoPnlRealized: Optional[decimal.Decimal] = None capitalGainsPnl: Optional[decimal.Decimal] = None levelOfDetail: Optional[str] = None ibOrderID: Optional[str] = None # Despite the name, `orderTime` actually contains date/time data. orderTime: Optional[datetime.datetime] = None changeInPrice: Optional[decimal.Decimal] = None changeInQuantity: Optional[decimal.Decimal] = None proceeds: Optional[decimal.Decimal] = None fxPnl: Optional[decimal.Decimal] = None clearingFirmID: Optional[str] = None # Effective 2013, every Trade has a `transactionID` attribute that can't # be deselected in the Flex query template. transactionID: Optional[str] = None holdingPeriodDateTime: Optional[datetime.datetime] = None ibExecID: Optional[str] = None brokerageOrderID: Optional[str] = None orderReference: Optional[str] = None volatilityOrderLink: Optional[str] = None exchOrderId: Optional[str] = None extExecID: Optional[str] = None traderID: Optional[str] = None isAPIOrder: Optional[bool] = None acctAlias: Optional[str] = None model: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None principalAdjustFactor: Optional[decimal.Decimal] = None dateTime: Optional[datetime.datetime] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None sedol: Optional[str] = None whenRealized: Optional[datetime.datetime] = None whenReopened: Optional[datetime.datetime] = None accruedInt: Optional[decimal.Decimal] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class Lot(FlexElement): """ Wrapped in <Trades> """ transactionType: Optional[enums.TradeType] = None openCloseIndicator: Optional[enums.OpenClose] = None buySell: Optional[enums.BuySell] = None orderType: Optional[enums.OrderType] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None tradeID: Optional[str] = None reportDate: Optional[datetime.date] = None tradeDate: Optional[datetime.date] = None tradeTime: Optional[datetime.time] = None settleDateTarget: Optional[datetime.date] = None exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None tradeMoney: Optional[decimal.Decimal] = None taxes: Optional[decimal.Decimal] = None ibCommission: Optional[decimal.Decimal] = None ibCommissionCurrency: Optional[str] = None netCash: Optional[decimal.Decimal] = None netCashInBase: Optional[decimal.Decimal] = None closePrice: Optional[decimal.Decimal] = None notes: Tuple[enums.Code, ...] 
= () # separator = ";" cost: Optional[decimal.Decimal] = None mtmPnl: Optional[decimal.Decimal] = None origTradePrice: Optional[decimal.Decimal] = None origTradeDate: Optional[datetime.date] = None origTradeID: Optional[str] = None origOrderID: Optional[str] = None openDateTime: Optional[datetime.datetime] = None fifoPnlRealized: Optional[decimal.Decimal] = None capitalGainsPnl: Optional[decimal.Decimal] = None levelOfDetail: Optional[str] = None ibOrderID: Optional[str] = None # Despite the name, `orderTime` actually contains date/time data. orderTime: Optional[datetime.datetime] = None changeInPrice: Optional[decimal.Decimal] = None changeInQuantity: Optional[decimal.Decimal] = None proceeds: Optional[decimal.Decimal] = None fxPnl: Optional[decimal.Decimal] = None clearingFirmID: Optional[str] = None # Effective 2013, every Trade has a `transactionID` attribute that can't # be deselected in the Flex query template. transactionID: Optional[str] = None holdingPeriodDateTime: Optional[datetime.datetime] = None ibExecID: Optional[str] = None brokerageOrderID: Optional[str] = None orderReference: Optional[str] = None volatilityOrderLink: Optional[str] = None exchOrderId: Optional[str] = None extExecID: Optional[str] = None traderID: Optional[str] = None isAPIOrder: Optional[bool] = None acctAlias: Optional[str] = None model: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None principalAdjustFactor: Optional[decimal.Decimal] = None dateTime: Optional[datetime.datetime] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None sedol: Optional[str] = None whenRealized: Optional[datetime.datetime] = None whenReopened: Optional[datetime.datetime] = None @dataclass(frozen=True) class UnbundledCommissionDetail(FlexElement): """ Wrapped in <UnbundledCommissionDetails> """ buySell: Optional[enums.BuySell] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None sedol: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None dateTime: Optional[datetime.datetime] = None exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None price: Optional[decimal.Decimal] = None tradeID: Optional[str] = None orderReference: Optional[str] = None totalCommission: Optional[decimal.Decimal] = None brokerExecutionCharge: Optional[decimal.Decimal] = None brokerClearingCharge: Optional[decimal.Decimal] = None thirdPartyExecutionCharge: Optional[decimal.Decimal] = None thirdPartyClearingCharge: Optional[decimal.Decimal] = None thirdPartyRegulatoryCharge: Optional[decimal.Decimal] = None regFINRATradingActivityFee: 
Optional[decimal.Decimal] = None regSection31TransactionFee: Optional[decimal.Decimal] = None regOther: Optional[decimal.Decimal] = None other: Optional[decimal.Decimal] = None @dataclass(frozen=True) class SymbolSummary(FlexElement): """ Wrapped in <TradeConfirms> """ accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None transactionType: Optional[enums.TradeType] = None tradeID: Optional[str] = None orderID: Optional[decimal.Decimal] = None execID: Optional[str] = None brokerageOrderID: Optional[str] = None orderReference: Optional[str] = None volatilityOrderLink: Optional[str] = None clearingFirmID: Optional[str] = None origTradePrice: Optional[decimal.Decimal] = None origTradeDate: Optional[datetime.date] = None origTradeID: Optional[str] = None # Despite the name, `orderTime` actually contains date/time data. orderTime: Optional[datetime.datetime] = None dateTime: Optional[datetime.datetime] = None reportDate: Optional[datetime.date] = None settleDate: Optional[datetime.date] = None tradeDate: Optional[datetime.date] = None exchange: Optional[str] = None buySell: Optional[enums.BuySell] = None quantity: Optional[decimal.Decimal] = None price: Optional[decimal.Decimal] = None amount: Optional[decimal.Decimal] = None proceeds: Optional[decimal.Decimal] = None commission: Optional[decimal.Decimal] = None brokerExecutionCommission: Optional[decimal.Decimal] = None brokerClearingCommission: Optional[decimal.Decimal] = None thirdPartyExecutionCommission: Optional[decimal.Decimal] = None thirdPartyClearingCommission: Optional[decimal.Decimal] = None thirdPartyRegulatoryCommission: Optional[decimal.Decimal] = None otherCommission: Optional[decimal.Decimal] = None commissionCurrency: Optional[str] = None tax: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () orderType: Optional[enums.OrderType] = None levelOfDetail: Optional[str] = None traderID: Optional[str] = None isAPIOrder: Optional[bool] = None allocatedTo: Optional[str] = None accruedInt: Optional[decimal.Decimal] = None @dataclass(frozen=True) class Order(FlexElement): """ Wrapped in <TradeConfirms> or <Trades>""" accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None transactionType: Optional[enums.TradeType] = None tradeID: Optional[str] = None orderID: Optional[decimal.Decimal] = None execID: Optional[str] = None brokerageOrderID: Optional[str] = None orderReference: Optional[str] = None volatilityOrderLink: Optional[str] = None clearingFirmID: Optional[str] = None origTradePrice: Optional[decimal.Decimal] = None origTradeDate: Optional[datetime.date] = None origTradeID: Optional[str] = None # Despite the name, `orderTime` actually contains date/time data. orderTime: Optional[datetime.datetime] = None dateTime: Optional[datetime.datetime] = None reportDate: Optional[datetime.date] = None settleDate: Optional[datetime.date] = None tradeDate: Optional[datetime.date] = None exchange: Optional[str] = None buySell: Optional[enums.BuySell] = None quantity: Optional[decimal.Decimal] = None price: Optional[decimal.Decimal] = None amount: Optional[decimal.Decimal] = None proceeds: Optional[decimal.Decimal] = None commission: Optional[decimal.Decimal] = None brokerExecutionCommission: Optional[decimal.Decimal] = None brokerClearingCommission: Optional[decimal.Decimal] = None thirdPartyExecutionCommission: Optional[decimal.Decimal] = None thirdPartyClearingCommission: Optional[decimal.Decimal] = None thirdPartyRegulatoryCommission: Optional[decimal.Decimal] = None otherCommission: Optional[decimal.Decimal] = None commissionCurrency: Optional[str] = None tax: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () orderType: Optional[enums.OrderType] = None levelOfDetail: Optional[str] = None traderID: Optional[str] = None isAPIOrder: Optional[bool] = None allocatedTo: Optional[str] = None accruedInt: Optional[decimal.Decimal] = None netCash: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None ibCommission: Optional[decimal.Decimal] = None ibOrderID: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None settleDateTarget: Optional[datetime.date] = None tradeMoney: Optional[decimal.Decimal] = None taxes: Optional[decimal.Decimal] = None ibCommissionCurrency: Optional[str] = None closePrice: Optional[decimal.Decimal] = None openCloseIndicator: Optional[enums.OpenClose] = None notes: Optional[str] = None cost: Optional[decimal.Decimal] = None fifoPnlRealized: Optional[decimal.Decimal] = None fxPnl: Optional[decimal.Decimal] = None mtmPnl: Optional[decimal.Decimal] = None origOrderID: Optional[str] = None transactionID: Optional[str] = None ibExecID: Optional[str] = None exchOrderId: Optional[str] = None extExecID: Optional[str] = None openDateTime: Optional[datetime.datetime] = None holdingPeriodDateTime: Optional[datetime.datetime] = None whenRealized: Optional[datetime.datetime] = None whenReopened: Optional[datetime.datetime] = None changeInPrice: Optional[decimal.Decimal] = None changeInQuantity: Optional[decimal.Decimal] = None @dataclass(frozen=True) class TradeConfirm(FlexElement): """ Wrapped in <TradeConfirms> """ transactionType: Optional[enums.TradeType] = None openCloseIndicator: Optional[enums.OpenClose] = None buySell: Optional[enums.BuySell] = None orderType: Optional[enums.OrderType] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None tradeID: Optional[str] = None reportDate: Optional[datetime.date] = None tradeDate: Optional[datetime.date] = None tradeTime: Optional[datetime.time] = None settleDateTarget: Optional[datetime.date] = None exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None tradeMoney: Optional[decimal.Decimal] = None proceeds: Optional[decimal.Decimal] = None taxes: Optional[decimal.Decimal] = None ibCommission: Optional[decimal.Decimal] = None ibCommissionCurrency: Optional[str] = None netCash: Optional[decimal.Decimal] = None closePrice: Optional[decimal.Decimal] = None notes: Tuple[enums.Code, ...] 
= () # separator = ";" cost: Optional[decimal.Decimal] = None fifoPnlRealized: Optional[decimal.Decimal] = None fxPnl: Optional[decimal.Decimal] = None mtmPnl: Optional[decimal.Decimal] = None origTradePrice: Optional[decimal.Decimal] = None origTradeDate: Optional[datetime.date] = None origTradeID: Optional[str] = None origOrderID: Optional[str] = None clearingFirmID: Optional[str] = None transactionID: Optional[str] = None openDateTime: Optional[datetime.datetime] = None holdingPeriodDateTime: Optional[datetime.datetime] = None whenRealized: Optional[datetime.datetime] = None whenReopened: Optional[datetime.datetime] = None levelOfDetail: Optional[str] = None commissionCurrency: Optional[str] = None price: Optional[decimal.Decimal] = None thirdPartyClearingCommission: Optional[decimal.Decimal] = None orderID: Optional[decimal.Decimal] = None allocatedTo: Optional[str] = None thirdPartyRegulatoryCommission: Optional[decimal.Decimal] = None dateTime: Optional[datetime.datetime] = None brokerExecutionCommission: Optional[decimal.Decimal] = None thirdPartyExecutionCommission: Optional[decimal.Decimal] = None amount: Optional[decimal.Decimal] = None otherCommission: Optional[decimal.Decimal] = None commission: Optional[decimal.Decimal] = None brokerClearingCommission: Optional[decimal.Decimal] = None ibOrderID: Optional[str] = None ibExecID: Optional[str] = None execID: Optional[str] = None brokerageOrderID: Optional[str] = None orderReference: Optional[str] = None volatilityOrderLink: Optional[str] = None exchOrderId: Optional[str] = None extExecID: Optional[str] = None # Despite the name, `orderTime` actually contains date/time data. orderTime: Optional[datetime.datetime] = None changeInPrice: Optional[decimal.Decimal] = None changeInQuantity: Optional[decimal.Decimal] = None traderID: Optional[str] = None isAPIOrder: Optional[bool] = None code: Tuple[enums.Code, ...] 
= () tax: Optional[decimal.Decimal] = None listingExchange: Optional[str] = None underlyingListingExchange: Optional[str] = None settleDate: Optional[datetime.date] = None underlyingSecurityID: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None accruedInt: Optional[decimal.Decimal] = None @dataclass(frozen=True) class OptionEAE(FlexElement): """Option Exercise Assignment or Expiration Wrapped in (identically-named) <OptionEAE> """ transactionType: Optional[enums.OptionAction] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None date: Optional[datetime.date] = None quantity: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None markPrice: Optional[decimal.Decimal] = None proceeds: Optional[decimal.Decimal] = None commisionsAndTax: Optional[decimal.Decimal] = None costBasis: Optional[decimal.Decimal] = None realizedPnl: Optional[decimal.Decimal] = None fxPnl: Optional[decimal.Decimal] = None mtmPnl: Optional[decimal.Decimal] = None tradeID: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None # Type alias to work around https://github.com/python/mypy/issues/1775 _OptionEAE = OptionEAE @dataclass(frozen=True) class TradeTransfer(FlexElement): """ Wrapped in <TradeTransfers> """ transactionType: Optional[enums.TradeType] = None openCloseIndicator: Optional[enums.OpenClose] = None direction: Optional[enums.ToFrom] = None deliveredReceived: Optional[enums.DeliveredReceived] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None underlyingConid: Optional[str] = None tradeID: Optional[str] = None reportDate: Optional[datetime.date] = None tradeDate: Optional[datetime.date] = None tradeTime: Optional[datetime.time] = None settleDateTarget: Optional[datetime.date] = None exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None tradeMoney: Optional[decimal.Decimal] = None taxes: Optional[decimal.Decimal] = None ibCommission: Optional[decimal.Decimal] = None ibCommissionCurrency: Optional[str] = None closePrice: Optional[decimal.Decimal] = None notes: Tuple[enums.Code, ...] 
    = ()  # separator = ";"
    cost: Optional[decimal.Decimal] = None
    fifoPnlRealized: Optional[decimal.Decimal] = None
    mtmPnl: Optional[decimal.Decimal] = None
    brokerName: Optional[str] = None
    brokerAccount: Optional[str] = None
    awayBrokerCommission: Optional[decimal.Decimal] = None
    regulatoryFee: Optional[decimal.Decimal] = None
    netTradeMoney: Optional[decimal.Decimal] = None
    netTradeMoneyInBase: Optional[decimal.Decimal] = None
    netTradePrice: Optional[decimal.Decimal] = None
    multiplier: Optional[decimal.Decimal] = None
    acctAlias: Optional[str] = None
    model: Optional[str] = None
    sedol: Optional[str] = None
    securityID: Optional[str] = None
    underlyingSymbol: Optional[str] = None
    issuer: Optional[str] = None
    strike: Optional[decimal.Decimal] = None
    expiry: Optional[datetime.date] = None
    putCall: Optional[enums.PutCall] = None
    principalAdjustFactor: Optional[decimal.Decimal] = None
    proceeds: Optional[decimal.Decimal] = None
    fxPnl: Optional[decimal.Decimal] = None
    netCash: Optional[decimal.Decimal] = None
    origTradePrice: Optional[decimal.Decimal] = None
    # Oddly, `origTradeDate` appears to have hard-coded YYYYMMDD format
    # instead of the date format from the report configuration.
    origTradeDate: Optional[datetime.date] = None
    origTradeID: Optional[str] = None
    origOrderID: Optional[str] = None
    clearingFirmID: Optional[str] = None
    transactionID: Optional[str] = None
    openDateTime: Optional[datetime.datetime] = None
    holdingPeriodDateTime: Optional[datetime.datetime] = None
    whenRealized: Optional[datetime.datetime] = None
    whenReopened: Optional[datetime.datetime] = None
    levelOfDetail: Optional[str] = None
    securityIDType: Optional[str] = None


@dataclass(frozen=True)
class InterestAccrualsCurrency(FlexElement):
    """ Wrapped in <InterestAccruals> """
    accountId: Optional[str] = None
    currency: Optional[str] = None
    fromDate: Optional[datetime.date] = None
    toDate: Optional[datetime.date] = None
    startingAccrualBalance: Optional[decimal.Decimal] = None
    interestAccrued: Optional[decimal.Decimal] = None
    accrualReversal: Optional[decimal.Decimal] = None
    endingAccrualBalance: Optional[decimal.Decimal] = None
    acctAlias: Optional[str] = None
    model: Optional[str] = None
    fxTranslation: Optional[decimal.Decimal] = None


@dataclass(frozen=True)
class TierInterestDetail(FlexElement):
    accountId: Optional[str] = None
    acctAlias: Optional[str] = None
    model: Optional[str] = None
    currency: Optional[str] = None
    fxRateToBase: Optional[decimal.Decimal] = None
    interestType: Optional[str] = None
    valueDate: Optional[datetime.date] = None
    tierBreak: Optional[str] = None
    balanceThreshold: Optional[decimal.Decimal] = None
    securitiesPrincipal: Optional[decimal.Decimal] = None
    commoditiesPrincipal: Optional[decimal.Decimal] = None
    ibuklPrincipal: Optional[decimal.Decimal] = None
    totalPrincipal: Optional[decimal.Decimal] = None
    rate: Optional[decimal.Decimal] = None
    securitiesInterest: Optional[decimal.Decimal] = None
    commoditiesInterest: Optional[decimal.Decimal] = None
    ibuklInterest: Optional[decimal.Decimal] = None
    totalInterest: Optional[decimal.Decimal] = None
    code: Tuple[enums.Code, ...]
= () fromAcct: Optional[str] = None toAcct: Optional[str] = None @dataclass(frozen=True) class HardToBorrowDetail(FlexElement): """ Wrapped in <HardToBorrowDetails> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None valueDate: Optional[datetime.date] = None quantity: Optional[decimal.Decimal] = None price: Optional[decimal.Decimal] = None value: Optional[decimal.Decimal] = None borrowFeeRate: Optional[decimal.Decimal] = None borrowFee: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] = () fromAcct: Optional[str] = None toAcct: Optional[str] = None @dataclass(frozen=True) class SLBActivity(FlexElement): """ Wrapped in <SLBActivities> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None date: Optional[datetime.date] = None slbTransactionId: Optional[str] = None activityDescription: Optional[str] = None type: Optional[str] = None exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None feeRate: Optional[decimal.Decimal] = None collateralAmount: Optional[decimal.Decimal] = None markQuantity: Optional[decimal.Decimal] = None markPriorPrice: Optional[decimal.Decimal] = None markCurrentPrice: Optional[decimal.Decimal] = None @dataclass(frozen=True) class SLBFee: """ Wrapped in <SLBFees> """ accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[str] = None assetCategory: Optional[str] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = 
None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None valueDate: Optional[datetime.date] = None startDate: Optional[datetime.date] = None type: Optional[str] = None # FIXME exchange: Optional[str] = None quantity: Optional[decimal.Decimal] = None collateralAmount: Optional[decimal.Decimal] = None feeRate: Optional[decimal.Decimal] = None fee: Optional[decimal.Decimal] = None carryCharge: Optional[decimal.Decimal] = None ticketCharge: Optional[decimal.Decimal] = None totalCharges: Optional[decimal.Decimal] = None marketFeeRate: Optional[decimal.Decimal] = None grossLendFee: Optional[decimal.Decimal] = None netLendFeeRate: Optional[decimal.Decimal] = None netLendFee: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] = () fromAcct: Optional[str] = None toAcct: Optional[str] = None @dataclass(frozen=True) class Transfer(FlexElement): """ Wrapped in <Transfers> """ type: Optional[enums.TransferType] = None direction: Optional[enums.InOut] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None reportDate: Optional[datetime.date] = None underlyingConid: Optional[str] = None date: Optional[datetime.date] = None dateTime: Optional[datetime.datetime] = None account: Optional[str] = None quantity: Optional[decimal.Decimal] = None transferPrice: Optional[decimal.Decimal] = None positionAmount: Optional[decimal.Decimal] = None positionAmountInBase: Optional[decimal.Decimal] = None capitalGainsPnl: Optional[decimal.Decimal] = None cashTransfer: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () clientReference: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None sedol: Optional[str] = None securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None company: Optional[str] = None accountName: Optional[str] = None pnlAmount: Optional[decimal.Decimal] = None pnlAmountInBase: Optional[decimal.Decimal] = None fxPnl: Optional[decimal.Decimal] = None transactionID: Optional[str] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class UnsettledTransfer(FlexElement): """ Wrapped in <UnsettledTransfers> """ direction: Optional[enums.ToFrom] = None assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None sedol: Optional[str] = None underlyingConid: Optional[str] = None stage: Optional[str] = None tradeDate: Optional[datetime.date] = None targetSettlement: Optional[datetime.date] = None contra: Optional[str] = None quantity: Optional[decimal.Decimal] = None tradePrice: Optional[decimal.Decimal] = None tradeAmount: Optional[decimal.Decimal] = None tradeAmountInBase: Optional[decimal.Decimal] = None transactionID: Optional[str] = None @dataclass(frozen=True) class PriorPeriodPosition(FlexElement): """ Wrapped in <PriorPeriodPositions> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None priorMtmPnl: Optional[decimal.Decimal] = None date: Optional[datetime.date] = None price: Optional[decimal.Decimal] = None acctAlias: Optional[str] = None model: Optional[str] = None sedol: Optional[str] = None securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None @dataclass(frozen=True) class CorporateAction(FlexElement): """ Wrapped in <CorporateActions> """ assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None 
    actionDescription: Optional[str] = None
    dateTime: Optional[datetime.datetime] = None
    amount: Optional[decimal.Decimal] = None
    quantity: Optional[decimal.Decimal] = None
    fifoPnlRealized: Optional[decimal.Decimal] = None
    capitalGainsPnl: Optional[decimal.Decimal] = None
    fxPnl: Optional[decimal.Decimal] = None
    mtmPnl: Optional[decimal.Decimal] = None
    # Effective 2010, CorporateAction has a `type` attribute
    type: Optional[enums.Reorg] = None
    code: Tuple[enums.Code, ...] = ()
    sedol: Optional[str] = None
    acctAlias: Optional[str] = None
    model: Optional[str] = None
    securityIDType: Optional[str] = None
    underlyingSymbol: Optional[str] = None
    issuer: Optional[str] = None
    multiplier: Optional[decimal.Decimal] = None
    strike: Optional[decimal.Decimal] = None
    expiry: Optional[datetime.date] = None
    putCall: Optional[enums.PutCall] = None
    principalAdjustFactor: Optional[decimal.Decimal] = None
    reportDate: Optional[datetime.date] = None
    proceeds: Optional[decimal.Decimal] = None
    value: Optional[decimal.Decimal] = None
    transactionID: Optional[str] = None


@dataclass(frozen=True)
class CashTransaction(FlexElement):
    """ Wrapped in <CashTransactions> """
    type: Optional[enums.CashAction] = None
    assetCategory: Optional[enums.AssetClass] = None
    accountId: Optional[str] = None
    currency: Optional[str] = None
    fxRateToBase: Optional[decimal.Decimal] = None
    description: Optional[str] = None
    conid: Optional[str] = None
    securityID: Optional[str] = None
    cusip: Optional[str] = None
    isin: Optional[str] = None
    listingExchange: Optional[str] = None
    underlyingConid: Optional[str] = None
    underlyingSecurityID: Optional[str] = None
    underlyingListingExchange: Optional[str] = None
    amount: Optional[decimal.Decimal] = None
    dateTime: Optional[datetime.datetime] = None
    sedol: Optional[str] = None
    symbol: Optional[str] = None
    securityIDType: Optional[str] = None
    underlyingSymbol: Optional[str] = None
    issuer: Optional[str] = None
    multiplier: Optional[decimal.Decimal] = None
    strike: Optional[decimal.Decimal] = None
    expiry: Optional[datetime.date] = None
    putCall: Optional[enums.PutCall] = None
    principalAdjustFactor: Optional[decimal.Decimal] = None
    tradeID: Optional[str] = None
    code: Tuple[enums.Code, ...]
= () transactionID: Optional[str] = None reportDate: Optional[datetime.date] = None clientReference: Optional[str] = None settleDate: Optional[datetime.date] = None acctAlias: Optional[str] = None model: Optional[str] = None levelOfDetail: Optional[str] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class DebitCardActivity(FlexElement): """ Wrapped in <DebitCardActivities> """ accountId: Optional[str] = None acctAlias: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None assetCategory: Optional[enums.AssetClass] = None status: Optional[str] = None reportDate: Optional[datetime.date] = None postingDate: Optional[datetime.date] = None transactionDateTime: Optional[datetime.datetime] = None category: Optional[str] = None merchantNameLocation: Optional[str] = None amount: Optional[decimal.Decimal] = None model: Optional[str] = None @dataclass(frozen=True) class ChangeInDividendAccrual(FlexElement): """ Wrapped in <ChangeInDividendAccruals> """ date: Optional[datetime.date] = None assetCategory: Optional[enums.AssetClass] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None accountId: Optional[str] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None sedol: Optional[str] = None listingExchange: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None reportDate: Optional[datetime.date] = None underlyingConid: Optional[str] = None exDate: Optional[datetime.date] = None payDate: Optional[datetime.date] = None quantity: Optional[decimal.Decimal] = None tax: Optional[decimal.Decimal] = None fee: Optional[decimal.Decimal] = None grossRate: Optional[decimal.Decimal] = None grossAmount: Optional[decimal.Decimal] = None netAmount: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None fromAcct: Optional[str] = None toAcct: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None # Type alias to work around https://github.com/python/mypy/issues/1775 _ChangeInDividendAccrual = ChangeInDividendAccrual @dataclass(frozen=True) class OpenDividendAccrual(FlexElement): """ Wrapped in <OpenDividendAccruals> """ assetCategory: Optional[enums.AssetClass] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None accountId: Optional[str] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None exDate: Optional[datetime.date] = None payDate: Optional[datetime.date] = None quantity: Optional[decimal.Decimal] = None tax: Optional[decimal.Decimal] = None fee: Optional[decimal.Decimal] = None grossRate: Optional[decimal.Decimal] = None grossAmount: Optional[decimal.Decimal] = None netAmount: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] = () sedol: Optional[str] = None securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None fromAcct: Optional[str] = None toAcct: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class SecurityInfo(FlexElement): """ Wrapped in <SecuritiesInfo> """ assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingCategory: Optional[str] = None subCategory: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None maturity: Optional[str] = None issueDate: Optional[datetime.date] = None type: Optional[str] = None sedol: Optional[str] = None securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () currency: Optional[str] = None settlementPolicyMethod: Optional[str] = None @dataclass(frozen=True) class ConversionRate(FlexElement): """ Wrapped in <ConversionRates> """ reportDate: Optional[datetime.date] = None fromCurrency: Optional[str] = None toCurrency: Optional[str] = None rate: Optional[decimal.Decimal] = None @dataclass(frozen=True) class FIFOPerformanceSummaryUnderlying(FlexElement): accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None listingExchange: Optional[str] = None assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None realizedSTProfit: Optional[decimal.Decimal] = None realizedSTLoss: Optional[decimal.Decimal] = None realizedLTProfit: Optional[decimal.Decimal] = None realizedLTLoss: Optional[decimal.Decimal] = None totalRealizedPnl: Optional[decimal.Decimal] = None unrealizedProfit: Optional[decimal.Decimal] = None unrealizedLoss: Optional[decimal.Decimal] = None totalUnrealizedPnl: Optional[decimal.Decimal] = None totalFifoPnl: Optional[decimal.Decimal] = None totalRealizedCapitalGainsPnl: Optional[decimal.Decimal] = None totalRealizedFxPnl: Optional[decimal.Decimal] = None totalUnrealizedCapitalGainsPnl: Optional[decimal.Decimal] = None totalUnrealizedFxPnl: Optional[decimal.Decimal] = None totalCapitalGainsPnl: Optional[decimal.Decimal] = None totalFxPnl: Optional[decimal.Decimal] = None transferredPnl: Optional[decimal.Decimal] = None transferredCapitalGainsPnl: Optional[decimal.Decimal] = None transferredFxPnl: Optional[decimal.Decimal] = None sedol: Optional[str] = None securityIDType: Optional[str] = None underlyingSymbol: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None reportDate: Optional[datetime.date] = None unrealizedSTProfit: Optional[decimal.Decimal] = None unrealizedSTLoss: Optional[decimal.Decimal] = None unrealizedLTProfit: Optional[decimal.Decimal] = None unrealizedLTLoss: Optional[decimal.Decimal] = None costAdj: Optional[decimal.Decimal] = None code: Tuple[enums.Code, ...] 
= () serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class NetStockPosition(FlexElement): assetCategory: Optional[enums.AssetClass] = None accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None sedol: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None reportDate: Optional[datetime.date] = None sharesAtIb: Optional[decimal.Decimal] = None sharesBorrowed: Optional[decimal.Decimal] = None sharesLent: Optional[decimal.Decimal] = None netShares: Optional[decimal.Decimal] = None serialNumber: Optional[str] = None deliveryType: Optional[str] = None commodityType: Optional[str] = None fineness: Optional[decimal.Decimal] = None weight: Optional[str] = None @dataclass(frozen=True) class ClientFee(FlexElement): accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None feeType: Optional[str] = None date: Optional[datetime.datetime] = None description: Optional[str] = None expenseIndicator: Optional[str] = None revenue: Optional[decimal.Decimal] = None expense: Optional[decimal.Decimal] = None net: Optional[decimal.Decimal] = None revenueInBase: Optional[decimal.Decimal] = None expenseInBase: Optional[decimal.Decimal] = None netInBase: Optional[decimal.Decimal] = None tradeID: Optional[str] = None execID: Optional[str] = None levelOfDetail: Optional[str] = None @dataclass(frozen=True) class ClientFeesDetail(FlexElement): accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None date: Optional[datetime.datetime] = None tradeID: Optional[str] = None execID: Optional[str] = None totalRevenue: Optional[decimal.Decimal] = None totalCommission: Optional[decimal.Decimal] = None brokerExecutionCharge: Optional[decimal.Decimal] = None clearingCharge: Optional[decimal.Decimal] = None thirdPartyExecutionCharge: Optional[decimal.Decimal] = None thirdPartyRegulatoryCharge: Optional[decimal.Decimal] = None regFINRATradingActivityFee: Optional[decimal.Decimal] = None regSection31TransactionFee: Optional[decimal.Decimal] = None regOther: Optional[decimal.Decimal] = None totalNet: Optional[decimal.Decimal] = None totalNetInBase: Optional[decimal.Decimal] = None levelOfDetail: Optional[str] = None other: Optional[decimal.Decimal] = None @dataclass(frozen=True) class TransactionTax(FlexElement): accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: 
Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None date: Optional[datetime.datetime] = None taxDescription: Optional[str] = None quantity: Optional[decimal.Decimal] = None reportDate: Optional[datetime.date] = None taxAmount: Optional[decimal.Decimal] = None tradeId: Optional[str] = None tradePrice: Optional[decimal.Decimal] = None source: Optional[str] = None code: Tuple[enums.Code, ...] = () levelOfDetail: Optional[str] = None @dataclass(frozen=True) class TransactionTaxDetail(FlexElement): accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None date: Optional[datetime.datetime] = None taxDescription: Optional[str] = None quantity: Optional[decimal.Decimal] = None reportDate: Optional[datetime.date] = None taxAmount: Optional[decimal.Decimal] = None tradeId: Optional[str] = None tradePrice: Optional[decimal.Decimal] = None source: Optional[str] = None code: Tuple[enums.Code, ...] 
= () levelOfDetail: Optional[str] = None @dataclass(frozen=True) class SalesTax(FlexElement): accountId: Optional[str] = None acctAlias: Optional[str] = None model: Optional[str] = None currency: Optional[str] = None fxRateToBase: Optional[decimal.Decimal] = None assetCategory: Optional[enums.AssetClass] = None symbol: Optional[str] = None description: Optional[str] = None conid: Optional[str] = None securityID: Optional[str] = None securityIDType: Optional[str] = None cusip: Optional[str] = None isin: Optional[str] = None listingExchange: Optional[str] = None underlyingConid: Optional[str] = None underlyingSecurityID: Optional[str] = None underlyingSymbol: Optional[str] = None underlyingListingExchange: Optional[str] = None issuer: Optional[str] = None multiplier: Optional[decimal.Decimal] = None strike: Optional[decimal.Decimal] = None expiry: Optional[datetime.date] = None putCall: Optional[enums.PutCall] = None principalAdjustFactor: Optional[decimal.Decimal] = None date: Optional[datetime.date] = None country: Optional[str] = None taxType: Optional[str] = None payer: Optional[str] = None taxableDescription: Optional[str] = None taxableAmount: Optional[decimal.Decimal] = None taxRate: Optional[decimal.Decimal] = None salesTax: Optional[decimal.Decimal] = None taxableTransactionID: Optional[str] = None transactionID: Optional[str] = None code: Tuple[enums.Code, ...] = () # Type alias to work around https://github.com/python/mypy/issues/1775 _ClientFeesDetail = ClientFeesDetail
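# Usage sketch (illustrative values only, wrapped in a helper so nothing runs on import):
# the FlexElement subclasses above are frozen dataclasses, so they are constructed with
# keyword arguments and are immutable afterwards. The field values below are made up.
def _conversion_rate_sketch():
    import dataclasses
    import datetime
    import decimal

    rate = ConversionRate(
        reportDate=datetime.date(2020, 1, 31),
        fromCurrency="EUR",
        toCurrency="USD",
        rate=decimal.Decimal("1.1032"),
    )
    assert rate.toCurrency == "USD"
    try:
        rate.rate = decimal.Decimal("1.2")  # frozen=True forbids mutation
    except dataclasses.FrozenInstanceError:
        pass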
import os
import aiofiles
import webbrowser
import json as stdlib_json

from sanic import Sanic, response
from sanic.exceptions import abort
from sanic.response import json
from pyfy import AsyncSpotify, ClientCreds, AuthError

try:
    from spt_keys import KEYS
except:  # noqa: E722
    from spt_keys_template import KEYS


app = Sanic(__name__)

local_address = "localhost"
local_port = "5000"
local_full_address = local_address + ":" + str(local_port)

spt = AsyncSpotify()
client = ClientCreds()
state = "123"


@app.route("/authorize")
def authorize(request):
    export_keys()
    client.load_from_env()
    spt.client_creds = client
    if spt.is_oauth_ready:
        return response.redirect(spt.auth_uri(state=state))
    else:
        return (
            json(
                {
                    "error_description": "Client needs client_id, client_secret and a redirect uri in order to handle OAuth properly"
                }
            ),
            500,
        )


@app.route("/callback/spotify")  # You have to register this callback
async def spotify_callback(request):
    if request.args.get("error"):
        return json(dict(error=request.args.get("error_description")))
    elif request.args.get("code"):
        grant = request.args.get("code")
        callback_state = request.args.get("state")
        if callback_state != state:
            abort(401)
        try:
            user_creds = await spt.build_user_creds(grant=grant)
            # Join the path properly so the credentials file lands inside the working directory.
            creds_path = os.path.join(os.getcwd(), "SPOTIFY_CREDS.json")
            async with aiofiles.open(creds_path, "w") as file:
                await file.write(stdlib_json.dumps(user_creds.__dict__))
        except AuthError as e:
            return json(dict(error_description=e.msg, error_code=e.code), e.code)
        else:
            await spt.populate_user_creds()
            print(os.getcwd())
            return await response.file(creds_path)
            # return response.json(dict(user_creds=user_creds.__dict__, check_if_active=app.url_for('is_active', _scheme='http', _external=True, _server=local_full_address)), 200)
    else:
        return response.text("Something is wrong with your callback")


@app.route("/is_active")
async def is_active(request):
    return json(
        dict(
            is_active=await spt.is_active,
            your_tracks=app.url_for(
                "tracks", _scheme="http", _external=True, _server=local_full_address
            ),
            your_playlists=app.url_for(
                "playlists", _scheme="http", _external=True, _server=local_full_address
            ),
        )
    )


@app.route("/dump_creds")
def dump_creds(request):
    # TODO: save both client and user creds and send to user as json files to download
    return response.text("Not Implemented")


@app.route("/")
def index(request):
    return response.text("OK")


@app.route("/tracks")
async def tracks(request):
    return json(await spt.user_tracks())


@app.route("/playlists")
async def playlists(request):
    return json(await spt.user_playlists())


def export_keys():
    for k, v in KEYS.items():
        if v:
            os.environ[k] = v
            print("export " + k + "=" + v)


if __name__ == "__main__":
    webbrowser.open_new_tab("http://" + local_full_address + "/authorize")
    app.run(host=local_address, port=str(local_port), debug=True)
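# Hedged sketch of what spt_keys.py (imported above) might contain. export_keys()
# copies these values into os.environ before client.load_from_env() runs; the exact
# environment variable names pyfy reads are an assumption here, not confirmed from
# pyfy's documentation.
#
# spt_keys.py
# KEYS = {
#     "SPOTIFY_CLIENT_ID": "<your client id>",
#     "SPOTIFY_CLIENT_SECRET": "<your client secret>",
#     "SPOTIFY_REDIRECT_URI": "http://localhost:5000/callback/spotify",
# }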
""" This test will initialize the display using displayio and draw a solid green background, a smaller purple rectangle, and some yellow text. All drawing is done using native displayio modules. Pinouts are for the 2.4" TFT FeatherWing or Breakout with a Feather M4 or M0. """ import board import terminalio import displayio from adafruit_display_text import label import adafruit_ili9341 # Release any resources currently in use for the displays displayio.release_displays() spi = board.SPI() tft_cs = board.D9 tft_dc = board.D10 display_bus = displayio.FourWire( spi, command=tft_dc, chip_select=tft_cs, reset=board.D6 ) display = adafruit_ili9341.ILI9341(display_bus, width=320, height=240) # Make the display context splash = displayio.Group(max_size=10) display.show(splash) # Draw a green background color_bitmap = displayio.Bitmap(320, 240, 1) color_palette = displayio.Palette(1) color_palette[0] = 0x00FF00 # Bright Green bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0) splash.append(bg_sprite) # Draw a smaller inner rectangle inner_bitmap = displayio.Bitmap(280, 200, 1) inner_palette = displayio.Palette(1) inner_palette[0] = 0xAA0088 # Purple inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=20, y=20) splash.append(inner_sprite) # Draw a label text_group = displayio.Group(max_size=10, scale=3, x=57, y=120) text = "Hello World!" text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00) text_group.append(text_area) # Subgroup for text scaling splash.append(text_group) while True: pass
#!/usr/bin/env python3 import argparse import os import subprocess import sys def setup(): global args, workdir programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget'] if args.kvm: programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils'] elif args.docker: dockers = ['docker.io', 'docker-ce'] for i in dockers: return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i]) if return_code == 0: break if return_code != 0: print('Cannot find any way to install docker', file=sys.stderr) exit(1) else: programs += ['lxc', 'debootstrap'] subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) if not os.path.isdir('gitian.sigs.ltc'): subprocess.check_call(['git', 'clone', 'https://github.com/cbreezycoin-project/gitian.sigs.ltc.git']) if not os.path.isdir('cbreezycoin-detached-sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/cbreezycoin-project/cbreezycoin-detached-sigs.git']) if not os.path.isdir('gitian-builder'): subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) if not os.path.isdir('cbreezycoin'): subprocess.check_call(['git', 'clone', 'https://github.com/cbreezycoin-project/cbreezycoin.git']) os.chdir('gitian-builder') make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: make_image_prog += ['--lxc'] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') exit(0) def build(): global args, workdir os.makedirs('cbreezycoin-binaries/' + args.version, exist_ok=True) print('\nBuilding Dependencies\n') os.chdir('gitian-builder') os.makedirs('inputs', exist_ok=True) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz']) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch']) subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True) subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True) subprocess.check_call(['make', '-C', '../cbreezycoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) if args.linux: print('\nCompiling ' + args.version + ' Linux') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cbreezycoin='+args.commit, '--url', 'cbreezycoin='+args.url, '../cbreezycoin/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call('mv build/out/cbreezycoin-*.tar.gz build/out/src/cbreezycoin-*.tar.gz ../cbreezycoin-binaries/'+args.version, shell=True) if args.windows: print('\nCompiling ' + args.version + ' Windows') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cbreezycoin='+args.commit, '--url', 'cbreezycoin='+args.url, '../cbreezycoin/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', 
args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call('mv build/out/cbreezycoin-*-win-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/cbreezycoin-*.zip build/out/cbreezycoin-*.exe ../cbreezycoin-binaries/'+args.version, shell=True) if args.macos: print('\nCompiling ' + args.version + ' MacOS') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cbreezycoin='+args.commit, '--url', 'cbreezycoin='+args.url, '../cbreezycoin/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call('mv build/out/cbreezycoin-*-osx-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/cbreezycoin-*.tar.gz build/out/cbreezycoin-*.dmg ../cbreezycoin-binaries/'+args.version, shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Unsigned Sigs\n') os.chdir('gitian.sigs.ltc') subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer]) subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) os.chdir(workdir) def sign(): global args, workdir os.chdir('gitian-builder') if args.windows: print('\nSigning ' + args.version + ' Windows') subprocess.check_call('cp inputs/cbreezycoin-' + args.version + '-win-unsigned.tar.gz inputs/cbreezycoin-win-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../cbreezycoin/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call('mv build/out/cbreezycoin-*win64-setup.exe ../cbreezycoin-binaries/'+args.version, shell=True) subprocess.check_call('mv build/out/cbreezycoin-*win32-setup.exe ../cbreezycoin-binaries/'+args.version, shell=True) if args.macos: print('\nSigning ' + args.version + ' MacOS') subprocess.check_call('cp inputs/cbreezycoin-' + args.version + '-osx-unsigned.tar.gz inputs/cbreezycoin-osx-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../cbreezycoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call('mv build/out/cbreezycoin-osx-signed.dmg ../cbreezycoin-binaries/'+args.version+'/cbreezycoin-'+args.version+'-osx.dmg', shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Signed Sigs\n') os.chdir('gitian.sigs.ltc') subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer]) subprocess.check_call(['git', 'commit', '-a', '-m', 'Add 
'+args.version+' signed binary sigs for '+args.signer]) os.chdir(workdir) def verify(): global args, workdir os.chdir('gitian-builder') print('\nVerifying v'+args.version+' Linux\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-linux', '../cbreezycoin/contrib/gitian-descriptors/gitian-linux.yml']) print('\nVerifying v'+args.version+' Windows\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-win-unsigned', '../cbreezycoin/contrib/gitian-descriptors/gitian-win.yml']) print('\nVerifying v'+args.version+' MacOS\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-osx-unsigned', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx.yml']) print('\nVerifying v'+args.version+' Signed Windows\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-win-signed', '../cbreezycoin/contrib/gitian-descriptors/gitian-win-signer.yml']) print('\nVerifying v'+args.version+' Signed MacOS\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-osx-signed', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) os.chdir(workdir) def main(): global args, workdir parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version') parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request') parser.add_argument('-u', '--url', dest='url', default='https://github.com/cbreezycoin-project/cbreezycoin', help='Specify the URL of the repository. Default is %(default)s') parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS') parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s') parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)') parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. 
Will not commit anything.') parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git') parser.add_argument('signer', help='GPG signer to sign each build assert file') parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified') args = parser.parse_args() workdir = os.getcwd() args.linux = 'l' in args.os args.windows = 'w' in args.os args.macos = 'm' in args.os args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) if args.buildsign: args.build=True args.sign=True if args.kvm and args.docker: raise Exception('Error: cannot have both kvm and docker') args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker if args.docker: os.environ['USE_DOCKER'] = '1' elif not args.kvm: os.environ['USE_LXC'] = '1' if not 'GITIAN_HOST_IP' in os.environ.keys(): os.environ['GITIAN_HOST_IP'] = '10.0.3.1' if not 'LXC_GUEST_IP' in os.environ.keys(): os.environ['LXC_GUEST_IP'] = '10.0.3.5' # Disable for MacOS if no SDK found if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'): print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') args.macos = False script_name = os.path.basename(sys.argv[0]) # Signer and version shouldn't be empty if args.signer == '': print(script_name+': Missing signer.') print('Try '+script_name+' --help for more information') exit(1) if args.version == '': print(script_name+': Missing version.') print('Try '+script_name+' --help for more information') exit(1) # Add leading 'v' for tags if args.commit and args.pull: raise Exception('Cannot have both commit and pull') args.commit = ('' if args.commit else 'v') + args.version if args.setup: setup() os.chdir('cbreezycoin') if args.pull: subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) os.chdir('../gitian-builder/inputs/cbreezycoin') subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip() args.version = 'pull-' + args.version print(args.commit) subprocess.check_call(['git', 'fetch']) subprocess.check_call(['git', 'checkout', args.commit]) os.chdir(workdir) if args.build: build() if args.sign: sign() if args.verify: verify() if __name__ == '__main__': main()
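# Illustrative invocations of this script, matching the argparse options defined above.
# The script filename, signer and version are placeholders, not values from this repo:
#
#   ./gitian-build.py --setup <signer> <version>          # one-time build environment setup
#   ./gitian-build.py -b -j 4 -m 4000 <signer> <version>  # unsigned Linux/Windows/MacOS builds
#   ./gitian-build.py --verify <signer> <version>         # verify against gitian.sigs.ltc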
###############################################################################
##
##  Copyright (c) Crossbar.io Technologies GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################

from case import Case


class Case1_2_5(Case):

    DESCRIPTION = """Send binary message with payload of length 128."""

    EXPECTATION = """Receive echo'ed binary message (with payload as sent). Clean close with normal code."""

    def onOpen(self):
        payload = "\xfe" * 128
        self.expected[Case.OK] = [("message", payload, True)]
        self.expectedClose = {"closedByMe": True, "closeCode": [self.p.CLOSE_STATUS_CODE_NORMAL], "requireClean": True}
        self.p.sendFrame(opcode=2, payload=payload)
        self.p.killAfter(1)
a = int(input())
for i in range(1, 11):
    total = i * a
    print('{} x {} = {}'.format(i, a, total))
import copy import torch.nn as nn from rlkit.launchers.launcher_util import setup_logger import rlkit.torch.pytorch_util as ptu from rlkit.core.ma_eval_util import get_generic_ma_path_information def experiment(variant): num_agent = variant['num_agent'] from differential_game import DifferentialGame expl_env = DifferentialGame(game_name=args.exp_name) eval_env = DifferentialGame(game_name=args.exp_name) obs_dim = eval_env.observation_space.low.size action_dim = eval_env.action_space.low.size from rlkit.torch.networks.graph_builders import FullGraphBuilder graph_builder1 = FullGraphBuilder( input_node_dim=obs_dim+action_dim, num_node=num_agent, contain_self_loop=False) from rlkit.torch.networks.gnn_networks import GNNNet gnn1 = GNNNet( graph_builder1, node_dim=variant['qf_kwargs']['hidden_dim'], conv_type=variant['qf_kwargs']['conv_type'], num_conv_layers=1, hidden_activation='relu', output_activation='relu', ) qf1 = nn.Sequential( gnn1, nn.Linear(variant['qf_kwargs']['hidden_dim'],1) ) target_qf1 = copy.deepcopy(qf1) from rlkit.torch.networks.graph_builders import FullGraphBuilder graph_builder2 = FullGraphBuilder( input_node_dim=obs_dim+action_dim, num_node=num_agent, contain_self_loop=False) from rlkit.torch.networks.gnn_networks import GNNNet gnn2 = GNNNet( graph_builder2, node_dim=variant['qf_kwargs']['hidden_dim'], conv_type=variant['qf_kwargs']['conv_type'], num_conv_layers=1, hidden_activation='relu', output_activation='relu', ) qf2 = nn.Sequential( gnn2, nn.Linear(variant['qf_kwargs']['hidden_dim'],1) ) target_qf2 = copy.deepcopy(qf2) policy_n, eval_policy_n, expl_policy_n = [], [], [] for i in range(num_agent): from rlkit.torch.networks.layers import SplitLayer policy = nn.Sequential( nn.Linear(obs_dim,variant['policy_kwargs']['hidden_dim']), nn.ReLU(), nn.Linear(variant['policy_kwargs']['hidden_dim'],variant['policy_kwargs']['hidden_dim']), nn.ReLU(), SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim), nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)]) ) from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy policy = TanhGaussianPolicy(module=policy) from rlkit.torch.policies.make_deterministic import MakeDeterministic eval_policy = MakeDeterministic(policy) from rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy if variant['random_exploration']: from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy expl_policy = PolicyWrappedWithExplorationStrategy( exploration_strategy=EpsilonGreedy(expl_env.action_space, prob_random_action=1.0), policy=policy, ) else: expl_policy = policy policy_n.append(policy) eval_policy_n.append(eval_policy) expl_policy_n.append(expl_policy) from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n) expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n) from rlkit.data_management.ma_env_replay_buffer import MAEnvReplayBuffer replay_buffer = MAEnvReplayBuffer(variant['replay_buffer_size'], expl_env, num_agent=num_agent) from rlkit.torch.masac.masac_gnn import MASACGNNTrainer trainer = MASACGNNTrainer( env = expl_env, qf1=qf1, target_qf1=target_qf1, qf2=qf2, target_qf2=target_qf2, policy_n=policy_n, **variant['trainer_kwargs'] ) from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm algorithm = TorchBatchRLAlgorithm( trainer=trainer, exploration_env=expl_env, evaluation_env=eval_env, exploration_data_collector=expl_path_collector, 
evaluation_data_collector=eval_path_collector, replay_buffer=replay_buffer, log_path_function=get_generic_ma_path_information, **variant['algorithm_kwargs'] ) algorithm.to(ptu.device) algorithm.train() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--exp_name', type=str, default='zero_sum') parser.add_argument('--log_dir', type=str, default='MASACGNNGaussian') parser.add_argument('--conv', type=str, default='GSage') parser.add_argument('--hidden', type=int, default=16) parser.add_argument('--oa', action='store_true', default=False) # online action parser.add_argument('--snl', action='store_true', default=False) # sum n loss parser.add_argument('--re', action='store_true', default=False) # random exploration parser.add_argument('--alpha', type=float, default=None) # init alpha parser.add_argument('--fa', action='store_true', default=False) # fix alpha parser.add_argument('--lr', type=float, default=None) parser.add_argument('--bs', type=int, default=None) parser.add_argument('--epoch', type=int, default=None) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--snapshot_mode', type=str, default="gap_and_last") parser.add_argument('--snapshot_gap', type=int, default=500) args = parser.parse_args() import os.path as osp pre_dir = './Data/'+args.exp_name main_dir = args.log_dir\ +args.conv\ +('hidden'+str(args.hidden))\ +('oa' if args.oa else '')\ +('snl' if args.snl else '')\ +('re' if args.re else '')\ +(('alpha'+str(args.alpha)) if args.alpha else '')\ +('fa' if args.fa else '')\ +(('lr'+str(args.lr)) if args.lr else '')\ +(('bs'+str(args.bs)) if args.bs else '') log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed)) # noinspection PyTypeChecker variant = dict( num_agent=2, random_exploration=args.re, algorithm_kwargs=dict( num_epochs=(args.epoch if args.epoch else 100), num_eval_steps_per_epoch=100, num_trains_per_train_loop=100, num_expl_steps_per_train_loop=100, min_num_steps_before_training=100, max_path_length=100, batch_size=(args.bs if args.bs else 256), ), trainer_kwargs=dict( use_soft_update=True, tau=1e-2, discount=0.99, qf_learning_rate=(args.lr if args.lr else 1e-3), policy_learning_rate=(args.lr if args.lr else 1e-4), online_action=args.oa, sum_n_loss=args.snl, init_alpha=(args.alpha if args.alpha else 1.), use_automatic_entropy_tuning=(not args.fa), ), qf_kwargs=dict( conv_type=args.conv, hidden_dim=args.hidden, ), policy_kwargs=dict( hidden_dim=args.hidden, ), replay_buffer_size=int(1E6), ) import os if not os.path.isdir(log_dir): os.makedirs(log_dir) with open(osp.join(log_dir,'variant.json'),'w') as out_json: import json json.dump(variant,out_json,indent=2) import sys cmd_input = 'python ' + ' '.join(sys.argv) + '\n' with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f: f.write(cmd_input) setup_logger(args.exp_name+'/'+main_dir, variant=variant, snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap, log_dir=log_dir) import numpy as np import torch np.random.seed(args.seed) torch.manual_seed(args.seed) # ptu.set_gpu_mode(True) # optionally set the GPU (default=False) experiment(variant)
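# Hedged sketch of what a SplitLayer-style module could look like. The policy above
# ends in SplitLayer(layers=[mean_head, log_std_head]), which suggests a module that
# feeds the same features to each sub-layer and returns the outputs as a tuple
# (e.g. mean and log_std for the Gaussian policy). This is an inference from the
# call site, not rlkit's actual implementation.
import torch.nn as nn


class SplitLayerSketch(nn.Module):
    def __init__(self, layers):
        super().__init__()
        self.layers = nn.ModuleList(layers)

    def forward(self, x):
        # Every head sees the same input features.
        return tuple(layer(x) for layer in self.layers)

# e.g.: mean, log_std = SplitLayerSketch([nn.Linear(16, 2), nn.Linear(16, 2)])(features)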
from django.urls import NoReverseMatch
from django.utils import html
from django.utils.translation import ugettext as _

from couchdbkit import ResourceNotFound

from casexml.apps.case.models import CommCareCaseAction
from corehq.apps.case_search.const import (
    CASE_COMPUTED_METADATA,
    SPECIAL_CASE_PROPERTIES,
    SPECIAL_CASE_PROPERTIES_MAP,
)
from corehq.apps.es.case_search import flatten_result
from corehq.apps.groups.models import Group
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.v2.models import BaseDataFormatter
from corehq.apps.reports.v2.utils import report_date_to_json
from corehq.apps.users.models import CouchUser
from corehq.util.quickcache import quickcache
from corehq.util.timezones.utils import parse_date
from corehq.util.view_utils import absolute_reverse


class CaseDataFormatter(BaseDataFormatter):

    def __init__(self, request, domain, raw_data):
        super(CaseDataFormatter, self).__init__(request, domain, raw_data)
        self.raw_data = flatten_result(raw_data)

    @property
    def owner_id(self):
        """Special Case Property @owner_id"""
        if 'owner_id' in self.raw_data:
            return self.raw_data.get('owner_id')
        elif 'user_id' in self.raw_data:
            return self.raw_data.get('user_id')
        else:
            return ''

    @property
    def date_opened(self):
        """Special Case Property date_opened"""
        return self._fmt_dateprop('opened_on', False)

    @property
    def last_modified(self):
        """Special Case Property last_modified"""
        return self._fmt_dateprop('modified_on', False)

    @property
    def closed_by_username(self):
        """Computed metadata"""
        return self._get_username(self.closed_by_user_id)

    @property
    def last_modified_by_user_username(self):
        """Computed metadata"""
        return self._get_username(self.raw_data.get('user_id'))

    @property
    def opened_by_username(self):
        """Computed metadata"""
        user = self._creating_user
        if user is None:
            return _("No Data")
        return user['name'] or self._user_not_found_display(user['id'])

    @property
    def owner_name(self):
        """Computed metadata"""
        owner_type, owner = self._owner
        if owner_type == 'group':
            return '<span class="label label-default">%s</span>' % owner['name']
        return owner['name']

    @property
    def closed_by_user_id(self):
        """Computed metadata"""
        return self.raw_data.get('closed_by')

    @property
    def opened_by_user_id(self):
        """Computed metadata"""
        user = self._creating_user
        if user is None:
            return _("No data")
        return user['id']

    @property
    def server_last_modified_date(self):
        """Computed metadata"""
        return self._fmt_dateprop('server_modified_on', False)

    def get_context(self):
        context = {}
        context.update(self.raw_data)
        context.update(self._case_info_context)
        context['_link'] = self._link
        return context

    @property
    def _link(self):
        try:
            return absolute_reverse(
                'case_data', args=[self.domain, self.raw_data.get('_id')]
            )
        except NoReverseMatch:
            return None

    @property
    def _case_info_context(self):
        context = {}
        for prop in SPECIAL_CASE_PROPERTIES + CASE_COMPUTED_METADATA:
            context[prop] = self._get_case_info_prop(prop)
        return context

    def _get_case_info_prop(self, prop):
        fmt_prop = prop.replace('@', '')
        if hasattr(self, fmt_prop):
            return getattr(self, fmt_prop)
        elif prop in SPECIAL_CASE_PROPERTIES:
            return self._get_special_property(prop)
        raise NotImplementedError(
            "CaseDataFormatter.{} not found".format(prop))

    def _get_special_property(self, prop):
        return (SPECIAL_CASE_PROPERTIES_MAP[prop]
                .value_getter(self.raw_data))

    def _fmt_dateprop(self, prop, iso=True):
        val = report_date_to_json(
            self.request,
            self.domain,
            parse_date(self.raw_data[prop])
        )
        if iso:
            val = 'T'.join(val.split(' ')) if val else None
        return val

    @property
    @quickcache(['self.owner_id'])
    def _owning_group(self):
        try:
            return Group.get(self.owner_id)
        except ResourceNotFound:
            return None

    @property
    @quickcache(['self.owner_id'])
    def _location(self):
        return SQLLocation.objects.get_or_None(location_id=self.owner_id)

    @property
    @quickcache(['self.owner_id'])
    def _owner(self):
        if self._owning_group and self._owning_group.name:
            return ('group', {'id': self._owning_group._id,
                              'name': self._owning_group.name})
        elif self._location:
            return ('location', {'id': self._location.location_id,
                                 'name': self._location.display_name})
        return ('user', self._user_meta(self.owner_id))

    @property
    def _creating_user(self):
        try:
            creator_id = self.raw_data['opened_by']
        except KeyError:
            creator_id = None
            if 'actions' in self.raw_data:
                for action in self.raw_data['actions']:
                    if action['action_type'] == 'create':
                        action_doc = CommCareCaseAction.wrap(action)
                        creator_id = action_doc.get_user_id()
                        break
        if not creator_id:
            return None
        return self._user_meta(creator_id)

    def _user_meta(self, user_id):
        return {'id': user_id, 'name': self._get_username(user_id)}

    def _user_not_found_display(self, user_id):
        return _("Unknown [%s]") % user_id

    @quickcache(['user_id'])
    def _get_username(self, user_id):
        if not user_id:
            return None
        try:
            user = CouchUser.get_by_user_id(user_id)
            if user:
                return user.username
        except CouchUser.AccountTypeError:
            return None
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE from nose import SkipTest import numpy as np from numpy.testing import assert_allclose, assert_raises, assert_equal from scipy.sparse import isspmatrix from scipy.spatial.distance import cdist, pdist, squareform from megaman.geometry import (Geometry, compute_adjacency_matrix, Adjacency, adjacency_methods) try: import pyflann as pyf NO_PYFLANN = False except ImportError: NO_PYFLANN = True def test_adjacency_methods(): assert_equal(set(adjacency_methods()), {'auto', 'pyflann', 'ball_tree', 'cyflann', 'brute', 'kd_tree'}) def test_adjacency_input_validation(): X = np.random.rand(20, 3) # need to specify radius or n_neighbors assert_raises(ValueError, compute_adjacency_matrix, X) # cannot specify both radius and n_neighbors assert_raises(ValueError, compute_adjacency_matrix, X, radius=1, n_neighbors=10) def test_adjacency(): rng = np.random.RandomState(36) X = rng.rand(100, 3) Gtrue = {} exact_methods = [m for m in Adjacency.methods() if not m.endswith('flann')] def check_kneighbors(n_neighbors, method): if method == 'pyflann' and NO_PYFLANN: raise SkipTest("pyflann not installed") G = compute_adjacency_matrix(X, method=method, n_neighbors=n_neighbors) assert isspmatrix(G) assert G.shape == (X.shape[0], X.shape[0]) if method in exact_methods: assert_allclose(G.toarray(), Gtrue[n_neighbors].toarray()) def check_radius(radius, method): if method == 'pyflann' and NO_PYFLANN: raise SkipTest("pyflann not installed") G = compute_adjacency_matrix(X, method=method, radius=radius) assert isspmatrix(G) assert G.shape == (X.shape[0], X.shape[0]) if method in exact_methods: assert_allclose(G.toarray(), Gtrue[radius].toarray()) for n_neighbors in [5, 10, 15]: Gtrue[n_neighbors] = compute_adjacency_matrix(X, method='brute', n_neighbors=n_neighbors) for method in Adjacency.methods(): yield check_kneighbors, n_neighbors, method for radius in [0.1, 0.5, 1.0]: Gtrue[radius] = compute_adjacency_matrix(X, method='brute', radius=radius) for method in Adjacency.methods(): yield check_radius, radius, method def test_unknown_method(): X = np.arange(20).reshape((10, 2)) assert_raises(ValueError, compute_adjacency_matrix, X, 'foo') def test_all_methods_close(): rand = np.random.RandomState(36) X = rand.randn(10, 2) D_true = squareform(pdist(X)) D_true[D_true > 0.5] = 0 def check_method(method): kwargs = {} if method == 'pyflann': try: import pyflann as pyf except ImportError: raise SkipTest("pyflann not installed.") flindex = pyf.FLANN() flindex.build_index(X, algorithm='kmeans', target_precision=0.9) kwargs['flann_index'] = flindex this_D = compute_adjacency_matrix(X, method=method, radius=0.5, **kwargs) assert_allclose(this_D.toarray(), D_true, rtol=1E-5) for method in ['auto', 'cyflann', 'pyflann', 'brute']: yield check_method, method def test_custom_adjacency(): class CustomAdjacency(Adjacency): name = "custom" def adjacency_graph(self, X): return squareform(pdist(X)) rand = np.random.RandomState(42) X = rand.rand(10, 2) D = compute_adjacency_matrix(X, method='custom', radius=1) assert_allclose(D, cdist(X, X)) Adjacency._remove_from_registry("custom") def test_cyflann_index_type(): rand = np.random.RandomState(36) X = rand.randn(10, 2) D_true = squareform(pdist(X)) D_true[D_true > 1.5] = 0 def check_index_type(index_type): method = 'cyflann' radius = 1.5 cyflann_kwds = {'index_type':index_type} adjacency_kwds = {'radius':radius, 'cyflann_kwds':cyflann_kwds} this_D = compute_adjacency_matrix(X=X, method = 'cyflann', **adjacency_kwds) 
assert_allclose(this_D.toarray(), D_true, rtol=1E-5, atol=1E-5) for index_type in ['kmeans', 'kdtrees']: yield check_index_type, index_type
# NOTE: this script targets Python 2 (urllib2, print statement).
import numpy as np
import urllib2 as ulib
import csv
import time

if __name__ == '__main__':
    start_time = time.time()
    outDir = "Fasta/"
    listDir = "protein.csv"
    urlDomain = "http://www.uniprot.org/uniprot/"
    protList = []
    it = 0  # flag for parsing and counter for download variable

    # Parse csv
    with open(listDir, 'r') as f:
        csvContent = csv.reader(f, delimiter=',', quotechar='\"')
        for row in csvContent:
            if (it > 0):
                protList.append(row[2])
            else:
                it = 1

    # Pull data and write file
    for i in protList:
        # print str(it)+" of "+str(len(protList))
        connect = ulib.urlopen(urlDomain + i + ".fasta")
        htmlContent = connect.read()
        with open(outDir + i + ".fasta", 'w') as f:
            f.write(htmlContent)
            f.close()
        it += 1

    #### Debugging section ####
    print "Runtime : " + str(time.time() - start_time)
    ###########################
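# A rough Python 3 equivalent of the downloader above, assuming the same CSV layout
# (accession IDs in the third column, one header row) and the same output directory.
# The helper name and HTTPS scheme are choices made here, not taken from the original.
import csv
import time
import urllib.request


def download_fasta(list_path="protein.csv", out_dir="Fasta/",
                   url_domain="https://www.uniprot.org/uniprot/"):
    start_time = time.time()
    with open(list_path, newline="") as f:
        rows = list(csv.reader(f, delimiter=",", quotechar='"'))
    prot_list = [row[2] for row in rows[1:]]  # skip the header row
    for acc in prot_list:
        with urllib.request.urlopen(url_domain + acc + ".fasta") as resp:
            data = resp.read()
        with open(out_dir + acc + ".fasta", "wb") as out:
            out.write(data)
    print("Runtime : " + str(time.time() - start_time))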
""" This compat modules is a wrapper of the core os module that forbids usage of specific operations (e.g. chown, chmod, getuid) that would be harmful to the Windows file security model of Certbot. This module is intended to replace standard os module throughout certbot projects (except acme). """ # pylint: disable=function-redefined from __future__ import absolute_import # First round of wrapping: we import statically all public attributes exposed by the os module # This allows in particular to have pylint, mypy, IDEs be aware that most of os members are # available in certbot.compat.os. from os import * # type: ignore # pylint: disable=wildcard-import,unused-wildcard-import,redefined-builtin,os-module-forbidden # Second round of wrapping: we import dynamically all attributes from the os module that have not # yet been imported by the first round (static import). This covers in particular the case of # specific python 3.x versions where not all public attributes are in the special __all__ of os, # and so not in `from os import *`. import os as std_os # pylint: disable=os-module-forbidden import sys as std_sys ourselves = std_sys.modules[__name__] for attribute in dir(std_os): # Check if the attribute does not already exist in our module. It could be internal attributes # of the module (__name__, __doc__), or attributes from standard os already imported with # `from os import *`. if not hasattr(ourselves, attribute): setattr(ourselves, attribute, getattr(std_os, attribute)) # Similar to os.path, allow certbot.compat.os.path to behave as a module std_sys.modules[__name__ + '.path'] = path # Clean all remaining importables that are not from the core os module. del ourselves, std_os, std_sys # Chmod is the root of all evil for our security model on Windows. With the default implementation # of os.chmod on Windows, almost all bits on mode will be ignored, and only a general RO or RW will # be applied. The DACL, the inner mechanism to control file access on Windows, will stay on its # default definition, giving effectively at least read permissions to any one, as the default # permissions on root path will be inherit by the file (as NTFS state), and root path can be read # by anyone. So the given mode needs to be translated into a secured and not inherited DACL that # will be applied to this file using filesystem.chmod, calling internally the win32security # module to construct and apply the DACL. Complete security model to translate a POSIX mode into # a suitable DACL on Windows for Certbot can be found here: # https://github.com/certbot/certbot/issues/6356 # Basically, it states that appropriate permissions will be set for the owner, nothing for the # group, appropriate permissions for the "Everyone" group, and all permissions to the # "Administrators" group + "System" user, as they can do everything anyway. def chmod(*unused_args, **unused_kwargs): # pylint: disable=function-redefined """Method os.chmod() is forbidden""" raise RuntimeError('Usage of os.chmod() is forbidden. ' 'Use certbot.compat.filesystem.chmod() instead.') # Because of the blocking strategy on file handlers on Windows, rename does not behave as expected # with POSIX systems: an exception will be raised if dst already exists. def rename(*unused_args, **unused_kwargs): """Method os.rename() is forbidden""" raise RuntimeError('Usage of os.rename() is forbidden. ' 'Use certbot.compat.filesystem.replace() instead.') # Behavior of os.replace is consistent between Windows and Linux. However, it is not supported on # Python 2.x. 
So, as for os.rename, we forbid it in favor of filesystem.replace. def replace(*unused_args, **unused_kwargs): """Method os.replace() is forbidden""" raise RuntimeError('Usage of os.replace() is forbidden. ' 'Use certbot.compat.filesystem.replace() instead.')
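# Hedged usage sketch for callers of this compat layer: import certbot.compat.os in
# place of the standard os module, and move permission/rename calls over to
# certbot.compat.filesystem as the error messages above direct. The filesystem
# function signatures shown here are assumptions based on those messages, and the
# paths are placeholders.
#
# from certbot.compat import filesystem
# from certbot.compat import os  # instead of "import os"
#
# os.makedirs("/etc/letsencrypt/live", exist_ok=True)  # plain os behaviour still works
# filesystem.chmod("/etc/letsencrypt/live", 0o700)      # instead of os.chmod(...)
# filesystem.replace("cert.pem.new", "cert.pem")        # instead of os.rename(...)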
from commons import *
import os


def pgp_check():
    init_directory('./temp')

    # gpg must exist on your system
    status = os.system('gpg --version')
    if status == 0:
        print_up('gpg is found')
    else:
        print_err('can\'t find gpg')


def verify_publickey_message(pk, msg):
    # obtain a temp filename
    fn = get_random_hex_string(10)

    # save the public key file and the message file
    pkfn = f'./temp/{fn}.pk'
    pkbinfn = pkfn + '.gpg'
    msgfn = f'./temp/{fn}.msg'

    writefile(pkfn, pk, mode='w', encoding='utf-8')
    writefile(msgfn, msg, mode='w', encoding='utf-8')

    def cleanup():
        removefile(pkfn)
        removefile(msgfn)
        removefile(pkbinfn)

    # remove armor
    status = os.system(f'gpg --dearmor {pkfn}')
    if status != 0:
        qprint('status:', status)
        cleanup()
        raise Exception('failed to dearmor the public key (there might be something wrong with your public key)')

    # verify
    status = os.system(f'gpg --no-default-keyring --keyring {pkbinfn} --verify {msgfn}')
    if status != 0:
        qprint('status:', status)
        cleanup()
        raise Exception('failed to verify the message (your public key is okay but the signature you supplied does not match the public key, or is of a wrong format)')

    cleanup()
    return True
import unittest import math from Include.Tuple import * # # Tuple Unit tests # class TestTuplePointVector(unittest.TestCase): def test_Tuple_ifWArgumentIsOneTupleIsPoint(self): self.a = Tuple(4.3, -4.2, 3.1, 1.0) self.assertEqual(self.a.x, 4.3) self.assertEqual(self.a.y, -4.2) self.assertEqual(self.a.z, 3.1) self.assertEqual(self.a.w, 1.0) self.assertEqual(self.a.get_type(), TupleTypes.POINT) self.assertNotEqual(self.a.get_type(), TupleTypes.VECTOR) def test_Tuple_ifWArgumentIsZeroTupleIsVector(self): self.a = Tuple(4.3, -4.2, 3.1, 0.0) self.assertEqual(self.a.x, 4.3) self.assertEqual(self.a.y, -4.2) self.assertEqual(self.a.z, 3.1) self.assertEqual(self.a.w, 0.0) self.assertEqual(self.a.get_type(), TupleTypes.VECTOR) self.assertNotEqual(self.a.get_type(), TupleTypes.POINT) class TestTupleArithmetic(unittest.TestCase): def test_Tuple_addTwoTuples(self): self.a1 = Tuple(3, -2, 5, 1) self.a2 = Tuple(-2, 3, 1, 0) self.result = self.a1 + self.a2 self.assertEqual(self.result, Tuple(1, 1, 6, 1)) def test_Tuple_subtractTwoPoints(self): self.p1 = point(3, 2, 1) self.p2 = point(5, 6, 7) self.result = self.p1 - self.p2 self.assertEqual(self.result, vector(-2, -4, -6)) def test_Tuple_subtractAVectorFromAPoint(self): self.p = point(3, 2, 1) self.v = vector(5, 6, 7) self.result = self.p - self.v self.assertEqual(self.result, point(-2, -4, -6)) def test_Tuple_subtractTwoVectors(self): self.v1 = vector(3, 2, 1) self.v2 = vector(5, 6, 7) self.result = self.v1 - self.v2 self.assertEqual(self.result, vector(-2, -4, -6)) def test_Tuple_subtractVectorFromZeroVector(self): self.zero = vector(0, 0, 0) self.v = vector(1, -2, 3) self.result = self.zero - self.v self.assertEqual(self.result, vector(-1, 2, -3)) def test_Tuple_negateATuple(self): self.a = Tuple(1, -2, 3, -4) self.result = -self.a self.assertEqual(self.result, Tuple(-1, 2, -3, 4)) def test_Tuple_multiplyATupleByAScalar(self): self.a = Tuple(1, -2, 3, -4) self.result = self.a * 3.5 self.assertEqual(self.result, Tuple(3.5, -7, 10.5, -14)) def test_Tuple_multiplyATupleByAFraction(self): self.a = Tuple(1, -2, 3, -4) self.result = self.a * 0.5 self.assertEqual(self.result, Tuple(0.5, -1, 1.5, -2)) def test_Tuple_divideATupleByAScalar(self): self.a = Tuple(1, -2, 3, -4) self.result = self.a / 2 self.assertEqual(self.result, Tuple(0.5, -1, 1.5, -2)) class TestTupleMagnitude(unittest.TestCase): def test_Tuple_computeTheMagnitudeWithVectorXComponentOne(self): self.v = vector(1, 0, 0) self.result = self.v.magnitude() self.assertEqual(self.result, 1) def test_Tuple_computeTheMagnitudeWithVectorYComponentOne(self): self.v = vector(0, 1, 0) self.result = self.v.magnitude() self.assertEqual(self.result, 1) def test_Tuple_computeTheMagnitudeWithVectorZComponentOne(self): self.v = vector(0, 0, 1) self.result = self.v.magnitude() self.assertEqual(self.result, 1) def test_Tuple_computeTheMagnitudeWithVectorOneTwoThree(self): self.v = vector(1, 2, 3) self.result = self.v.magnitude() self.assertEqual(self.result, math.sqrt(14)) def test_Tuple_computeTheMagnitudeWithVectorMinusOneTwoThree(self): self.v = vector(-1, -2, -3) self.result = self.v.magnitude() self.assertEqual(self.result, math.sqrt(14)) class TestTupleNormalize(unittest.TestCase): def test_Tuple_normalizeVectorWithXAsFour(self): self.v = vector(4, 0, 0) self.result = self.v.normalize() self.assertEqual(self.result, vector(1, 0, 0)) def test_Tuple_normalizeVectorMinusOneTwoThree(self): self.v = vector(1, 2, 3) self.magnitude = math.sqrt(14) self.result = self.v.normalize() self.assertEqual(self.result, 
vector(1/self.magnitude, 2/self.magnitude, 3/self.magnitude)) def test_Tuple_computeMagnitudeOfNormalizedVector(self): self.v = vector(1, 2, 3) self.norm = self.v.normalize() self.result = self.norm.magnitude() self.assertEqual(self.result, 1) class TestTupleDotProduct(unittest.TestCase): def test_Tuple_theDotProductOfTwoTuples(self): self.a = vector(1, 2, 3) self.b = vector(2, 3, 4) self.result = self.a.dot(self.b) self.assertEqual(self.result, 20) class TestTupleCrossProduct(unittest.TestCase): def test_Tuple_theCrossProductOfTwoVectors(self): self.a = vector(1, 2, 3) self.b = vector(2, 3, 4) self.result1 = self.a.cross(self.b) self.result2 = self.b.cross(self.a) self.assertEqual(self.result1, vector(-1, 2, -1)) self.assertEqual(self.result2, vector(1, -2, 1)) # # Color Struct Unit test # class TestTupleColor(unittest.TestCase): def test_Color_createsAColor(self): self.c = Color(-0.5, 0.4, 1.7) self.assertEqual(self.c.red, -0.5) self.assertEqual(self.c.green, 0.4) self.assertEqual(self.c.blue, 1.7) def test_Color_AddColors(self): self.c1 = Color(0.9, 0.6, 0.75) self.c2 = Color(0.7, 0.1, 0.25) self.result = self.c1 + self.c2 self.assertEqual(self.result, Color(1.6, 0.7, 1.0)) def test_Color_SubtractColors(self): self.c1 = Color(0.9, 0.6, 0.75) self.c2 = Color(0.6, 0.1, 0.25) self.result = self.c1 - self.c2 self.assertEqual(self.result, Color(0.3, 0.5, 0.5)) def test_Color_MultiplyColorWithScalar(self): self.c = Color(0.2, 0.3, 0.4) self.result = self.c * 2 self.assertEqual(self.result, Color(0.4, 0.6, 0.8)) def test_Color_MultiplyingColors(self): self.c1 = Color(1, 0.2, 0.4) self.c2 = Color(0.9, 1, 0.1) self.result = self.c1 * self.c2 self.assertEqual(self.result, Color(0.9, 0.2, 0.04)) def test_Tuple_reflectingAVectorApproachingAt45Degrees(self): self.v = vector(1, -1, 0) self.n = vector(0, 1, 0) self.r = self.v.reflect(self.n) self.assertEqual(self.r, vector(1, 1, 0)) def test_Tuple_reflectingAVectorOffASlantedSurface(self): self.v = vector(0, -1, 0) self.n = vector(math.sqrt(2)/2, math.sqrt(2)/2, 0) self.r = self.v.reflect(self.n) self.assertEqual(self.r, vector(1, 0, 0)) # # Point function Unit tests # class TestTuplePoint(unittest.TestCase): def test_point_functionCreatesATupleAsAPoint(self): self.point = point(4, -4, 3) self.assertEqual(self.point, Tuple(4, -4, 3, 1)) # # Vector function Unit tests # class TestTupleVector(unittest.TestCase): def test_vector_functionCreatesATupleAsAVector(self): self.vector = vector(4, -4, 3) self.assertEqual(self.vector, Tuple(4, -4, 3, 0)) if __name__ == '__main__': unittest.main()
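# The reflection tests above pin down the usual formula r = v - n * 2 * dot(v, n).
# A minimal sketch of a reflect() implementation consistent with those tests, written
# against plain 3-tuples rather than the project's Tuple class:
import math as _math


def _reflect_sketch(v, n):
    """Reflect vector v around unit normal n (both 3-component tuples)."""
    d = sum(a * b for a, b in zip(v, n))
    return tuple(a - b * 2 * d for a, b in zip(v, n))


assert _reflect_sketch((1, -1, 0), (0, 1, 0)) == (1, 1, 0)
_s = _math.sqrt(2) / 2
assert all(abs(a - b) < 1e-9
           for a, b in zip(_reflect_sketch((0, -1, 0), (_s, _s, 0)), (1, 0, 0)))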
from collections.abc import Mapping import numpy as np from pickydict import PickyDict from .utils import load_known_key_conversions _key_regex_replacements = {r"\s": "_", r"[!?.,;:]": ""} _key_replacements = load_known_key_conversions() class Metadata: """Class to handle spectrum metadata in matchms. Metadata entries will be stored as PickyDict dictionary in `metadata.data`. Unlike normal Python dictionaries, not all key names will be accepted. Key names will be forced to be lower-case to avoid confusions between key such as "Precursor_MZ" and "precursor_mz". To avoid the default harmonization of the metadata dictionary use the option `matchms_key_style=False`. Code example: .. code-block:: python metadata = Metadata({"Precursor_MZ": 201.5, "Compound Name": "SuperStuff"}) print(metadata["precursor_mz"]) # => 201.5 print(metadata["compound_name"]) # => SuperStuff Or if the matchms default metadata harmonization should not take place: .. code-block:: python metadata = Metadata({"Precursor_MZ": 201.5, "Compound Name": "SuperStuff"}, matchms_key_style=False) print(metadata["precursor_mz"]) # => 201.5 print(metadata["compound_name"]) # => None (now you need to use "compound name") """ def __init__(self, metadata: dict = None, matchms_key_style: bool = True): """ Parameters ---------- metadata: Spectrum metadata as a dictionary. matchms_key_style: Set to False if metadata harmonization to default keys is not desired. The default is True. """ if metadata is None: self._data = PickyDict({}) elif isinstance(metadata, Mapping): self._data = PickyDict(metadata) else: raise ValueError("Unexpected data type for metadata (should be dictionary, or None).") self.matchms_key_style = matchms_key_style if self.matchms_key_style is True: self.harmonize_metadata() def __eq__(self, other_metadata): if self.keys() != other_metadata.keys(): return False for key, value in self.items(): if isinstance(value, np.ndarray): if not np.all(value == other_metadata.get(key)): return False elif value != other_metadata.get(key): return False return True def harmonize_metadata(self): """Runs default harmonization of metadata. Method harmonized metadata field names which includes setting them to lower-case and runing a series of regex replacements followed by default field name replacements (such as precursor_mass --> precursor_mz). """ self._data.key_regex_replacements = _key_regex_replacements self._data.key_replacements = _key_replacements # ------------------------------ # Getters and Setters # ------------------------------ def get(self, key: str, default=None): """Retrieve value from :attr:`metadata` dict. """ return self._data.copy().get(key, default) def set(self, key: str, value): """Set value in :attr:`metadata` dict. """ self._data[key] = value if self.matchms_key_style is True: self.harmonize_metadata() return self def keys(self): """Retrieve all keys of :attr:`.metadata` dict. """ return self._data.keys() def values(self): """Retrieve all values of :attr:`.metadata` dict. """ return self._data.values() def items(self): """Retrieve all items (key, value pairs) of :attr:`.metadata` dict. 
""" return self._data.items() def __getitem__(self, key=None): return self.get(key) def __setitem__(self, key, newvalue): self.set(key, newvalue) @property def data(self): return self._data.copy() @data.setter def data(self, new_dict): if isinstance(new_dict, PickyDict): self._data = new_dict elif isinstance(new_dict, Mapping): self._data = PickyDict(new_dict) if self.matchms_key_style is True: self.harmonize_metadata() else: raise TypeError("Expected input of type dict or PickyDict.")
""" Base settings to build other settings files upon. """ import environ ROOT_DIR = ( environ.Path(__file__) - 3 ) # (webscrape/config/settings/base.py - 3 = webscrape/) APPS_DIR = ROOT_DIR.path("webscrape") env = environ.Env() READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False) if READ_DOT_ENV_FILE: # OS environment variables take precedence over variables from .env env.read_env(str(ROOT_DIR.path(".env"))) # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = env.bool("DJANGO_DEBUG", False) # Local time zone. Choices are # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # though not all of them may be available with every OS. # In Windows, this must be set to your system time zone. TIME_ZONE = "UTC" # https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = "en-us" # https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths LOCALE_PATHS = [ROOT_DIR.path("locale")] # DATABASES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#databases # DATABASES = { # "default": env.db("DATABASE_URL", default="postgres:///webscrape") # } # DATABASES["default"]["ATOMIC_REQUESTS"] = True DATABASES = { 'default': { 'NAME': 'messaging', 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'USER': 'messaging', 'PASSWORD': 'messaging', 'HOST': 'localhost', 'PORT': 5432, 'ATOMIC_REQUESTS': True } } # URLS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf ROOT_URLCONF = "config.urls" # https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = "config.wsgi.application" # APPS # ------------------------------------------------------------------------------ DJANGO_APPS = [ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.sites", "django.contrib.messages", "django.contrib.staticfiles", # "django.contrib.humanize", # Handy template tags "django.contrib.admin", ] THIRD_PARTY_APPS = [ "rest_framework", ] LOCAL_APPS = [ "webscrape.application.apps.ApplicationConfig", # Your stuff: custom apps go here ] # https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS # MIGRATIONS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules MIGRATION_MODULES = {"sites": "webscrape.contrib.sites.migrations"} # AUTHENTICATION # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends AUTHENTICATION_BACKENDS = [ "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ] # https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model # AUTH_USER_MODEL = "users.User" # https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url # LOGIN_REDIRECT_URL = "users:redirect" # 
https://docs.djangoproject.com/en/dev/ref/settings/#login-url # LOGIN_URL = "account_login" # PASSWORDS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers PASSWORD_HASHERS = [ # https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django "django.contrib.auth.hashers.Argon2PasswordHasher", "django.contrib.auth.hashers.PBKDF2PasswordHasher", "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher", "django.contrib.auth.hashers.BCryptSHA256PasswordHasher", ] # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator" }, {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"}, {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"}, {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"}, ] # MIDDLEWARE # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#middleware MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.locale.LocaleMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] # STATIC # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = str(ROOT_DIR("staticfiles")) # https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = "/static/" # https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = [str(APPS_DIR.path("static"))] # https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = [ "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", ] # MEDIA # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = str(APPS_DIR("media")) # https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = "/media/" # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES = [ { # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND "BACKEND": "django.template.backends.django.DjangoTemplates", # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs "DIRS": [str(APPS_DIR.path("templates"))], "OPTIONS": { # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types "loaders": [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ], # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.template.context_processors.i18n", 
"django.template.context_processors.media", "django.template.context_processors.static", "django.template.context_processors.tz", "django.contrib.messages.context_processors.messages", "webscrape.utils.context_processors.settings_context", ], }, } ] # http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs CRISPY_TEMPLATE_PACK = "bootstrap4" # FIXTURES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs FIXTURE_DIRS = (str(APPS_DIR.path("fixtures")),) # SECURITY # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly SESSION_COOKIE_HTTPONLY = True # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly CSRF_COOKIE_HTTPONLY = True # https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter SECURE_BROWSER_XSS_FILTER = True # https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options X_FRAME_OPTIONS = "DENY" # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = env( "DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend" ) # https://docs.djangoproject.com/en/2.2/ref/settings/#email-timeout EMAIL_TIMEOUT = 5 # ADMIN # ------------------------------------------------------------------------------ # Django Admin URL. ADMIN_URL = "admin/" # https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = [("""Sukant Priyadarshi""", "sukant1994@gmail.com")] # https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS # LOGGING # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#logging # See https://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { "version": 1, "disable_existing_loggers": False, "formatters": { "verbose": { "format": "%(levelname)s %(asctime)s %(module)s " "%(process)d %(thread)d %(message)s" } }, "handlers": { "console": { "level": "DEBUG", "class": "logging.StreamHandler", "formatter": "verbose", } }, "root": {"level": "INFO", "handlers": ["console"]}, } # django-allauth # ------------------------------------------------------------------------------ ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True) # https://django-allauth.readthedocs.io/en/latest/configuration.html ACCOUNT_AUTHENTICATION_METHOD = "username" # https://django-allauth.readthedocs.io/en/latest/configuration.html ACCOUNT_EMAIL_REQUIRED = True # https://django-allauth.readthedocs.io/en/latest/configuration.html ACCOUNT_EMAIL_VERIFICATION = "mandatory" # https://django-allauth.readthedocs.io/en/latest/configuration.html # ACCOUNT_ADAPTER = "webscrape.users.adapters.AccountAdapter" # https://django-allauth.readthedocs.io/en/latest/configuration.html # SOCIALACCOUNT_ADAPTER = "webscrape.users.adapters.SocialAccountAdapter" # Your stuff... # ------------------------------------------------------------------------------
"This creates an HDF5 file with a potentially large number of objects" import sys import numpy import tables filename = sys.argv[1] # Open a new empty HDF5 file fileh = tables.open_file(filename, mode="w") # nlevels -- Number of levels in hierarchy # ngroups -- Number of groups on each level # ndatasets -- Number of arrays on each group # LR: Low ratio groups/datasets #nlevels, ngroups, ndatasets = (3, 1, 1000) # MR: Medium ratio groups/datasets nlevels, ngroups, ndatasets = (3, 10, 100) #nlevels, ngroups, ndatasets = (3, 5, 10) # HR: High ratio groups/datasets #nlevels, ngroups, ndatasets = (30, 10, 10) # Create an Array to save on disk a = numpy.array([-1, 2, 4], numpy.int16) group = fileh.root group2 = fileh.root for k in range(nlevels): for j in range(ngroups): for i in range(ndatasets): # Save the array on the HDF5 file fileh.create_array(group2, 'array' + str(i), a, "Signed short array") # Create a new group group2 = fileh.create_group(group, 'group' + str(j)) # Create a new group group3 = fileh.create_group(group, 'ngroup' + str(k)) # Iterate over this new group (group3) group = group3 group2 = group3 fileh.close()
import logging
import os
import unittest

import pypesto
import pypesto.logging


class LoggingTest(unittest.TestCase):

    def test_optimize(self):
        # logging
        pypesto.logging.log_to_console(logging.WARN)
        filename = ".test_logging.tmp"
        pypesto.logging.log_to_file(logging.DEBUG, filename)
        logger = logging.getLogger('pypesto')
        if os.path.exists(filename):
            os.remove(filename)
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)
        logger.info("start test")

        # problem definition
        def fun(_):
            raise Exception("This function cannot be called.")

        objective = pypesto.Objective(fun=fun)
        problem = pypesto.Problem(objective, -1, 1)

        optimizer = pypesto.ScipyOptimizer()
        options = {'allow_failed_starts': True}

        # optimization
        pypesto.minimize(problem, optimizer, 5, options=options)

        # assert logging worked
        self.assertTrue(os.path.exists(filename))
        f = open(filename, 'rb')
        content = str(f.read())
        f.close()

        # tidy up
        os.remove(filename)

        # check if error message got inserted
        self.assertTrue("fail" in content)


if __name__ == '__main__':
    suite = unittest.TestSuite()
    # the test method must be named when adding a TestCase instance directly,
    # and the suite has to be run explicitly (unittest.main() would ignore it)
    suite.addTest(LoggingTest('test_optimize'))
    unittest.TextTestRunner().run(suite)
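# A minimal usage sketch outside of a unit test, assuming the same pypesto API
# that the test above exercises (Objective, Problem, ScipyOptimizer, minimize);
# the quadratic objective and the log file name are illustrative assumptions.
import logging

import numpy as np
import pypesto
import pypesto.logging

pypesto.logging.log_to_console(logging.INFO)
pypesto.logging.log_to_file(logging.DEBUG, "optimize.log")

objective = pypesto.Objective(fun=lambda x: float(np.sum(x ** 2)))
problem = pypesto.Problem(objective, lb=[-1], ub=[1])
result = pypesto.minimize(problem, pypesto.ScipyOptimizer(), n_starts=5)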
from django.urls import path

from .views import audit_view

urlpatterns = [
    path('', audit_view, name="audit")
]
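# The urlconf above assumes an `audit_view` in the app's views module; this is
# a hypothetical minimal sketch of such a view (the template name is an
# assumption), included only to make the wiring concrete.
from django.shortcuts import render


def audit_view(request):
    return render(request, "audit/audit.html", {})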
from cashbook.models import CashBookTransaction from controls.models import ModuleSettings, Period from django.contrib.auth.mixins import LoginRequiredMixin from django.db.models import F, OuterRef, Subquery, Sum from django.db.models.functions import Coalesce from django.views.generic import TemplateView from purchases.models import PurchaseHeader, PurchaseMatching from sales.models import SaleHeader, SaleMatching class TotalOwedReport: def __init__(self, header_model, match_model): self.header_model = header_model self.match_model = match_model def _report(self, matched_by, matched_to, types, period_subquery): return ( self.header_model .objects .filter(type__in=types) .filter(period__fy_and_period__in=Subquery(period_subquery)) .annotate( mbt=Coalesce( Subquery( matched_by.values('matched_by_total') ), 0 ) ) .annotate( mtt=Coalesce( Subquery( matched_to.values('matched_to_total') ), 0 ) ) .annotate( actual_due=F('due') + F('mbt') + F('mtt') ) ) def _report_per_period_for_last_5_periods(self, matched_by, matched_to, types, period): period_subquery = ( Period .objects .filter(fy_and_period__lte=period.fy_and_period) .values('fy_and_period') .order_by("-fy_and_period") [:5] ) q = ( self ._report(matched_by, matched_to, types, period_subquery) .values('period__fy_and_period') .annotate( total_due=Coalesce(Sum('actual_due'), 0) ) ) report = {} for period in period_subquery: report[period["fy_and_period"]] = 0 for period in q: report[period["period__fy_and_period"]] = period["total_due"] return report def _report_for_all_periods_prior(self, matched_by, matched_to, types, period): """ Get the total owed for all periods prior to @period i.e. the total for 'Older' """ period_subquery = ( Period .objects .filter(fy_and_period__lte=period.fy_and_period) .values('fy_and_period') .order_by("-fy_and_period") [5:] ) return ( self ._report(matched_by, matched_to, types, period_subquery) .aggregate( total_due=Coalesce(Sum('actual_due'), 0) ) ) def report(self, current_period): """ This is used by the dashboard and not the aged creditors report """ matched_by = ( self.match_model .objects .filter(period__fy_and_period__gt=current_period.fy_and_period) .filter(matched_by=OuterRef('pk')) .values('matched_by') .annotate(matched_by_total=Sum('value') * -1) ) matched_to = ( self.match_model .objects .filter(period__fy_and_period__gt=current_period.fy_and_period) .filter(matched_to=OuterRef('pk')) .values('matched_to') .annotate(matched_to_total=Sum('value')) ) non_payment_types = [ t[0] for t in self.header_model.types if t[0] not in self.header_model.payment_types ] report_from_current_to_4_periods_ago = self._report_per_period_for_last_5_periods( matched_by, matched_to, non_payment_types, current_period) older = self._report_for_all_periods_prior( matched_by, matched_to, non_payment_types, current_period) report = [] labels = ["Current", "1 period ago", "2 periods ago", "3 periods ago", "4 periods ago"] for i, (period, value) in enumerate(report_from_current_to_4_periods_ago.items()): r = { "period": labels[i], "value": value } report.append(r) report.append({ "period": "Older", "value": older["total_due"] }) report.reverse() # In UI we actually want 'Older' to show first from left to right i.e. 
opposite of list return report class DashBoard(LoginRequiredMixin, TemplateView): template_name = "dashboard/dashboard.html" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) mod_settings = ModuleSettings.objects.first() cash_book_period = mod_settings.cash_book_period cash_book_in_and_out_report = ( CashBookTransaction .objects .cash_book_in_and_out_report(cash_book_period) ) cash_book_in_and_out = [] for period in cash_book_in_and_out_report: p = period["period__fy_and_period"] o = {} o["period"] = p[4:] + " " + p[:4] o["in"] = period["total_monies_in"] o["out"] = period["total_monies_out"] cash_book_in_and_out.append(o) context["cash_in_and_out"] = cash_book_in_and_out owed_to_you = TotalOwedReport( SaleHeader, SaleMatching).report(mod_settings.sales_period) owed_by_you = TotalOwedReport(PurchaseHeader, PurchaseMatching).report( mod_settings.purchases_period) context["owed_to_you"] = owed_to_you context["owed_by_you"] = owed_by_you return context
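# A small illustration (not part of the view above) of how the owed_to_you /
# owed_by_you structures could be split into label and value lists, e.g. for a
# charting library in the dashboard template; the helper name is an assumption.
def report_to_chart_series(report):
    labels = [row["period"] for row in report]   # "Older" first, "Current" last
    values = [row["value"] for row in report]
    return labels, values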
import os import errno import certifi import requests from deriva.core import urlsplit, get_new_requests_session, stob, make_dirs, DEFAULT_SESSION_CONFIG from deriva.transfer.download import DerivaDownloadError, DerivaDownloadConfigurationError, \ DerivaDownloadAuthenticationError, DerivaDownloadAuthorizationError from deriva.transfer.download.processors.base_processor import BaseProcessor, \ LOCAL_PATH_KEY, FILE_SIZE_KEY, SOURCE_URL_KEY from bdbag import bdbag_ro as ro class BaseQueryProcessor(BaseProcessor): """ Base class for QueryProcessor classes """ HEADERS = {'Connection': 'keep-alive'} def __init__(self, envars=None, **kwargs): super(BaseQueryProcessor, self).__init__(envars, **kwargs) self.catalog = kwargs["catalog"] self.store = kwargs["store"] self.base_path = kwargs["base_path"] self.query = self.parameters["query_path"] if self.envars: self.query = self.query.format(**self.envars) self.sub_path = self.parameters.get("output_path") self.output_filename = self.parameters.get("output_filename") self.store_base = kwargs.get("store_base", "/hatrac/") self.is_bag = kwargs.get("bag", False) self.sessions = kwargs.get("sessions", dict()) self.content_type = "application/octet-stream" self.url = ''.join([self.catalog.get_server_uri(), self.query]) self.ro_file_provenance = stob(self.parameters.get("ro_file_provenance", False if not self.is_bag else True)) self.ro_manifest = self.kwargs.get("ro_manifest") self.ro_author_name = self.kwargs.get("ro_author_name") self.ro_author_orcid = self.kwargs.get("ro_author_orcid") self.output_relpath = None self.output_abspath = None self.paged_query = self.parameters.get("paged_query", False) self.paged_query_size = self.parameters.get("paged_query_size", 100000) def process(self): resp = self.catalogQuery(headers={'accept': self.content_type}) if os.path.isfile(self.output_abspath): if self.ro_manifest and self.ro_file_provenance: ro.add_file_metadata(self.ro_manifest, source_url=self.url, local_path=self.output_relpath, media_type=self.content_type, retrieved_on=ro.make_retrieved_on(), retrieved_by=ro.make_retrieved_by(self.ro_author_name, orcid=self.ro_author_orcid), bundled_as=ro.make_bundled_as()) self.outputs.update({self.output_relpath: {LOCAL_PATH_KEY: self.output_abspath, FILE_SIZE_KEY: os.path.getsize(self.output_abspath), SOURCE_URL_KEY: self.url}}) return self.outputs def catalogQuery(self, headers=None, as_file=True): if not headers: headers = self.HEADERS.copy() else: headers.update(self.HEADERS) if as_file: output_dir = os.path.dirname(self.output_abspath) make_dirs(output_dir) try: if as_file: return self.catalog.getAsFile(self.query, self.output_abspath, headers=headers, delete_if_empty=True, paged=self.paged_query, page_size=self.paged_query_size) else: return self.catalog.get(self.query, headers=headers).json() except requests.HTTPError as e: if e.response.status_code == 401: raise DerivaDownloadAuthenticationError(e) if e.response.status_code == 403: raise DerivaDownloadAuthorizationError(e) if as_file: os.remove(self.output_abspath) raise DerivaDownloadError("Error executing catalog query: %s" % e) except Exception: if as_file: os.remove(self.output_abspath) raise def headForHeaders(self, url, raise_for_status=False): store = self.getHatracStore(url) if store: r = store.head(url, headers=self.HEADERS) if raise_for_status: r.raise_for_status() headers = r.headers else: url = self.getExternalUrl(url) session = self.getExternalSession(urlsplit(url).hostname) r = session.head(url, headers=self.HEADERS) if raise_for_status: 
r.raise_for_status() headers = r.headers return headers def getHatracStore(self, url): urlparts = urlsplit(url) if not urlparts.path.startswith(self.store_base): return None if url.startswith(self.store_base): return self.store else: serverURI = urlparts.scheme + "://" + urlparts.netloc if serverURI == self.store.get_server_uri(): return self.store else: # do we need to deal with the possibility of a fully qualified URL referencing a different hatrac host? raise DerivaDownloadConfigurationError( "Got a reference to a Hatrac server [%s] that is different from the expected Hatrac server: %s" % ( serverURI, self.store.get_server_uri)) def getExternalUrl(self, url): urlparts = urlsplit(url) if urlparts.path.startswith(self.store_base): path_only = url.startswith(self.store_base) server_uri = urlparts.scheme + "://" + urlparts.netloc if server_uri == self.store.get_server_uri() or path_only: url = ''.join([self.store.get_server_uri(), url]) if path_only else url else: if not (urlparts.scheme and urlparts.netloc): urlparts = urlsplit(self.catalog.get_server_uri()) server_uri = urlparts.scheme + "://" + urlparts.netloc url = ''.join([server_uri, url]) return url def getExternalSession(self, host): sessions = self.sessions auth_params = self.kwargs.get("auth_params", dict()) cookies = auth_params.get("cookies") auth_url = auth_params.get("auth_url") login_params = auth_params.get("login_params") session_config = self.kwargs.get("session_config") session = sessions.get(host) if session is not None: return session if not session_config: session_config = DEFAULT_SESSION_CONFIG session = get_new_requests_session(session_config=session_config) if cookies: session.cookies.update(cookies) if login_params and auth_url: r = session.post(auth_url, data=login_params, verify=certifi.where()) if r.status_code > 203: raise DerivaDownloadError( 'GetExternalSession Failed with Status Code: %s\n%s\n' % (r.status_code, r.text)) sessions[host] = session return session def create_default_paths(self): self.output_relpath, self.output_abspath = self.create_paths(self.base_path, sub_path=self.sub_path, filename=self.output_filename, ext=self.ext, is_bag=self.is_bag, envars=self.envars) def __del__(self): for session in self.sessions.values(): session.close() class CSVQueryProcessor(BaseQueryProcessor): def __init__(self, envars=None, **kwargs): super(CSVQueryProcessor, self).__init__(envars, **kwargs) self.ext = ".csv" self.content_type = "text/csv" self.create_default_paths() class JSONQueryProcessor(BaseQueryProcessor): def __init__(self, envars=None, **kwargs): super(JSONQueryProcessor, self).__init__(envars, **kwargs) self.ext = ".json" self.content_type = "application/json" self.create_default_paths() class JSONStreamQueryProcessor(BaseQueryProcessor): def __init__(self, envars=None, **kwargs): super(JSONStreamQueryProcessor, self).__init__(envars, **kwargs) self.ext = ".json" self.content_type = "application/x-json-stream" self.create_default_paths() class JSONEnvUpdateProcessor(BaseQueryProcessor): def __init__(self, envars=None, **kwargs): super(JSONEnvUpdateProcessor, self).__init__(envars, **kwargs) def process(self): resp = self.catalogQuery(headers={'accept': "application/json"}, as_file=False) if resp: self.envars.update(resp[0]) self._urlencode_envars() return {}
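# Hypothetical sketch of an additional processor following the same pattern as
# the CSV/JSON subclasses above; the class name and the
# "text/tab-separated-values" content type are assumptions, not part of the
# deriva package.
class TSVQueryProcessor(BaseQueryProcessor):
    def __init__(self, envars=None, **kwargs):
        super(TSVQueryProcessor, self).__init__(envars, **kwargs)
        self.ext = ".tsv"
        self.content_type = "text/tab-separated-values"
        self.create_default_paths()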
import unittest

from sampleproject_tests import mayaTest
from mayatdd.mayatest import insideMaya

if insideMaya:
    from maya import cmds


@mayaTest
class Test(unittest.TestCase):

    def testMinimal(self):
        '''
        do something with maya.cmds to prove we're actually running this
        test in Maya.
        '''
        print("running in maya!")
        cmds.sphere()
from app.routers.audio import router

AUDIO_SETTINGS_URL = router.url_path_for("audio_settings")
GET_CHOICES_URL = router.url_path_for("get_choices")
START_AUDIO_URL = router.url_path_for("start_audio")


def test_get_settings(audio_test_client):
    response = audio_test_client.get(url=AUDIO_SETTINGS_URL)
    assert response.ok
    assert b"Audio Settings" in response.content


def test_start_audio_default(audio_test_client):
    response = audio_test_client.get(START_AUDIO_URL)
    assert response.ok


def test_choices_Off(audio_test_client):
    data = {"music_on": False, "sfx_on": False}
    response = audio_test_client.post(url=GET_CHOICES_URL, data=data)
    assert response.ok


def test_choices_On(audio_test_client):
    data = {
        "music_on": True,
        "music_choices": ["GASTRONOMICA.mp3"],
        "music_vol": 50,
        "sfx_on": True,
        "sfx_choice": "click_1.wav",
        "sfx_vol": 50,
    }
    response = audio_test_client.post(url=GET_CHOICES_URL, data=data)
    assert response.ok


def test_start_audio(audio_test_client):
    data = {
        "music_on": True,
        "music_choices": ["GASTRONOMICA.mp3"],
        "music_vol": 50,
        "sfx_on": True,
        "sfx_choice": "click_1.wav",
        "sfx_vol": 50,
    }
    audio_test_client.post(url=GET_CHOICES_URL, data=data)
    response = audio_test_client.get(url=START_AUDIO_URL)
    assert response.ok


def test_start_audio_sfx_off(audio_test_client):
    data = {"music_on_off": "Off", "sfx_on_off": "Off"}
    audio_test_client.post(url=GET_CHOICES_URL, data=data)
    response = audio_test_client.get(url=START_AUDIO_URL)
    assert response.ok
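# The tests above rely on an `audio_test_client` fixture defined elsewhere
# (typically in conftest.py). This is a hedged sketch of what such a fixture
# could look like, assuming the project exposes a FastAPI `app` in app.main;
# that import path is an assumption.
import pytest
from fastapi.testclient import TestClient

from app.main import app


@pytest.fixture
def audio_test_client():
    return TestClient(app)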
#! /usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2015 Enrique Fernandez # Released under the BSD License. # # Authors: # * Enrique Fernandez import Tkinter import rospy from geometry_msgs.msg import Twist, Vector3 import numpy class MouseTeleop(): def __init__(self): # Retrieve params: self._frequency = rospy.get_param('~frequency', 0.0) self._scale = rospy.get_param('~scale', 1.0) self._holonomic = rospy.get_param('~holonomic', False) # Create twist publisher: self._pub_cmd = rospy.Publisher('mouse_vel', Twist, queue_size=100) # Initialize twist components to zero: self._v_x = 0.0 self._v_y = 0.0 self._w = 0.0 # Initialize mouse position (x, y) to None (unknown); it's initialized # when the mouse button is pressed on the _start callback that handles # that event: self._x = None self._y = None # Create window: self._root = Tkinter.Tk() self._root.title('Mouse Teleop') # Make window non-resizable: self._root.resizable(0, 0) # Create canvas: self._canvas = Tkinter.Canvas(self._root, bg='white') # Create canvas objects: self._canvas.create_arc(0, 0, 0, 0, fill='red', outline='red', width=1, style=Tkinter.PIESLICE, start=90.0, tag='w') self._canvas.create_line(0, 0, 0, 0, fill='blue', width=4, tag='v_x') if self._holonomic: self._canvas.create_line(0, 0, 0, 0, fill='blue', width=4, tag='v_y') # Create canvas text objects: self._text_v_x = Tkinter.StringVar() if self._holonomic: self._text_v_y = Tkinter.StringVar() self._text_w = Tkinter.StringVar() self._label_v_x = Tkinter.Label(self._root, anchor=Tkinter.W, textvariable=self._text_v_x) if self._holonomic: self._label_v_y = Tkinter.Label(self._root, anchor=Tkinter.W, textvariable=self._text_v_y) self._label_w = Tkinter.Label(self._root, anchor=Tkinter.W, textvariable=self._text_w) if self._holonomic: self._text_v_x.set('v_x = %0.2f m/s' % self._v_x) self._text_v_y.set('v_y = %0.2f m/s' % self._v_y) self._text_w.set( 'w = %0.2f deg/s' % self._w) else: self._text_v_x.set('v = %0.2f m/s' % self._v_x) self._text_w.set( 'w = %0.2f deg/s' % self._w) self._label_v_x.pack() if self._holonomic: self._label_v_y.pack() self._label_w.pack() # Bind event handlers: self._canvas.bind('<Button-1>', self._start) self._canvas.bind('<ButtonRelease-1>', self._release) self._canvas.bind('<Configure>', self._configure) if self._holonomic: self._canvas.bind('<B1-Motion>', self._mouse_motion_linear) self._canvas.bind('<Shift-B1-Motion>', self._mouse_motion_angular) self._root.bind('<Shift_L>', self._change_to_motion_angular) self._root.bind('<KeyRelease-Shift_L>', self._change_to_motion_linear) else: self._canvas.bind('<B1-Motion>', self._mouse_motion_angular) self._canvas.pack() # If frequency is positive, use synchronous publishing mode: if self._frequency > 0.0: # Create timer for the given frequency to publish the twist: period = rospy.Duration(1.0 / self._frequency) self._timer = rospy.Timer(period, self._publish_twist) # Start window event manager main loop: self._root.mainloop() def __del__(self): if self._frequency > 0.0: self._timer.shutdown() self._root.quit() def _start(self, event): self._x, self._y = event.y, event.x self._y_linear = self._y_angular = 0 self._v_x = self._v_y = self._w = 0.0 def _release(self, event): self._v_x = self._v_y = self._w = 0.0 self._send_motion() def _configure(self, event): self._width, self._height = event.height, event.width self._c_x = self._height / 2.0 self._c_y = self._width / 2.0 self._r = min(self._height, self._width) * 0.25 def _mouse_motion_linear(self, event): self._v_x, self._v_y = 
self._relative_motion(event.y, event.x) self._send_motion() def _mouse_motion_angular(self, event): self._v_x, self._w = self._relative_motion(event.y, event.x) self._send_motion() def _update_coords(self, tag, x0, y0, x1, y1): x0 += self._c_x y0 += self._c_y x1 += self._c_x y1 += self._c_y self._canvas.coords(tag, (x0, y0, x1, y1)) def _draw_v_x(self, v): x = -v * float(self._width) self._update_coords('v_x', 0, 0, 0, x) def _draw_v_y(self, v): y = -v * float(self._height) self._update_coords('v_y', 0, 0, y, 0) def _draw_w(self, w): x0 = y0 = -self._r x1 = y1 = self._r self._update_coords('w', x0, y0, x1, y1) yaw = w * numpy.rad2deg(self._scale) self._canvas.itemconfig('w', extent=yaw) def _send_motion(self): v_x = self._v_x * self._scale v_y = self._v_y * self._scale w = self._w * self._scale linear = Vector3(v_x, v_y, 0.0) angular = Vector3(0.0, 0.0, w) self._draw_v_x(self._v_x) if self._holonomic: self._draw_v_y(self._v_y) self._draw_w(self._w) if self._holonomic: self._text_v_x.set('v_x = %0.2f m/s' % self._v_x) self._text_v_y.set('v_y = %0.2f m/s' % self._v_y) self._text_w.set( 'w = %0.2f deg/s' % numpy.rad2deg(self._w)) else: self._text_v_x.set('v = %0.2f m/s' % self._v_x) self._text_w.set( 'w = %0.2f deg/s' % numpy.rad2deg(self._w)) twist = Twist(linear, angular) self._pub_cmd.publish(twist) def _publish_twist(self, event): self._send_motion() def _relative_motion(self, x, y): dx = self._x - x dy = self._y - y dx /= float(self._width) dy /= float(self._height) dx = max(-1.0, min(dx, 1.0)) dy = max(-1.0, min(dy, 1.0)) return dx, dy def _change_to_motion_linear(self, event): if self._y is not None: y = event.x self._y_angular = self._y - y self._y = self._y_linear + y def _change_to_motion_angular(self, event): if self._y is not None: y = event.x self._y_linear = self._y - y self._y = self._y_angular + y def main(): rospy.init_node('mouse_teleop') MouseTeleop() if __name__ == '__main__': try: main() except rospy.ROSInterruptException: pass
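# A minimal companion node (an illustrative sketch, not part of mouse_teleop)
# that subscribes to the 'mouse_vel' topic published above and logs the
# commanded velocities.
import rospy
from geometry_msgs.msg import Twist


def _on_twist(msg):
    rospy.loginfo('v_x=%.2f v_y=%.2f w=%.2f',
                  msg.linear.x, msg.linear.y, msg.angular.z)


if __name__ == '__main__':
    rospy.init_node('mouse_vel_echo')
    rospy.Subscriber('mouse_vel', Twist, _on_twist)
    rospy.spin()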
from typing import Tuple, Union, Callable, Optional, Sequence from pytest_mock import MockerFixture import pytest import numpy as np import dask.array as da from squidpy.im import ( segment, ImageContainer, SegmentationCustom, SegmentationWatershed, ) from squidpy.im._segment import _SEG_DTYPE from squidpy._constants._constants import SegmentationBackend from squidpy._constants._pkg_constants import Key def dummy_segment(arr: np.ndarray) -> np.ndarray: assert isinstance(arr, np.ndarray) assert arr.ndim == 3 return arr[..., 0].astype(np.uint32) class TestGeneral: @pytest.mark.parametrize("ndim", [2, 3]) def test_input_ndim(self, ndim: int): img = np.zeros(shape=(10, 10)) if ndim == 3: img = img[..., np.newaxis] sc = SegmentationCustom(dummy_segment) res = sc.segment(img) assert isinstance(res, np.ndarray) assert res.ndim == 3 if ndim == 2: assert res.shape == img.shape + (1,) else: assert res.shape == img.shape def test_segment_invalid_shape(self): img = np.zeros(shape=(1, 10, 10, 2)) sc = SegmentationCustom(dummy_segment) with pytest.raises(ValueError, match=r"Expected `2` or `3` dimensions"): sc.segment(img) def test_segment_container(self): img = ImageContainer(np.zeros(shape=(10, 10, 1)), layer="image") sc = SegmentationCustom(dummy_segment) res = sc.segment(img, layer="image", library_id=img["image"].z.values[0]) assert isinstance(res, ImageContainer) assert res.shape == img.shape assert "image" in res assert res["image"].dims == img["image"].dims class TestWatershed: @pytest.mark.parametrize("thresh", [None, 0.1, 0.5, 1.0]) def test_threshold(self, thresh: Optional[float], mocker: MockerFixture): img = np.zeros((100, 200), dtype=np.float64) img[2:10, 2:10] = 1.0 img[30:34, 10:16] = 1.0 img = ImageContainer(img, layer="image") sw = SegmentationWatershed() spy = mocker.spy(sw, "_segment") res = sw.segment(img, layer="image", library_id=img["image"].z.values[0], fn_kwargs={"thresh": thresh}) assert isinstance(res, ImageContainer) spy.assert_called_once() call = spy.call_args_list[0] assert call[1]["thresh"] == thresh class TestHighLevel: def test_invalid_layer(self, small_cont: ImageContainer): with pytest.raises(KeyError, match=r"Image layer `foobar` not found in"): segment(small_cont, layer="foobar") @pytest.mark.parametrize("method", ["watershed", dummy_segment]) def test_method(self, small_cont: ImageContainer, method: Union[str, Callable]): res = segment(small_cont, method=method, copy=True) assert isinstance(res, ImageContainer) assert res.shape == small_cont.shape if callable(method): method = SegmentationBackend.CUSTOM.s assert Key.img.segment(method) in res if method in ("log", "dog", "dog"): assert res[Key.img.segment(method)].values.max() <= 1 @pytest.mark.parametrize("dy", [11, 0.5, None]) @pytest.mark.parametrize("dx", [15, 0.1, None]) def test_size(self, small_cont: ImageContainer, dy: Optional[Union[int, float]], dx: Optional[Union[int, float]]): res = segment(small_cont, size=(dy, dx), copy=True) assert isinstance(res, ImageContainer) assert res.shape == small_cont.shape @pytest.mark.parametrize("channel", [0, 1, 2]) def test_channel(self, small_cont: ImageContainer, channel: int): segment(small_cont, copy=False, layer="image", channel=channel) assert Key.img.segment("watershed") in small_cont np.testing.assert_array_equal( list(small_cont[Key.img.segment("watershed")].dims), ["y", "x", "z", f"{small_cont['image'].dims[-1]}:{channel}"], ) def test_all_channels(self, small_cont: ImageContainer): def func(arr: np.ndarray): assert arr.shape == (small_cont.shape + 
(n_channels,)) return np.zeros(arr.shape[:2], dtype=np.uint8) n_channels = small_cont["image"].sizes["channels"] segment(small_cont, copy=False, layer="image", channel=None, method=func, layer_added="seg") np.testing.assert_array_equal(small_cont["seg"], np.zeros(small_cont.shape + (1, 1))) assert small_cont["seg"].dtype == _SEG_DTYPE @pytest.mark.parametrize("key_added", [None, "foo"]) def test_key_added(self, small_cont: ImageContainer, key_added: Optional[str]): res = segment(small_cont, copy=False, layer="image", layer_added=key_added) assert res is None assert Key.img.segment("watershed", layer_added=key_added) in small_cont def test_passing_kwargs(self, small_cont: ImageContainer): def func(chunk: np.ndarray, sentinel: bool = False): assert sentinel, "Sentinel not set." return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE) segment( small_cont, method=func, layer="image", layer_added="bar", chunks=25, lazy=False, depth=None, sentinel=True ) assert small_cont["bar"].values.dtype == _SEG_DTYPE np.testing.assert_array_equal(small_cont["bar"].values, 0) @pytest.mark.parametrize("dask_input", [False, True]) @pytest.mark.parametrize("chunks", [25, (50, 50, 1), "auto"]) @pytest.mark.parametrize("lazy", [False, True]) def test_dask_segment( self, small_cont: ImageContainer, dask_input: bool, chunks: Union[int, Tuple[int, ...], str], lazy: bool ): def func(chunk: np.ndarray): if isinstance(chunks, tuple): np.testing.assert_array_equal(chunk.shape, [chunks[0] + 2 * d, chunks[1] + 2 * d, 1]) elif isinstance(chunks, int): np.testing.assert_array_equal(chunk.shape, [chunks + 2 * d, chunks + 2 * d, 1]) return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE) small_cont["foo"] = da.asarray(small_cont["image"].data) if dask_input else small_cont["image"].values d = 10 # overlap depth assert isinstance(small_cont["foo"].data, da.Array if dask_input else np.ndarray) segment(small_cont, method=func, layer="foo", layer_added="bar", chunks=chunks, lazy=lazy, depth={0: d, 1: d}) if lazy: assert isinstance(small_cont["bar"].data, da.Array) small_cont.compute() assert isinstance(small_cont["foo"].data, np.ndarray) else: # make sure we didn't accidentally trigger foo's computation assert isinstance(small_cont["foo"].data, da.Array if dask_input else np.ndarray) assert isinstance(small_cont["bar"].data, np.ndarray) assert small_cont["bar"].values.dtype == _SEG_DTYPE np.testing.assert_array_equal(small_cont["bar"].values, 0) def test_copy(self, small_cont: ImageContainer): prev_keys = set(small_cont) res = segment(small_cont, copy=True, layer="image") assert isinstance(res, ImageContainer) assert set(small_cont) == prev_keys assert Key.img.segment("watershed") in res def test_parallelize(self, small_cont: ImageContainer): res1 = segment(small_cont, layer="image", n_jobs=1, copy=True) res2 = segment(small_cont, layer="image", n_jobs=2, copy=True) np.testing.assert_array_equal( res1[Key.img.segment("watershed")].values, res2[Key.img.segment("watershed")].values ) @pytest.mark.parametrize("chunks", [25, 50]) def test_blocking(self, small_cont: ImageContainer, chunks: int): def func(chunk: np.ndarray): labels = np.zeros(chunk[..., 0].shape, dtype=np.uint32) labels[0, 0] = 1 return labels segment(small_cont, method=func, layer="image", layer_added="bar", chunks=chunks, lazy=False, depth=None) # blocks are label from top-left to bottom-right in an ascending order [0, num_blocks - 1] # lowest n bits are allocated for block, rest is for the label (i.e. 
for blocksize=25, we need 16 blocks ids # from [0, 15], which can be stored in 4 bits, then we just prepend 1 bit (see the above `func`, resulting # in unique 16 labels [10000, 11111] expected = np.zeros_like(small_cont["bar"].values) start = 16 if chunks == 25 else 4 for i in range(0, 100, chunks): for j in range(0, 100, chunks): expected[i, j] = start start += 1 assert small_cont["bar"].values.dtype == _SEG_DTYPE np.testing.assert_array_equal(small_cont["bar"].values, expected) @pytest.mark.parametrize("size", [None, 11]) def test_watershed_works(self, size: Optional[int]): img_orig = np.zeros((100, 200, 30), dtype=np.float64) img_orig[2:10, 2:10] = 1.0 img_orig[30:34, 10:16] = 1.0 cont = ImageContainer(img_orig, layer="image_0") segment( img=cont, method="watershed", layer="image_0", layer_added="segment", size=size, channel=0, thresh=0.5, ) # check that blobs are in segments assert np.mean(cont.data["segment"].values[img_orig[:, :, 0] > 0] > 0) > 0.5 # for size=10, "fails with `size=10` due to border effects" # the reason why there is no test for it that inside tox, it "works" (i.e. the assertion passes) # but outside, the assertion fails, as it should @pytest.mark.parametrize("library_id", [None, "3", ["1", "2"]]) def test_library_id(self, cont_4d: ImageContainer, library_id: Optional[Union[str, Sequence[str]]]): def func(arr: np.ndarray): assert arr.shape == cont_4d.shape + (1,) return np.ones(arr[..., 0].shape, dtype=_SEG_DTYPE) segment(cont_4d, method=func, layer="image", layer_added="image_seg", library_id=library_id, copy=False) np.testing.assert_array_equal(cont_4d["image"].coords, cont_4d["image_seg"].coords) if library_id is None: np.testing.assert_array_equal(1, cont_4d["image_seg"]) else: if isinstance(library_id, str): library_id = [library_id] for lid in library_id: np.testing.assert_array_equal(1, cont_4d["image_seg"].sel(z=lid)) for lid in set(cont_4d.library_ids) - set(library_id): # channels have been changed, apply sets to 0 np.testing.assert_array_equal(0, cont_4d["image_seg"].sel(z=lid))
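# Stand-alone usage sketch mirroring what the tests above exercise: wrapping a
# plain numpy function in SegmentationCustom and applying it to an
# ImageContainer. The random image and threshold are assumptions made only for
# illustration.
import numpy as np
from squidpy.im import ImageContainer, SegmentationCustom


def first_channel_as_labels(arr: np.ndarray) -> np.ndarray:
    # arr arrives as (y, x, channels); return one label plane
    return (arr[..., 0] > 0.5).astype(np.uint32)


img = ImageContainer(np.random.rand(64, 64, 3), layer="image")
seg = SegmentationCustom(first_channel_as_labels)
res = seg.segment(img, layer="image", library_id=img["image"].z.values[0])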
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import argparse import contextlib import os import subprocess from pathlib import Path RESTLER_TEMP_DIR = 'restler_working_dir' @contextlib.contextmanager def usedir(dir): """ Helper for 'with' statements that changes the current directory to @dir and then changes the directory back to its original once the 'with' ends. Can be thought of like pushd with an auto popd after the 'with' scope ends """ curr = os.getcwd() os.chdir(dir) try: yield finally: os.chdir(curr) def compile_spec(api_spec_path, restler_dll_path): """ Compiles a specified api spec @param api_spec_path: The absolute path to the Swagger file to compile @type api_spec_path: Str @param restler_dll_path: The absolute path to the RESTler driver's dll @type restler_dll_path: Str @return: None @rtype : None """ if not os.path.exists(RESTLER_TEMP_DIR): os.makedirs(RESTLER_TEMP_DIR) with usedir(RESTLER_TEMP_DIR): command=f"dotnet \"{restler_dll_path}\" compile --api_spec \"{api_spec_path}\"" print(f"command: {command}") subprocess.run(command, shell=True) def add_common_settings(ip, port, host, use_ssl, command): if not use_ssl: command = f"{command} --no_ssl" if ip is not None: command = f"{command} --target_ip {ip}" if port is not None: command = f"{command} --target_port {port}" if host is not None: command = f"{command} --host {host}" return command def replay_bug(ip, port, host, use_ssl, restler_dll_path, replay_log): """ Runs RESTler's replay mode on the specified replay file """ with usedir(RESTLER_TEMP_DIR): command = ( f"dotnet \"{restler_dll_path}\" replay --replay_log \"{replay_log}\"" ) command = add_common_settings(ip, port, host, use_ssl, command) print(f"command: {command}\n") subprocess.run(command, shell=True) def replay_from_dir(ip, port, host, use_ssl, restler_dll_path, replay_dir): import glob from pathlib import Path # get all the 500 replay files in the bug buckets directory bug_buckets = glob.glob(os.path.join(replay_dir, 'RestlerResults', '**/bug_buckets/*500*')) print(f"buckets: {bug_buckets}") for file_path in bug_buckets: if "bug_buckets" in os.path.basename(file_path): continue print(f"Testing replay file: {file_path}") replay_bug(ip, port, host, use_ssl, restler_dll_path, Path(file_path).absolute()) pass def test_spec(ip, port, host, use_ssl, restler_dll_path, task): """ Runs RESTler's test mode on a specified Compile directory @param ip: The IP of the service to test @type ip: Str @param port: The port of the service to test @type port: Str @param host: The hostname of the service to test @type host: Str @param use_ssl: If False, set the --no_ssl parameter when executing RESTler @type use_ssl: Boolean @param restler_dll_path: The absolute path to the RESTler driver's dll @type restler_dll_path: Str @return: None @rtype : None """ import json with usedir(RESTLER_TEMP_DIR): compile_dir = Path(f'Compile') grammar_file_path = compile_dir.joinpath('grammar.py') dictionary_file_path = compile_dir.joinpath('dict.json') settings_file_path = compile_dir.joinpath('engine_settings.json') command = ( f"dotnet \"{restler_dll_path}\" {task} --grammar_file \"{grammar_file_path}\" --dictionary_file \"{dictionary_file_path}\"" f" --settings \"{settings_file_path}\"" ) print(f"command: {command}\n") command = add_common_settings(ip, port, host, use_ssl, command) subprocess.run(command, shell=True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--api_spec_path', help='The API Swagger specification to compile and 
test', type=str, required=False, default=None) parser.add_argument('--ip', help='The IP of the service to test', type=str, required=False, default=None) parser.add_argument('--port', help='The port of the service to test', type=str, required=False, default=None) parser.add_argument('--restler_drop_dir', help="The path to the RESTler drop", type=str, required=True) parser.add_argument('--use_ssl', help='Set this flag if you want to use SSL validation for the socket', action='store_true') parser.add_argument('--host', help='The hostname of the service to test', type=str, required=False, default=None) parser.add_argument('--task', help='The task to run (test, fuzz-lean, fuzz, or replay)' 'For test, fuzz-lean, and fuzz, the spec is compiled first.' 'For replay, bug buckets from the specified task directory are re-played.', type=str, required=False, default='test') parser.add_argument('--replay_bug_buckets_dir', help='For the replay task, specifies the directory in which to search for bug buckets.', type=str, required=False, default=None) args = parser.parse_args() restler_dll_path = Path(os.path.abspath(args.restler_drop_dir)).joinpath('restler', 'Restler.dll') print(f"\nrestler_dll_path: {restler_dll_path}\n") if args.task == "replay": replay_from_dir(args.ip, args.port, args.host, args.use_ssl, restler_dll_path.absolute(), args.replay_bug_buckets_dir) else: if args.api_spec_path is None: print("api_spec_path is required for all tasks except the replay task.") exit(-1) api_spec_path = os.path.abspath(args.api_spec_path) compile_spec(api_spec_path, restler_dll_path.absolute()) test_spec(args.ip, args.port, args.host, args.use_ssl, restler_dll_path.absolute(), args.task) print(f"Test complete.\nSee {os.path.abspath(RESTLER_TEMP_DIR)} for results.")
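# Illustrative driver (an assumption, not part of the script above) showing how
# its helper functions could be called directly from Python instead of via
# argparse; the drop directory and Swagger paths are placeholders.
import os
from pathlib import Path

restler_dll = Path("/path/to/restler_drop/restler/Restler.dll")
compile_spec(os.path.abspath("swagger.json"), str(restler_dll))
test_spec(ip=None, port=None, host="localhost", use_ssl=True,
          restler_dll_path=str(restler_dll), task="test")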
import urllib from contextlib import suppress from django.conf import settings from django.contrib import messages from django.contrib.auth import login from django.core.exceptions import PermissionDenied, SuspiciousOperation from django.http import FileResponse, Http404, HttpResponseServerError from django.shortcuts import redirect from django.template import TemplateDoesNotExist, loader from django.urls import get_callable from django.utils.http import url_has_allowed_host_and_scheme from django.utils.timezone import now from django.views.generic import FormView from django.views.generic.detail import SingleObjectTemplateResponseMixin from django.views.generic.edit import ModelFormMixin, ProcessFormView from django_context_decorator import context from pretalx.cfp.forms.auth import ResetForm from pretalx.common.mail import SendMailException from pretalx.common.phrases import phrases from pretalx.person.forms import UserForm from pretalx.person.models import User class CreateOrUpdateView( SingleObjectTemplateResponseMixin, ModelFormMixin, ProcessFormView ): def set_object(self): if getattr(self, "object", None) is None: setattr(self, "object", None) with suppress(self.model.DoesNotExist, AttributeError): setattr(self, "object", self.get_object()) def get(self, request, *args, **kwargs): self.set_object() return super().get(request, *args, **kwargs) def post(self, request, *args, **kwargs): self.set_object() return super().post(request, *args, **kwargs) def is_form_bound(request, form_name, form_param="form"): return request.method == "POST" and request.POST.get(form_param) == form_name def get_static(request, path, content_type): # pragma: no cover """TODO: move to staticfiles usage as per https://gist.github.com/SmileyChris/8d472f2a67526e36f39f3c33520182bc This would avoid potential directory traversal by … a malicious urlconfig, so not a huge attack vector.""" path = settings.BASE_DIR / "pretalx/static" / path if not path.exists(): raise Http404() return FileResponse( open(path, "rb"), content_type=content_type, as_attachment=False ) class GenericLoginView(FormView): form_class = UserForm @context def password_reset_link(self): return self.get_password_reset_link() def dispatch(self, request, *args, **kwargs): if not self.request.user.is_anonymous: return redirect(self.get_success_url()) return super().dispatch(request, *args, **kwargs) def get_success_url(self): params = self.request.GET.copy() url = urllib.parse.unquote(params.pop("next", [""])[0]) params = "?" 
+ params.urlencode() if params else "" if url and url_has_allowed_host_and_scheme(url, allowed_hosts=None): return url + params return self.success_url + params def form_valid(self, form): pk = form.save() user = User.objects.filter(pk=pk).first() login(self.request, user, backend="django.contrib.auth.backends.ModelBackend") return redirect(self.get_success_url()) class GenericResetView(FormView): form_class = ResetForm def form_valid(self, form): user = form.cleaned_data["user"] if not user or ( user.pw_reset_time and (now() - user.pw_reset_time).total_seconds() < 3600 * 24 ): messages.success(self.request, phrases.cfp.auth_password_reset) return redirect(self.get_success_url()) try: user.reset_password( event=getattr(self.request, "event", None), orga="orga" in self.request.resolver_match.namespaces, ) except SendMailException: # pragma: no cover messages.error(self.request, phrases.base.error_sending_mail) return self.get(self.request, *self.args, **self.kwargs) messages.success(self.request, phrases.cfp.auth_password_reset) user.log_action("pretalx.user.password.reset") return redirect(self.get_success_url()) def handle_500(request): try: template = loader.get_template("500.html") except TemplateDoesNotExist: # pragma: no cover return HttpResponseServerError( "Internal server error. Please contact the administrator for details.", content_type="text/html", ) context = {} try: # This should never fail, but can't be too cautious in error views context["request_path"] = urllib.parse.quote(request.path) except Exception: # pragma: no cover pass return HttpResponseServerError(template.render(context)) def error_view(status_code): if status_code == 4031: return get_callable(settings.CSRF_FAILURE_VIEW) if status_code == 500: return handle_500 exceptions = { 400: SuspiciousOperation, 403: PermissionDenied, 404: Http404, } exception = exceptions[status_code] def error_view(request, *args, **kwargs): raise exception return error_view
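# Illustrative wiring (an assumption about the project layout, not taken from
# pretalx itself): because error_view(status_code) returns a view that simply
# raises the matching exception, it can be routed to throwaway URLs to preview
# the error pages, while handle_500 is registered as the server-error handler.
from django.urls import path

from pretalx.common import views as common_views

handler500 = common_views.handle_500

urlpatterns = [
    path("test/403/", common_views.error_view(403)),
    path("test/404/", common_views.error_view(404)),
]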
# Generated by Django 2.1 on 2019-10-12 09:44

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ticker', models.CharField(max_length=10)),
                ('open', models.FloatField()),
                ('close', models.FloatField()),
                ('volume', models.IntegerField()),
            ],
        ),
    ]
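# The migration above corresponds to a model along these lines (a
# reconstruction for illustration; field names and types are taken directly
# from the migration).
from django.db import models


class Stock(models.Model):
    ticker = models.CharField(max_length=10)
    open = models.FloatField()
    close = models.FloatField()
    volume = models.IntegerField()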
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for image utils.""" import errno import math import cryptography import ddt import mock from oslo_concurrency import processutils from oslo_utils import units from six.moves import builtins from cinder import exception from cinder.image import image_utils from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.volume import throttling class TestQemuImgInfo(test.TestCase): @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info(self, mock_exec, mock_info): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) output = image_utils.qemu_img_info(test_path) mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img', 'info', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_not_root(self, mock_exec, mock_info): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) output = image_utils.qemu_img_info(test_path, force_share=False, run_as_root=False) mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img', 'info', test_path, run_as_root=False, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) @mock.patch('cinder.image.image_utils.os') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_on_nt(self, mock_exec, mock_info, mock_os): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_os.name = 'nt' output = image_utils.qemu_img_info(test_path) mock_exec.assert_called_once_with('qemu-img', 'info', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) @mock.patch('cinder.utils.execute') def test_get_qemu_img_version(self, mock_exec): mock_out = "qemu-img version 2.0.0" mock_err = mock.sentinel.err mock_exec.return_value = (mock_out, mock_err) expected_version = [2, 0, 0] version = image_utils.get_qemu_img_version() mock_exec.assert_called_once_with('qemu-img', '--version', check_exit_code=False) self.assertEqual(expected_version, version) @mock.patch.object(image_utils, 'get_qemu_img_version') def test_validate_qemu_img_version(self, mock_get_qemu_img_version): fake_current_version = [1, 8] mock_get_qemu_img_version.return_value = fake_current_version minimum_version = '1.8' image_utils.check_qemu_img_version(minimum_version) mock_get_qemu_img_version.assert_called_once_with() @mock.patch.object(image_utils, 'get_qemu_img_version') def _test_validate_unsupported_qemu_img_version(self, mock_get_qemu_img_version, 
current_version=None): mock_get_qemu_img_version.return_value = current_version minimum_version = '2.0' self.assertRaises(exception.VolumeBackendAPIException, image_utils.check_qemu_img_version, minimum_version) mock_get_qemu_img_version.assert_called_once_with() def test_validate_qemu_img_version_not_installed(self): self._test_validate_unsupported_qemu_img_version() def test_validate_older_qemu_img_version(self): self._test_validate_unsupported_qemu_img_version( current_version=[1, 8]) @ddt.ddt class TestConvertImage(test.TestCase): @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_with_size_info(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value.virtual_size = 1048576 throttle = throttling.Throttle(prefix=['cgcmd']) with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True): output = image_utils.convert_image(source, dest, out_format, throttle=throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert', '-O', out_format, '-t', 'none', source, dest, run_as_root=True) mock_exec.reset_mock() with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False): output = image_utils.convert_image(source, dest, out_format) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, source, dest, run_as_root=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_without_size_info(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.side_effect = ValueError throttle = throttling.Throttle(prefix=['cgcmd']) with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True): output = image_utils.convert_image(source, dest, out_format, throttle=throttle) mock_info.assert_called_once_with(source, run_as_root=True) self.assertIsNone(output) mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert', '-O', out_format, '-t', 'none', source, dest, run_as_root=True) mock_exec.reset_mock() with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False): output = image_utils.convert_image(source, dest, out_format) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, source, dest, run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_defaults_not_block_dev_with_size_info(self, mock_isblk, mock_exec, mock_info, mock_odirect): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format out_subformat = 'fake_subformat' mock_info.return_value.virtual_size = 1048576 output = image_utils.convert_image(source, dest, out_format, out_subformat=out_subformat) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-o', 'subformat=%s' % out_subformat, source, dest, run_as_root=True) 
@mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_defaults_not_block_dev_without_size_info(self, mock_isblk, mock_exec, mock_info, mock_odirect): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format out_subformat = 'fake_subformat' mock_info.side_effect = ValueError output = image_utils.convert_image(source, dest, out_format, out_subformat=out_subformat) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-o', 'subformat=%s' % out_subformat, source, dest, run_as_root=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_ami_img(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value.virtual_size = 1048576 with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True): output = image_utils.convert_image(source, dest, out_format, src_format='AMI') self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-t', 'none', source, dest, run_as_root=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support') def test_convert_to_vhd(self, mock_check_odirect, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = "vhd" mock_info.return_value.virtual_size = 1048576 output = image_utils.convert_image(source, dest, out_format) self.assertIsNone(output) # Qemu uses the legacy "vpc" format name, instead of "vhd". 
mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', 'vpc', source, dest, run_as_root=True) @ddt.data(True, False) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_convert_to_qcow2(self, compress_option, mock_isblk, mock_exec, mock_info): self.override_config('image_compress_on_upload', compress_option) source = mock.sentinel.source dest = mock.sentinel.dest out_format = 'qcow2' mock_info.return_value.virtual_size = 1048576 image_utils.convert_image(source, dest, out_format, compress=True) exec_args = ['qemu-img', 'convert', '-O', 'qcow2'] if compress_option: exec_args.append('-c') exec_args.extend((source, dest)) mock_exec.assert_called_once_with(*exec_args, run_as_root=True) @mock.patch('cinder.image.image_utils.CONF') @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) @mock.patch('os.path.dirname', return_value='fakedir') @mock.patch('os.path.ismount', return_value=True) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') @mock.patch.object(image_utils.LOG, 'error') def test_not_enough_conversion_space(self, mock_log, mock_tempdir, mock_make, mock_ismount, mock_dirname, mock_isblk, mock_exec, mock_info, mock_odirect, mock_conf): source = mock.sentinel.source mock_conf.image_conversion_dir = 'fakedir' dest = [mock_conf.image_conversion_dir] out_format = mock.sentinel.out_format mock_info.side_effect = ValueError mock_exec.side_effect = processutils.ProcessExecutionError( stderr='No space left on device') self.assertRaises(processutils.ProcessExecutionError, image_utils.convert_image, source, dest, out_format) mock_log.assert_called_with('Insufficient free space on fakedir for' ' image conversion.') class TestResizeImage(test.TestCase): @mock.patch('cinder.utils.execute') def test_defaults(self, mock_exec): source = mock.sentinel.source size = mock.sentinel.size output = image_utils.resize_image(source, size) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'resize', source, 'sentinel.sizeG', run_as_root=False) @mock.patch('cinder.utils.execute') def test_run_as_root(self, mock_exec): source = mock.sentinel.source size = mock.sentinel.size output = image_utils.resize_image(source, size, run_as_root=True) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'resize', source, 'sentinel.sizeG', run_as_root=True) class TestFetch(test.TestCase): @mock.patch('eventlet.tpool.Proxy') @mock.patch('os.stat') @mock.patch('cinder.image.image_utils.fileutils') def test_defaults(self, mock_fileutils, mock_stat, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id path = 'test_path' _user_id = mock.sentinel._user_id _project_id = mock.sentinel._project_id mock_open = mock.mock_open() mock_stat.return_value.st_size = 1048576 with mock.patch('cinder.image.image_utils.open', new=mock_open, create=True): output = image_utils.fetch(ctxt, image_service, image_id, path, _user_id, _project_id) self.assertIsNone(output) mock_proxy.assert_called_once_with(mock_open.return_value) image_service.download.assert_called_once_with(ctxt, image_id, mock_proxy.return_value) mock_open.assert_called_once_with(path, 'wb') 
mock_fileutils.remove_path_on_error.assert_called_once_with(path) (mock_fileutils.remove_path_on_error.return_value.__enter__ .assert_called_once_with()) (mock_fileutils.remove_path_on_error.return_value.__exit__ .assert_called_once_with(None, None, None)) def test_fetch_enospc(self): context = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id e = exception.ImageTooBig(image_id=image_id, reason = "fake") e.errno = errno.ENOSPC image_service.download.side_effect = e path = '/test_path' _user_id = mock.sentinel._user_id _project_id = mock.sentinel._project_id with mock.patch('cinder.image.image_utils.open', new=mock.mock_open(), create=True): self.assertRaises(exception.ImageTooBig, image_utils.fetch, context, image_service, image_id, path, _user_id, _project_id) def test_fetch_ioerror(self): context = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id e = IOError() e.errno = errno.ECONNRESET e.strerror = 'Some descriptive message' image_service.download.side_effect = e path = '/test_path' _user_id = mock.sentinel._user_id _project_id = mock.sentinel._project_id with mock.patch('cinder.image.image_utils.open', new=mock.mock_open(), create=True): self.assertRaisesRegex(exception.ImageDownloadFailed, e.strerror, image_utils.fetch, context, image_service, image_id, path, _user_id, _project_id) class MockVerifier(object): def update(self, data): return def verify(self): return True class BadVerifier(object): def update(self, data): return def verify(self): raise cryptography.exceptions.InvalidSignature( 'Invalid signature.' ) class TestVerifyImageSignature(test.TestCase): @mock.patch('cursive.signature_utils.get_verifier') @mock.patch('oslo_utils.fileutils.remove_path_on_error') def test_image_signature_verify_failed(self, mock_remove, mock_get): self.mock_object(builtins, 'open', mock.mock_open()) ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'img_signature_certificate_uuid': 'fake_uuid', 'img_signature_hash_method': 'SHA-256', 'img_signature': 'signature', 'img_signature_key_type': 'RSA-PSS'}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') mock_get.return_value = BadVerifier() self.assertRaises(exception.ImageSignatureVerificationException, image_utils.verify_glance_image_signature, ctxt, FakeImageService(), 'fake_id', 'fake_path') mock_get.assert_called_once_with( context=ctxt, img_signature_certificate_uuid='fake_uuid', img_signature_hash_method='SHA-256', img_signature='signature', img_signature_key_type='RSA-PSS') @mock.patch('cursive.signature_utils.get_verifier') def test_image_signature_metadata_missing(self, mock_get): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') result = image_utils.verify_glance_image_signature( ctxt, FakeImageService(), 'fake_id', 'fake_path') self.assertFalse(result) mock_get.assert_not_called() @mock.patch('cursive.signature_utils.get_verifier') def test_image_signature_metadata_incomplete(self, mock_get): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'img_signature_certificate_uuid': None, 'img_signature_hash_method': 'SHA-256', 'img_signature': 'signature', 
'img_signature_key_type': 'RSA-PSS'}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') self.assertRaises(exception.InvalidSignatureImage, image_utils.verify_glance_image_signature, ctxt, FakeImageService(), 'fake_id', 'fake_path') mock_get.assert_not_called() @mock.patch('six.moves.builtins.open') @mock.patch('eventlet.tpool.execute') @mock.patch('cursive.signature_utils.get_verifier') @mock.patch('oslo_utils.fileutils.remove_path_on_error') def test_image_signature_verify_success(self, mock_remove, mock_get, mock_exec, mock_open): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'img_signature_certificate_uuid': 'fake_uuid', 'img_signature_hash_method': 'SHA-256', 'img_signature': 'signature', 'img_signature_key_type': 'RSA-PSS'}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') mock_get.return_value = MockVerifier() result = image_utils.verify_glance_image_signature( ctxt, FakeImageService(), 'fake_id', 'fake_path') self.assertTrue(result) mock_exec.assert_called_once_with( image_utils._verify_image, mock_open.return_value.__enter__.return_value, mock_get.return_value) mock_get.assert_called_once_with( context=ctxt, img_signature_certificate_uuid='fake_uuid', img_signature_hash_method='SHA-256', img_signature='signature', img_signature_key_type='RSA-PSS') class TestVerifyImage(test.TestCase): @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_defaults(self, mock_fetch, mock_fileutils, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = None output = image_utils.fetch_verify_image(ctxt, image_service, image_id, dest) self.assertIsNone(output) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, dest, None, None) mock_info.assert_called_once_with(dest, run_as_root=True, force_share=False) mock_fileutils.remove_path_on_error.assert_called_once_with(dest) (mock_fileutils.remove_path_on_error.return_value.__enter__ .assert_called_once_with()) (mock_fileutils.remove_path_on_error.return_value.__exit__ .assert_called_once_with(None, None, None)) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_kwargs(self, mock_fetch, mock_fileutils, mock_info, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 2 run_as_root = mock.sentinel.run_as_root mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = None mock_data.virtual_size = 1 output = image_utils.fetch_verify_image( ctxt, image_service, image_id, dest, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, dest, None, None) 
mock_fileutils.remove_path_on_error.assert_called_once_with(dest) (mock_fileutils.remove_path_on_error.return_value.__enter__ .assert_called_once_with()) (mock_fileutils.remove_path_on_error.return_value.__exit__ .assert_called_once_with(None, None, None)) mock_check_size.assert_called_once_with(mock_data.virtual_size, size, image_id) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_format_error(self, mock_fetch, mock_fileutils, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = None mock_data.backing_file = None self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_verify_image, ctxt, image_service, image_id, dest) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_backing_file_error(self, mock_fetch, mock_fileutils, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = 'test_backing_file' self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_verify_image, ctxt, image_service, image_id, dest) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_size_error(self, mock_fetch, mock_fileutils, mock_info, mock_check_size): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest size = 1 mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = None mock_data.virtual_size = 2 * units.Gi mock_check_size.side_effect = exception.ImageUnacceptable( image_id='fake_image_id', reason='test') self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_verify_image, ctxt, image_service, image_id, dest, size=size) class TestTemporaryDir(test.TestCase): @mock.patch('cinder.image.image_utils.CONF') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') def test_conv_dir_exists(self, mock_tempdir, mock_make, mock_conf): mock_conf.image_conversion_dir = mock.sentinel.conv_dir output = image_utils.temporary_dir() self.assertTrue(mock_make.called) mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir) self.assertEqual(output, mock_tempdir.return_value) @mock.patch('cinder.image.image_utils.CONF') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') def test_create_conv_dir(self, mock_tempdir, mock_make, mock_conf): mock_conf.image_conversion_dir = mock.sentinel.conv_dir output = image_utils.temporary_dir() mock_make.assert_called_once_with(mock.sentinel.conv_dir) mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir) self.assertEqual(output, mock_tempdir.return_value) @mock.patch('cinder.image.image_utils.CONF') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') def test_no_conv_dir(self, mock_tempdir, mock_make, mock_conf): mock_conf.image_conversion_dir = None output = image_utils.temporary_dir() self.assertTrue(mock_make.called) 
mock_tempdir.assert_called_once_with(dir=None) self.assertEqual(output, mock_tempdir.return_value) @ddt.ddt class TestUploadVolume(test.TestCase): @ddt.data((mock.sentinel.disk_format, mock.sentinel.disk_format, True), (mock.sentinel.disk_format, mock.sentinel.disk_format, False), ('ploop', 'parallels', True), ('ploop', 'parallels', False)) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_diff_format(self, image_format, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_conf, mock_proxy): input_format, output_format, do_compress = image_format ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': input_format, 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = output_format data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path, compress=do_compress) self.assertIsNone(output) mock_convert.assert_called_once_with(volume_path, temp_file, output_format, run_as_root=True, compress=do_compress) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) mock_open.assert_called_once_with(temp_file, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_conf, mock_chown, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' mock_os.access.return_value = False output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) self.assertFalse(mock_convert.called) self.assertFalse(mock_info.called) mock_chown.assert_called_once_with(volume_path) mock_open.assert_called_once_with(volume_path, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value) @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') 
@mock.patch('cinder.image.image_utils.os') def test_same_format_compressed(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_conf, mock_chown, mock_proxy, mock_engine_ready, mock_get_engine): class fakeEngine(object): def __init__(self): pass def compress_img(self, src, dest, run_as_root): pass ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': 'compressed'} mock_conf.allow_compression_on_image_upload = True volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = 'raw' data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) mock_convert.assert_called_once_with(volume_path, temp_file, 'raw', compress=True, run_as_root=True) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) mock_open.assert_called_once_with(temp_file, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value) mock_engine.compress_img.assert_called() @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format_on_nt(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_conf, mock_chown, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': 'bare'} volume_path = mock.sentinel.volume_path mock_os.name = 'nt' mock_os.access.return_value = False output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) self.assertFalse(mock_convert.called) self.assertFalse(mock_info.called) mock_open.assert_called_once_with(volume_path, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value) @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format_on_nt_compressed(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_conf, mock_chown, mock_proxy, mock_engine_ready, mock_get_engine): class fakeEngine(object): def __init__(self): pass def compress_img(self, src, dest, run_as_root): pass ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': 'compressed'} mock_conf.allow_compression_on_image_upload = True 
volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = 'raw' data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) mock_convert.assert_called_once_with(volume_path, temp_file, 'raw', compress=True, run_as_root=True) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) mock_open.assert_called_once_with(temp_file, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value) mock_engine.compress_img.assert_called() @mock.patch('cinder.image.image_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_convert_error(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_conf): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': mock.sentinel.disk_format, 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = mock.sentinel.other_disk_format data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value self.assertRaises(exception.ImageUnacceptable, image_utils.upload_volume, ctxt, image_service, image_meta, volume_path) mock_convert.assert_called_once_with(volume_path, temp_file, mock.sentinel.disk_format, run_as_root=True, compress=True) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) self.assertFalse(image_service.update.called) class TestFetchToVhd(test.TestCase): @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_defaults(self, mock_fetch_to): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize out_subformat = 'fake_subformat' output = image_utils.fetch_to_vhd(ctxt, image_service, image_id, dest, blocksize, volume_subformat=out_subformat) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'vpc', blocksize, volume_subformat=out_subformat, user_id=None, project_id=None, run_as_root=True) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_kwargs(self, mock_fetch_to, mock_check_space): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id run_as_root = mock.sentinel.run_as_root out_subformat = 'fake_subformat' output = image_utils.fetch_to_vhd(ctxt, image_service, image_id, dest, blocksize, user_id=user_id, project_id=project_id, run_as_root=run_as_root, volume_subformat=out_subformat) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'vpc', blocksize, volume_subformat=out_subformat, user_id=user_id, 
project_id=project_id, run_as_root=run_as_root) class TestFetchToRaw(test.TestCase): @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_defaults(self, mock_fetch_to): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize output = image_utils.fetch_to_raw(ctxt, image_service, image_id, dest, blocksize) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'raw', blocksize, user_id=None, project_id=None, size=None, run_as_root=True) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_kwargs(self, mock_fetch_to, mock_check_space): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = mock.sentinel.size run_as_root = mock.sentinel.run_as_root output = image_utils.fetch_to_raw(ctxt, image_service, image_id, dest, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'raw', blocksize, user_id=user_id, size=size, project_id=project_id, run_as_root=run_as_root) class FakeImageService(object): def __init__(self, db_driver=None, image_service=None, disk_format='raw'): self.temp_images = None self.disk_format = disk_format def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': self.disk_format, 'container_format': 'bare', 'status': 'active'} class TestFetchToVolumeFormat(test.TestCase): @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_defaults(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space): ctxt = mock.sentinel.context ctxt.user_id = mock.sentinel.user_id image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format(ctxt, image_service, image_id, dest, volume_format, blocksize) self.assertIsNone(output) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=True), mock.call(tmp, run_as_root=True)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, None, None) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=True, src_format='raw') @mock.patch('cinder.image.image_utils.check_virtual_size') 
@mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_kwargs(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=run_as_root, src_format='raw') mock_check_size.assert_called_once_with(data.virtual_size, size, image_id) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=True) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_convert_from_vhd(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value image_service = FakeImageService(disk_format='vhd') expect_format = 'vpc' output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) 
mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) mock_repl_xen.assert_called_once_with(tmp) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=run_as_root, src_format=expect_format) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_convert_from_iso(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value image_service = FakeImageService(disk_format='iso') expect_format = 'raw' output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=run_as_root, src_format=expect_format) @mock.patch('cinder.image.image_utils.check_available_space', new=mock.Mock()) @mock.patch('cinder.image.image_utils.is_xenserver_format', new=mock.Mock(return_value=False)) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_temporary_images(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context ctxt.user_id = mock.sentinel.user_id image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock.sentinel.tmp dummy = mock.sentinel.dummy mock_temp.return_value.__enter__.side_effect = [tmp, dummy] with 
image_utils.TemporaryImages.fetch(image_service, ctxt, image_id) as tmp_img: self.assertEqual(tmp_img, tmp) output = image_utils.fetch_to_volume_format(ctxt, image_service, image_id, dest, volume_format, blocksize) self.assertIsNone(output) self.assertEqual(2, mock_temp.call_count) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=True), mock.call(dummy, force_share=False, run_as_root=True), mock.call(tmp, run_as_root=True)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, None, None) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=True, src_format='raw') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_no_qemu_img_and_is_raw(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'raw', 'size': 41126400} image_size_m = math.ceil(float(41126400) / units.Mi) output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_called_once_with(tmp, force_share=False, run_as_root=run_as_root) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) mock_copy.assert_called_once_with(tmp, dest, image_size_m, blocksize) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_no_qemu_img_not_raw(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = 
mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'not_raw'} self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_called_once_with(tmp, force_share=False, run_as_root=run_as_root) self.assertFalse(mock_fetch.called) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_no_qemu_img_no_metadata(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = None self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_called_once_with(tmp, force_share=False, run_as_root=run_as_root) self.assertFalse(mock_fetch.called) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_size_error(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_size): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 1234 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = int(1234.5 * units.Gi) tmp = mock_temp.return_value.__enter__.return_value 
mock_check_size.side_effect = exception.ImageUnacceptable( image_id='fake_image_id', reason='test') self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_qemu_img_parse_error(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = None data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_backing_file_error(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = 
volume_format data.backing_file = mock.sentinel.backing_file data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=True) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_xenserver_to_vhd(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) mock_repl_xen.assert_called_once_with(tmp) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=None, run_as_root=run_as_root, src_format='raw') @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_no_qemu_img_fetch_verify_image(self, mock_conf, mock_temp, mock_info, mock_fetch): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root image_service.show.return_value = {'disk_format': 'raw', 'size': 41126400} image_utils.fetch_verify_image( ctxt, image_service, image_id, dest, user_id=user_id, project_id=project_id, size=size, 
run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_info.assert_called_once_with(dest, force_share=False, run_as_root=run_as_root) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, dest, None, None) @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_get_qemu_data_returns_none(self, mock_conf, mock_temp, mock_info): image_id = mock.sentinel.image_id dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root disk_format_raw = True has_meta = True output = image_utils.get_qemu_data(image_id, has_meta, disk_format_raw, dest, run_as_root=run_as_root) self.assertIsNone(output) @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_get_qemu_data_with_image_meta_exception(self, mock_conf, mock_temp, mock_info): image_id = mock.sentinel.image_id dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root disk_format_raw = False has_meta = True self.assertRaises( exception.ImageUnacceptable, image_utils.get_qemu_data, image_id, has_meta, disk_format_raw, dest, run_as_root=run_as_root) @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_get_qemu_data_without_image_meta_except(self, mock_conf, mock_temp, mock_info): image_id = mock.sentinel.image_id dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root disk_format_raw = False has_meta = False self.assertRaises( exception.ImageUnacceptable, image_utils.get_qemu_data, image_id, has_meta, disk_format_raw, dest, run_as_root=run_as_root) @mock.patch('cinder.image.accelerator.is_gzip_compressed', return_value = True) @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_defaults_compressed(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_engine_ready, mock_get_engine, mock_gzip_compressed): class fakeEngine(object): def __init__(self): pass def decompress_img(self, src, dest, run_as_root): pass class FakeImageService(object): def __init__(self, db_driver=None, image_service=None, disk_format='raw'): self.temp_images = None self.disk_format = disk_format def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': self.disk_format, 'container_format': 'compressed', 'status': 'active'} ctxt = mock.sentinel.context ctxt.user_id = mock.sentinel.user_id image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = 
mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine output = image_utils.fetch_to_volume_format(ctxt, image_service, image_id, dest, volume_format, blocksize) self.assertIsNone(output) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=True), mock.call(tmp, run_as_root=True)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, None, None) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=True, src_format='raw') mock_engine.decompress_img.assert_called() class TestXenserverUtils(test.TestCase): def test_is_xenserver_format(self): image_meta1 = {'disk_format': 'vhd', 'container_format': 'ovf'} self.assertTrue(image_utils.is_xenserver_format(image_meta1)) image_meta2 = {'disk_format': 'test_disk_format', 'container_format': 'test_cont_format'} self.assertFalse(image_utils.is_xenserver_format(image_meta2)) @mock.patch('cinder.image.image_utils.utils.execute') def test_extract_targz(self, mock_exec): name = mock.sentinel.archive_name target = mock.sentinel.target output = image_utils.extract_targz(name, target) mock_exec.assert_called_once_with('tar', '-xzf', name, '-C', target) self.assertIsNone(output) class TestVhdUtils(test.TestCase): @mock.patch('cinder.image.image_utils.utils.execute') def test_set_vhd_parent(self, mock_exec): vhd_path = mock.sentinel.vhd_path parentpath = mock.sentinel.parentpath output = image_utils.set_vhd_parent(vhd_path, parentpath) mock_exec.assert_called_once_with('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath) self.assertIsNone(output) @mock.patch('cinder.image.image_utils.set_vhd_parent') def test_fix_vhd_chain(self, mock_set_parent): vhd_chain = (mock.sentinel.first, mock.sentinel.second, mock.sentinel.third, mock.sentinel.fourth, mock.sentinel.fifth) output = image_utils.fix_vhd_chain(vhd_chain) self.assertIsNone(output) mock_set_parent.assert_has_calls([ mock.call(mock.sentinel.first, mock.sentinel.second), mock.call(mock.sentinel.second, mock.sentinel.third), mock.call(mock.sentinel.third, mock.sentinel.fourth), mock.call(mock.sentinel.fourth, mock.sentinel.fifth)]) @mock.patch('cinder.image.image_utils.utils.execute', return_value=(98765.43210, mock.sentinel.error)) def test_get_vhd_size(self, mock_exec): vhd_path = mock.sentinel.vhd_path output = image_utils.get_vhd_size(vhd_path) mock_exec.assert_called_once_with('vhd-util', 'query', '-n', vhd_path, '-v') self.assertEqual(98765, output) @mock.patch('cinder.image.image_utils.utils.execute') def test_resize_vhd(self, mock_exec): vhd_path = mock.sentinel.vhd_path size = 387549349 journal = mock.sentinel.journal output = image_utils.resize_vhd(vhd_path, size, journal) self.assertIsNone(output) mock_exec.assert_called_once_with('vhd-util', 'resize', '-n', vhd_path, '-s', str(size), '-j', journal) @mock.patch('cinder.image.image_utils.utils.execute') def test_coalesce_vhd(self, mock_exec): vhd_path = mock.sentinel.vhd_path output = image_utils.coalesce_vhd(vhd_path) self.assertIsNone(output) mock_exec.assert_called_once_with('vhd-util', 'coalesce', '-n', vhd_path) @mock.patch('cinder.image.image_utils.temporary_dir') 
@mock.patch('cinder.image.image_utils.coalesce_vhd') @mock.patch('cinder.image.image_utils.resize_vhd') @mock.patch('cinder.image.image_utils.get_vhd_size') @mock.patch('cinder.image.image_utils.utils.execute') def test_coalesce_chain(self, mock_exec, mock_size, mock_resize, mock_coal, mock_temp): vhd_chain = (mock.sentinel.first, mock.sentinel.second, mock.sentinel.third, mock.sentinel.fourth, mock.sentinel.fifth) # os.path.join does not work with MagicMock objects on Windows. mock_temp.return_value.__enter__.return_value = 'fake_temp_dir' output = image_utils.coalesce_chain(vhd_chain) self.assertEqual(mock.sentinel.fifth, output) mock_size.assert_has_calls([ mock.call(mock.sentinel.first), mock.call(mock.sentinel.second), mock.call(mock.sentinel.third), mock.call(mock.sentinel.fourth)]) mock_resize.assert_has_calls([ mock.call(mock.sentinel.second, mock_size.return_value, mock.ANY), mock.call(mock.sentinel.third, mock_size.return_value, mock.ANY), mock.call(mock.sentinel.fourth, mock_size.return_value, mock.ANY), mock.call(mock.sentinel.fifth, mock_size.return_value, mock.ANY)]) mock_coal.assert_has_calls([ mock.call(mock.sentinel.first), mock.call(mock.sentinel.second), mock.call(mock.sentinel.third), mock.call(mock.sentinel.fourth)]) @mock.patch('cinder.image.image_utils.os.path') def test_discover_vhd_chain(self, mock_path): directory = '/some/test/directory' mock_path.join.side_effect = lambda x, y: '/'.join((x, y)) mock_path.exists.side_effect = (True, True, True, False) output = image_utils.discover_vhd_chain(directory) expected_output = ['/some/test/directory/0.vhd', '/some/test/directory/1.vhd', '/some/test/directory/2.vhd'] self.assertEqual(expected_output, output) @mock.patch('cinder.image.image_utils.temporary_dir') @mock.patch('cinder.image.image_utils.os.rename') @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists') @mock.patch('cinder.image.image_utils.coalesce_chain') @mock.patch('cinder.image.image_utils.fix_vhd_chain') @mock.patch('cinder.image.image_utils.discover_vhd_chain') @mock.patch('cinder.image.image_utils.extract_targz') def test_replace_xenserver_image_with_coalesced_vhd( self, mock_targz, mock_discover, mock_fix, mock_coal, mock_delete, mock_rename, mock_temp): image_file = mock.sentinel.image_file tmp = mock_temp.return_value.__enter__.return_value output = image_utils.replace_xenserver_image_with_coalesced_vhd( image_file) self.assertIsNone(output) mock_targz.assert_called_once_with(image_file, tmp) mock_discover.assert_called_once_with(tmp) mock_fix.assert_called_once_with(mock_discover.return_value) mock_coal.assert_called_once_with(mock_discover.return_value) mock_delete.assert_called_once_with(image_file) mock_rename.assert_called_once_with(mock_coal.return_value, image_file) class TestCreateTemporaryFile(test.TestCase): @mock.patch('cinder.image.image_utils.os.close') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('cinder.image.image_utils.os.makedirs') @mock.patch('cinder.image.image_utils.tempfile.mkstemp') def test_create_temporary_file_no_dir(self, mock_mkstemp, mock_dirs, mock_conf, mock_close): mock_conf.image_conversion_dir = None fd = mock.sentinel.file_descriptor path = mock.sentinel.absolute_pathname mock_mkstemp.return_value = (fd, path) output = image_utils.create_temporary_file() self.assertEqual(path, output) mock_mkstemp.assert_called_once_with(dir=None) mock_close.assert_called_once_with(fd) @mock.patch('cinder.image.image_utils.os.close') @mock.patch('cinder.image.image_utils.CONF') 
@mock.patch('cinder.image.image_utils.os.makedirs') @mock.patch('cinder.image.image_utils.tempfile.mkstemp') def test_create_temporary_file_with_dir(self, mock_mkstemp, mock_dirs, mock_conf, mock_close): conv_dir = mock.sentinel.image_conversion_dir mock_conf.image_conversion_dir = conv_dir fd = mock.sentinel.file_descriptor path = mock.sentinel.absolute_pathname mock_mkstemp.return_value = (fd, path) output = image_utils.create_temporary_file() self.assertEqual(path, output) self.assertTrue(mock_dirs.called) mock_mkstemp.assert_called_once_with(dir=conv_dir) mock_close.assert_called_once_with(fd) @mock.patch('cinder.image.image_utils.os.close') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('cinder.image.image_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.tempfile.mkstemp') def test_create_temporary_file_and_dir(self, mock_mkstemp, mock_dirs, mock_conf, mock_close): conv_dir = mock.sentinel.image_conversion_dir mock_conf.image_conversion_dir = conv_dir fd = mock.sentinel.file_descriptor path = mock.sentinel.absolute_pathname mock_mkstemp.return_value = (fd, path) output = image_utils.create_temporary_file() self.assertEqual(path, output) mock_dirs.assert_called_once_with(conv_dir) mock_mkstemp.assert_called_once_with(dir=conv_dir) mock_close.assert_called_once_with(fd) @mock.patch('cinder.image.image_utils.os.remove') @mock.patch('cinder.image.image_utils.os.path.join') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('cinder.image.image_utils.os.listdir') @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) def test_cleanup_temporary_file(self, mock_path, mock_listdir, mock_conf, mock_join, mock_remove): mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2'] conv_dir = mock.sentinel.image_conversion_dir mock_conf.image_conversion_dir = conv_dir mock_join.return_value = '/test/tmp/tmphost@backend1' image_utils.cleanup_temporary_file('host@backend1') mock_listdir.assert_called_once_with(conv_dir) mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') @mock.patch('cinder.image.image_utils.os.remove') @mock.patch('cinder.image.image_utils.os.listdir') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('cinder.image.image_utils.os.path.exists', return_value=False) def test_cleanup_temporary_file_with_not_exist_path(self, mock_path, mock_conf, mock_listdir, mock_remove): conv_dir = mock.sentinel.image_conversion_dir mock_conf.image_conversion_dir = conv_dir image_utils.cleanup_temporary_file('host@backend1') self.assertFalse(mock_listdir.called) self.assertFalse(mock_remove.called) @mock.patch('cinder.image.image_utils.os.remove') @mock.patch('cinder.image.image_utils.os.path.join') @mock.patch('cinder.image.image_utils.CONF') @mock.patch('cinder.image.image_utils.os.listdir') @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) def test_cleanup_temporary_file_with_exception(self, mock_path, mock_listdir, mock_conf, mock_join, mock_remove): mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2'] conv_dir = mock.sentinel.image_conversion_dir mock_conf.image_conversion_dir = conv_dir mock_join.return_value = '/test/tmp/tmphost@backend1' mock_remove.side_effect = OSError image_utils.cleanup_temporary_file('host@backend1') mock_listdir.assert_called_once_with(conv_dir) mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') class TestTemporaryFileContextManager(test.TestCase): @mock.patch('cinder.image.image_utils.create_temporary_file', 
return_value=mock.sentinel.temporary_file) @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists') def test_temporary_file(self, mock_delete, mock_create): with image_utils.temporary_file() as tmp_file: self.assertEqual(mock.sentinel.temporary_file, tmp_file) self.assertFalse(mock_delete.called) mock_delete.assert_called_once_with(mock.sentinel.temporary_file) class TestImageUtils(test.TestCase): def test_get_virtual_size(self): image_id = fake.IMAGE_ID virtual_size = 1073741824 volume_size = 2 virt_size = image_utils.check_virtual_size(virtual_size, volume_size, image_id) self.assertEqual(1, virt_size) def test_get_bigger_virtual_size(self): image_id = fake.IMAGE_ID virtual_size = 3221225472 volume_size = 2 self.assertRaises(exception.ImageUnacceptable, image_utils.check_virtual_size, virtual_size, volume_size, image_id) def test_decode_cipher(self): expected = {'cipher_alg': 'aes-256', 'cipher_mode': 'xts', 'ivgen_alg': 'essiv'} result = image_utils.decode_cipher('aes-xts-essiv', 256) self.assertEqual(expected, result)
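# The test_decode_cipher case above pins down the contract of
# image_utils.decode_cipher: 'aes-xts-essiv' with a 256-bit key maps to
# {'cipher_alg': 'aes-256', 'cipher_mode': 'xts', 'ivgen_alg': 'essiv'}.
# A minimal sketch of a helper satisfying that contract follows; it is an
# illustration only, not cinder's actual implementation, and the name
# decode_cipher_sketch is hypothetical.
def decode_cipher_sketch(cipher_spec, key_size):
    """Split an '<alg>-<mode>-<ivgen>' spec into its components."""
    cipher_alg, cipher_mode, ivgen_alg = cipher_spec.split('-')
    return {'cipher_alg': cipher_alg + '-' + str(key_size),
            'cipher_mode': cipher_mode,
            'ivgen_alg': ivgen_alg}


assert decode_cipher_sketch('aes-xts-essiv', 256) == {
    'cipher_alg': 'aes-256', 'cipher_mode': 'xts', 'ivgen_alg': 'essiv'}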
import torch
import torch.nn as nn
import torch.nn.functional as F


class MyAwesomeModel(nn.Module):
    """LeNet-style convolutional classifier for single-channel images."""

    def __init__(self, n_classes):
        super(MyAwesomeModel, self).__init__()
        # Convolutional feature extractor: three conv blocks with Tanh
        # activations and average pooling, producing a 120-dim feature vector.
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=4, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=4, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=16, out_channels=120, kernel_size=4, stride=1),
            nn.Tanh(),
        )
        # Fully connected classifier head mapping the 120-dim features to class logits.
        self.classifier = nn.Sequential(
            nn.Linear(in_features=120, out_features=84),
            nn.Tanh(),
            nn.Linear(in_features=84, out_features=n_classes),
        )

    def forward(self, x, return_features=False):
        x = self.feature_extractor(x)
        x = torch.flatten(x, 1)
        logits = self.classifier(x)
        probs = F.log_softmax(logits, dim=1)
        if return_features:
            # Return the flattened 120-dim features instead of predictions.
            return x
        else:
            return probs
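# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical example of running MyAwesomeModel on a batch of
# 28x28 single-channel images; the input size is an assumption, chosen
# because it reduces to a 1x1 spatial map (25 -> 12 -> 9 -> 4 -> 1) before
# the classifier. Shapes in the comments follow from the layers above.
if __name__ == "__main__":
    model = MyAwesomeModel(n_classes=10)
    dummy = torch.randn(8, 1, 28, 28)               # batch of 8 grayscale images
    log_probs = model(dummy)                        # (8, 10) log-probabilities
    features = model(dummy, return_features=True)   # (8, 120) feature vectors
    print(log_probs.shape, features.shape)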
from adventofcode.year_2021.day_2021_01 import readable, short


def test_readable_part_one():
    answer = readable.part1()
    assert answer == 1616


def test_readable_part_two():
    answer = readable.part2()
    assert answer == 1645


def test_short_part_one():
    answer = short.part1()
    assert answer == 1616


def test_short_part_two():
    answer = short.part2()
    assert answer == 1645
"""djangoecommerce URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from core.views import index urlpatterns = [ path('', index), path('admin/', admin.site.urls), ]
from stable_baselines.ppo2.run_mujoco import eval_return import cma import numpy as np from stable_baselines.low_dim_analysis.eval_util import * from stable_baselines.low_dim_analysis.common import do_pca, plot_2d, \ dump_rows_write_csv, generate_run_dir, do_proj_on_first_n_IPCA, get_allinone_concat_df from sklearn.decomposition import IncrementalPCA from stable_baselines import logger import pandas as pd from sklearn.decomposition import PCA from joblib import Parallel, delayed from matplotlib import pyplot as plt import time import os from stable_baselines.common.cmd_util import mujoco_arg_parser from stable_baselines.low_dim_analysis.common_parser import get_common_parser from numpy import linalg as LA def plot_cma_returns(plot_dir_alg, name, mean_rets, min_rets, max_rets, show): X = np.arange(len(mean_rets)) fig, ax = plt.subplots() plt.xlabel('num of eval') plt.ylabel('mean returns with min and max filled') ax.plot(X, mean_rets) ax.fill_between(X, min_rets, max_rets, alpha=0.5) file_path = f"{plot_dir_alg}/{name}.pdf" if os.path.isfile(file_path): os.remove(file_path) logger.log(f"saving cma plot to {file_path}") fig.savefig(file_path, dpi=300, bbox_inches='tight', format='pdf') if show: plt.show() def do_cma(cma_args, first_n_pcs, orgin_param, save_dir, starting_coord, var): tic = time.time() #TODO better starting locations, record how many samples, logger.log(f"CMAES STARTING :{starting_coord}") es = cma.CMAEvolutionStrategy(starting_coord, var) total_num_of_evals = 0 total_num_timesteps = 0 mean_rets = [] min_rets = [] max_rets = [] eval_returns = None optimization_path = [] while total_num_timesteps < cma_args.cma_num_timesteps and not es.stop(): solutions = es.ask() optimization_path.extend(solutions) thetas = [np.matmul(coord, first_n_pcs) + orgin_param for coord in solutions] logger.log(f"current time steps num: {total_num_timesteps} total time steps: {cma_args.cma_num_timesteps}") eval_returns = Parallel(n_jobs=cma_args.cores_to_use) \ (delayed(eval_return)(cma_args, save_dir, theta, cma_args.eval_num_timesteps, i) for (i, theta) in enumerate(thetas)) mean_rets.append(np.mean(eval_returns)) min_rets.append(np.min(eval_returns)) max_rets.append(np.max(eval_returns)) total_num_of_evals += len(eval_returns) total_num_timesteps += cma_args.eval_num_timesteps * len(eval_returns) logger.log(f"current eval returns: {str(eval_returns)}") logger.log(f"total timesteps so far: {total_num_timesteps}") negative_eval_returns = [-r for r in eval_returns] es.tell(solutions, negative_eval_returns) es.logger.add() # write data to disc to be plotted es.disp() toc = time.time() logger.log(f"####################################CMA took {toc-tic} seconds") es_logger = es.logger if not hasattr(es_logger, 'xmean'): es_logger.load() n_comp_used = first_n_pcs.shape[0] optimization_path_mean = np.vstack((starting_coord, es_logger.xmean[:,5:5+n_comp_used])) return mean_rets, min_rets, max_rets, np.array(optimization_path), np.array(optimization_path_mean) def main(): import sys logger.log(sys.argv) common_arg_parser = get_common_parser() cma_args, cma_unknown_args = common_arg_parser.parse_known_args() origin = "mean_param" this_run_dir = get_dir_path_for_this_run(cma_args) traj_params_dir_name = get_full_params_dir(this_run_dir) intermediate_data_dir = get_intermediate_data_dir(this_run_dir) save_dir = get_save_dir( this_run_dir) if not os.path.exists(intermediate_data_dir): os.makedirs(intermediate_data_dir) cma_run_num, cma_intermediate_data_dir = generate_run_dir(get_cma_returns_dirname, 
intermediate_dir=intermediate_data_dir, n_comp=cma_args.n_comp_to_use) ''' ========================================================================================== get the pc vectors ========================================================================================== ''' logger.log("grab final params") final_file = get_full_param_traj_file_path(traj_params_dir_name, "final") final_param = pd.read_csv(final_file, header=None).values[0] final_pca = IncrementalPCA(n_components=2) # for sparse PCA to speed up theta_file = get_full_param_traj_file_path(traj_params_dir_name, 0) concat_df = pd.read_csv(theta_file, header=None, chunksize=10000) tic = time.time() for chunk in concat_df: logger.log(f"currnet at : {concat_df._currow}") if chunk.shape[0] < 2: logger.log(f"last column too few: {chunk.shape[0]}") continue final_pca.partial_fit(chunk.values) toc = time.time() logger.log('\nElapsed time computing the chunked PCA {:.2f} s\n' .format(toc - tic)) logger.log(final_pca.explained_variance_ratio_) pcs_components = final_pca.components_ first_2_pcs = pcs_components[:2] mean_param = final_pca.mean_ origin_param = mean_param theta_file = get_full_param_traj_file_path(traj_params_dir_name, 0) concat_df = pd.read_csv(theta_file, header=None, chunksize=10000) proj_coords = do_proj_on_first_n_IPCA(concat_df, first_2_pcs, origin_param) ''' ========================================================================================== eval all xy coords ========================================================================================== ''' from stable_baselines.low_dim_analysis.common import plot_contour_trajectory, gen_subspace_coords,do_eval_returns, \ get_allinone_concat_df, do_proj_on_first_n from stable_baselines.ppo2.run_mujoco import eval_return last_proj_coord = do_proj_on_first_n(final_param, first_2_pcs, origin_param) starting_coord = last_proj_coord tic = time.time() #TODO better starting locations, record how many samples, logger.log(f"CMAES STARTING :{starting_coord}") es = cma.CMAEvolutionStrategy(starting_coord, 5) total_num_of_evals = 0 total_num_timesteps = 0 mean_rets = [] min_rets = [] max_rets = [] eval_returns = None optimization_path = [] while total_num_timesteps < cma_args.cma_num_timesteps and not es.stop(): solutions = es.ask() optimization_path.extend(solutions) thetas = [np.matmul(coord, first_2_pcs) + origin_param for coord in solutions] logger.log(f"current time steps num: {total_num_timesteps} total time steps: {cma_args.cma_num_timesteps}") eval_returns = Parallel(n_jobs=cma_args.cores_to_use) \ (delayed(eval_return)(cma_args, save_dir, theta, cma_args.eval_num_timesteps, i) for (i, theta) in enumerate(thetas)) mean_rets.append(np.mean(eval_returns)) min_rets.append(np.min(eval_returns)) max_rets.append(np.max(eval_returns)) total_num_of_evals += len(eval_returns) total_num_timesteps += cma_args.eval_num_timesteps * len(eval_returns) logger.log(f"current eval returns: {str(eval_returns)}") logger.log(f"total timesteps so far: {total_num_timesteps}") negative_eval_returns = [-r for r in eval_returns] es.tell(solutions, negative_eval_returns) es.logger.add() # write data to disc to be plotted es.disp() toc = time.time() logger.log(f"####################################CMA took {toc-tic} seconds") es_logger = es.logger if not hasattr(es_logger, 'xmean'): es_logger.load() n_comp_used = first_2_pcs.shape[0] optimization_path_mean = np.vstack((starting_coord, es_logger.xmean[:,5:5+n_comp_used])) dump_rows_write_csv(cma_intermediate_data_dir, optimization_path_mean, 
"opt_mean_path") plot_dir = get_plot_dir(cma_args) cma_plot_dir = get_cma_plot_dir(plot_dir, cma_args.n_comp_to_use, cma_run_num, origin=origin) if not os.path.exists(cma_plot_dir): os.makedirs(cma_plot_dir) ret_plot_name = f"cma return on {cma_args.n_comp_to_use} dim space of real pca plane, " \ f"explained {np.sum(final_pca.explained_variance_ratio_[:2])}" plot_cma_returns(cma_plot_dir, ret_plot_name, mean_rets, min_rets, max_rets, show=False) assert proj_coords.shape[1] == 2 xcoordinates_to_eval, ycoordinates_to_eval = gen_subspace_coords(cma_args, np.vstack((proj_coords, optimization_path_mean)).T) from stable_baselines.ppo2.run_mujoco import eval_return thetas_to_eval = [origin_param + x * first_2_pcs[0] + y * first_2_pcs[1] for y in ycoordinates_to_eval for x in xcoordinates_to_eval] tic = time.time() eval_returns = Parallel(n_jobs=-1, max_nbytes='100M') \ (delayed(eval_return)(cma_args, save_dir, theta, cma_args.eval_num_timesteps, i) for (i, theta) in enumerate(thetas_to_eval)) toc = time.time() logger.log(f"####################################1st version took {toc-tic} seconds") plot_contour_trajectory(cma_plot_dir, f"cma redo___{origin}_origin_eval_return_contour_plot", xcoordinates_to_eval, ycoordinates_to_eval, eval_returns, proj_coords[:, 0], proj_coords[:, 1], final_pca.explained_variance_ratio_, num_levels=25, show=False, sub_alg_path=optimization_path_mean.T) opt_mean_path_in_old_basis = [mean_projected_param.dot(first_2_pcs) + mean_param for mean_projected_param in optimization_path_mean] distance_to_final = [LA.norm(opt_mean - final_param, ord=2) for opt_mean in opt_mean_path_in_old_basis] distance_to_final_plot_name = f"cma redo distance_to_final over generations " plot_2d(cma_plot_dir, distance_to_final_plot_name, np.arange(len(distance_to_final)), distance_to_final, "num generation", "distance_to_final", False) # plot_3d_trajectory(cma_plot_dir, "end_point_origin_eval_return_3d_plot", xcoordinates_to_eval, ycoordinates_to_eval, # eval_returns, proj_xcoord, proj_ycoord, # result["explained_variance_ratio"][:2], # num_levels=15, show=False) if __name__ == '__main__': main() #TODO Give filenames more info to identify which hyperparameter is the data for
import tublatexmaker.latex_creater as convert dict_of_entries = { "(Bahth fī) uṣūl al-fiqh": { "displaytitle": "", "exists": "1", "fulltext": "(Bahth fī) uṣūl al-fiqh", "fullurl": "http://144.173.140.108:8080/tub/index.php/(Bahth_f%C4%AB)_u%E1%B9%A3%C5%ABl_al-fiqh", "namespace": 0, "printouts": { "Title (Arabic)": ["بحث في) أصول " "الفقه)"], "Title (transliterated)": ["(Bahth " "fī) " "uṣūl " "al-fiqh"], "Has author(s)": [{"fulltext": "Muḥammad Jawād b. Aḥmad"}], "Death (Hijri)": [1299], "Death (Gregorian)": [{"timestamp": "-2776982400", "raw": "1/1882"}], "Death (Hijri) text": ["13th century"], "Death (Gregorian) text": ["19th century"], }, } } edition = [ { "City": [ { "fulltext": "Qum", "fullurl": "http://144.173.140.108:8080/tub/index.php/Qum", "namespace": 0, "exists": "1", "displaytitle": "Qom", } ], "Edition type": ["Modern print"], "Has a publisher": ["Majmaʿ al-Fikr al-Islāmī"], "Has editor(s)": ["unknown"], "Published edition of title": [ { "fulltext": "al-Fawāʾid al-Ḥāʾiriyya", "fullurl": "http://144.173.140.108:8080/tub/index.php/al-Faw%C4%81%CA%BEid_al-%E1%B8%A4%C4%81%CA%BEiriyya", "namespace": 0, "exists": "1", "displaytitle": "", } ], "Sort title": ["al-Fawaid al-Ḥairiyya"], "Title (Arabic)": ["الفوائد الحائرية"], "Title (transliterated)": ["al-Fawāʾid al-Ḥāʾiriyya"], "Year (Gregorian)": [], "Year (Gregorian) text": [], "Year (Hijri)": [], "Year (Hijri) text": [], "page_name": "Edition:al-Fawāʾid al-Ḥāʾiriyya", }, { "City": [ { "fulltext": "Qum", "fullurl": "http://144.173.140.108:8080/tub/index.php/Qum", "namespace": 0, "exists": "1", "displaytitle": "Qom", } ], "Edition type": ["Modern print"], "Has a publisher": ["Majmaʿ al-Fikr al-Islāmī"], "Has editor(s)": ["unknown"], "Published edition of title": [ { "fulltext": "al-Fawāʾid al-Ḥāʾiriyya", "fullurl": "http://144.173.140.108:8080/tub/index.php/al-Faw%C4%81%CA%BEid_al-%E1%B8%A4%C4%81%CA%BEiriyya", "namespace": 0, "exists": "1", "displaytitle": "", } ], "Sort title": ["al-Fawaid al-Ḥairiyya"], "Title (Arabic)": ["الفوائد الحائرية"], "Title (transliterated)": ["al-Fawāʾid al-Ḥāʾiriyya"], "Year (Gregorian)": [], "Year (Gregorian) text": [], "Year (Hijri)": [], "Year (Hijri) text": [], "page_name": "Edition:al-Fawāʾid al-Ḥāʾiriyya (1415/1994)", }, ] def create_expected_latex(transliterated_title: str, arabic_title: str) -> str: return f""" \\item \\textbf{{{transliterated_title}}} {arabic_title} \\newline Muḥammad b. Faraj al-Ḥimyarī al-Najafī \\newline (1059/1649) \\newline \\newline \\textbf{{Description}} \\newline A short one-line description. \\newline \\newline \\textbf{{Principle manuscripts}} \\newline This manuscript \\newline \\newline \\textbf{{Editions}} \\newline This edition. \\newline \\newline \\textbf{{Commentaries}} \\newline This commentary. \\newline """ """ def test_convert_to_entry(): entry_values = list(dict_of_entries.values())[0]["printouts"] expected = create_expected_latex("(Bahth fī) uṣūl al-fiqh", "بحث في) أصول الفقه)") assert convert._make_entry(entry_values) == expected """ def test_create_dates(): entry = { "Death (Hijri)": [1299], "Death (Gregorian)": [{"timestamp": "-2776982400", "raw": "1/1882"}], "Death (Hijri) text": ["13th century"], "Death (Gregorian) text": ["19th century"], } assert convert._create_dates(entry) == "(13th century/19th century)" def test_make_editions(): assert ( convert._make_editions_section(edition) == """ \\textbf{Editions}\n\\begin{itemize} \\item \\end{itemize}\n """ )
from typing import Sequence import numpy as np import xarray from xarray import DataArray from xclim.indices.run_length import rle_1d def get_longest_run_start_index( arr: DataArray, window: int = 1, dim: str = "time", ) -> DataArray: return xarray.apply_ufunc( get_index_of_longest_run, arr, input_core_dims=[[dim]], kwargs={"window": window}, vectorize=True, dask="parallelized", output_dtypes=[float], ) def get_index_of_longest_run(arr: Sequence[bool], window: int = 1) -> int: values, rl, pos = rle_1d(arr) if not np.any(values) or np.all(values * rl < window): # type:ignore return 0 index_of_max = np.nanargmax( np.where(values * rl >= window, rl, np.NaN) # type:ignore ) return pos[index_of_max] # type:ignore def get_first_occurrence_index(da: DataArray) -> DataArray: """ Return the index of the first True value in the 3D booleans array along time dimension. """ stacked = da.stack(latlon=("lat", "lon")) res = stacked.argmax("time") return res.unstack()
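# --- Usage sketch (not part of the original module) ---
# A small, hypothetical example of get_index_of_longest_run on a plain boolean
# array; it is the per-pixel kernel that get_longest_run_start_index applies
# along the time dimension. With window=2, runs shorter than 2 are ignored and
# the start index of the longest qualifying run is returned.
if __name__ == "__main__":
    example = np.array([False, True, True, False, True, True, True, False])
    # Runs: T(len 2) starting at 1, T(len 3) starting at 4 -> prints 4
    print(get_index_of_longest_run(example, window=2))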
# Generated by Django 2.2.7 on 2019-11-20 17:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('notes', '0001_initial'), ] operations = [ migrations.AlterField( model_name='note', name='media', field=models.ImageField(help_text='Optional image to add to note.', upload_to='media'), ), ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import abstractmethod


class Sampler(object):
    """Base class for all samplers.

    ``__iter__`` must be implemented whether you use IterableSampler or
    SequentialSampler. If you implement your own sampler, be clear about the
    type of your dataset: for an IterableDataset (``__iter__`` implemented),
    use IterableSampler; for an index-style dataset (``__getitem__``
    implemented), the dataset must also implement ``__len__``.
    """

    def __init__(self, data_source):
        pass

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError


class IterableSampler(Sampler):
    """Internally samples elements; used for datasets retrieved element by
    iterator. Yields None as a placeholder for each iteration.

    Args:
        dataset (Dataset): set to None
    """

    def __init__(self):
        super(IterableSampler, self).__init__(None)

    def __iter__(self):
        while True:
            yield None

    def __len__(self):
        return 0


class SequentialSampler(Sampler):
    """Sequentially samples elements; used for datasets retrieved element by
    index.

    Args:
        dataset (Dataset): index dataset (implements ``__len__``) for sampling
    """

    def __init__(self, dataset):
        self.dataset = dataset

    def __iter__(self):
        return iter(range(len(self.dataset)))

    def __len__(self):
        return len(self.dataset)


class BatchSampler(Sampler):
    """Yields a mini-batch of indices for SequentialSampler, or a
    batch-size-length list of None placeholders for IterableSampler.

    Args:
        sampler (Sampler): sampler used for generating batches.
        batch_size (int): size of mini-batch.
        drop_last (bool): if True, drop the last incomplete batch; otherwise
            return a final batch smaller than batch_size.
    """

    def __init__(self, sampler, batch_size, drop_last=True):
        if isinstance(drop_last, bool):
            self.drop_last = drop_last
        else:
            raise ValueError("drop_last only supports bool as input")
        self.sampler = sampler
        self.batch_size = batch_size

    def __iter__(self):
        batch = []
        for idx in self.sampler:
            batch.append(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if len(batch) > 0 and not self.drop_last:
            yield batch

    def __len__(self):
        if self.drop_last:
            return len(self.sampler) // self.batch_size
        else:
            return (len(self.sampler) + self.batch_size - 1) // self.batch_size
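# --- Usage sketch (not part of the original module) ---
# A minimal example of combining SequentialSampler and BatchSampler defined
# above. The "dataset" is just a list, which is enough because
# SequentialSampler only requires __len__.
if __name__ == "__main__":
    dataset = list(range(10))
    sampler = SequentialSampler(dataset)
    batches = BatchSampler(sampler, batch_size=4, drop_last=False)
    # With drop_last=False the final short batch is kept:
    # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    print(list(batches))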
from __future__ import annotations import shutil from betfairlightweight.resources.streamingresources import MarketDefinition from betfairlightweight.resources.bettingresources import MarketCatalogue, MarketBook from betfairlightweight.streaming.listener import StreamListener import sqlalchemy from sqlalchemy.sql.expression import ColumnElement from sqlalchemy.sql.selectable import CTE from sqlalchemy import create_engine, func, DECIMAL from sqlalchemy.orm import Session from sqlalchemy.sql.schema import Table from sqlalchemy.ext.automap import automap_base from sqlalchemy.dialects.postgresql import base as psqlbase from sqlalchemy.dialects.postgresql import json as psqljson from sqlalchemy.sql.functions import sum as sql_sum from sqlalchemy_filters.filters import Operator as SqlOperator from sqlalchemy.orm.query import Query from queue import Queue import logging from typing import Optional, Dict, List, Callable, Any, Tuple, Union, Literal, TypedDict from os import path import os from datetime import datetime, timedelta import zlib import yaml import json import sys import dateparser from myutils import dictionaries, registrar from ..exceptions import DBException from .dbfilter import DBFilterHandler active_logger = logging.getLogger(__name__) active_logger.setLevel(logging.INFO) ProcessorKey = Literal['process_in', 'process_out', 'processors'] ProcessorMap = Dict[type, Dict[ProcessorKey, List[str]]] Processor = Callable[[Any], Any] db_processors = registrar.Registrar[Processor]() DB_PROCESSORS: ProcessorMap = { psqlbase.BYTEA: { 'process_in': [ 'prc_compress' ], 'process_out': [ 'prc_decompress', ] }, } CACHE_PROCESSORS: ProcessorMap = { psqlbase.BYTEA: { 'process_in': [ 'prc_str_encode', ], 'process_out': [ 'prc_str_decode' ] }, psqlbase.TIMESTAMP: { 'process_in': [ 'prc_dt_from_str', ], 'process_out': [ 'prc_dt_to_str' ] }, psqlbase.INTERVAL: { 'process_in': [ 'prc_td_from_float', ], 'process_out': [ 'prc_td_to_float' ] }, psqljson.JSON: { 'process_in': [ 'prc_json_decode', ], 'process_out': [ 'prc_json_encode' ] } } @db_processors.register_element def prc_str_to_dt(data): return dateparser.parse(data, settings={'DATE_ORDER': 'DMY'}) # use UK day-month-year instead of US month-day-year @db_processors.register_element def prc_compress(data): return zlib.compress(data) @db_processors.register_element def prc_decompress(data): return zlib.decompress(data) @db_processors.register_element def prc_str_encode(data): return data.encode() @db_processors.register_element def prc_str_decode(data): return data.decode() @db_processors.register_element def prc_td_to_float(data: timedelta): return data.total_seconds() @db_processors.register_element def prc_td_from_float(data): return timedelta(seconds=data) @db_processors.register_element def prc_dt_from_str(data): return datetime.fromisoformat(data) @db_processors.register_element def prc_dt_to_str(data): return data.isoformat() @db_processors.register_element def prc_json_encode(data): return json.dumps(data) @db_processors.register_element def prc_json_decode(data): return json.loads(data) class DBBase: def __init__( self, db_lang=None, db_user=None, db_host=None, db_port=None, db_name=None, db_pwd=None, db_engine=None, col_processors=None, engine_kwargs=None ): self.col_prcs = col_processors or DB_PROCESSORS self.Base = automap_base() engine_kwargs = engine_kwargs or {} # TODO - remove? 
engine_str = f'+{db_engine}' if db_engine else '' url = f'{db_lang}{engine_str}://{db_user}:{db_pwd}@{db_host}:{db_port}/{db_name}' # prioritise engine kwargs if provided - "url" key will override constructed if provided engine_kwargs = {'url': url} | engine_kwargs active_logger.info(f'connecting to database with kwargs:\n{engine_kwargs}') self.engine = create_engine(**engine_kwargs) self.Base.prepare(self.engine, reflect=True) self.session = Session(self.engine) self.tables: Dict[str, Table] = self.Base.metadata.tables active_logger.info(f'tables found: {list(self.tables.keys())}') def _validate_tbl(self, tbl_name: str): if tbl_name not in self.tables: raise DBException(f'error inserting row, table "{tbl_name}" not found in tables') if tbl_name not in self.Base.classes: raise DBException(f'error inserting row, table "{tbl_name}" not found in base') def _validate_cols(self, tbl_name: str, cols: List[str]): for col in cols: if col not in self.tables[tbl_name].columns: raise DBException(f'column "{col}" not found in table "{tbl_name}"') def _validate_pkeys(self, tbl_nm: str, pkey_flts: Dict): tbl_pkeys = tuple(x.name for x in self.tables[tbl_nm].primary_key) flt_pkeys = tuple(pkey_flts.keys()) if tbl_pkeys != flt_pkeys: raise DBException( f'error writing cache, table primary keys "{tbl_pkeys}" does not match specified "{flt_pkeys}"' ) def apply_basic_filters(self, tbl_nm: str, pkey_flts: Dict) -> Query: return self.session.query(self.tables[tbl_nm]).filter( *[self.tables[tbl_nm].columns[k] == v for k, v in pkey_flts.items()] ) def row_exist(self, tbl_nm: str, pkey_flts: Dict) -> bool: """ Determine if row(s) exist in database for a given table """ return self.apply_basic_filters(tbl_nm, pkey_flts).count() >= 1 def _value_processors(self, value: Any, tbl_name: str, col: str, prcs: ProcessorMap, prc_type: ProcessorKey) -> Any: col_type = type(self.tables[tbl_name].columns[col].type) prc_nms = prcs.get(col_type, {}).get(prc_type) if prc_nms: if type(prc_nms) is not list: raise DBException(f'processors "{prc_type}" for column "{col}" not list') for i, prc_nm in enumerate(prc_nms): prc_func = db_processors[prc_nm] active_logger.info(f'running processor "{prc_type}" #{i}, "{prc_nm}" on column "{col}"') value_out = prc_func(value) value = value_out return value def _process_columns(self, data: Dict, tbl_name: str, prcs: ProcessorMap, prc_type: ProcessorKey) -> None: self._validate_tbl(tbl_name) self._validate_cols(tbl_name, list(data.keys())) for col in data.keys(): val_in = data[col] if val_in is None: active_logger.warning(f'table "{tbl_name}", col "{col}" value is None, skipping processing') else: val_out = self._value_processors(val_in, tbl_name, col, prcs, prc_type) data[col] = val_out def insert_row(self, tbl_name: str, data: Dict): active_logger.info(f'inserting row of information into table "{tbl_name}"') active_logger.info(f'keys passed are:\n' f'{yaml.dump([str(k) for k in data.keys()])}') self._process_columns(data, tbl_name, self.col_prcs, 'process_in') row = self.Base.classes[tbl_name](**data) self.session.add(row) self.session.commit() def read_rows(self, tbl_nm: str, pkey_flts: Dict) -> List[Dict]: active_logger.info(f'reading rows from table "{tbl_nm}" with filter "{pkey_flts}"') self._validate_tbl(tbl_nm) self._validate_pkeys(tbl_nm, pkey_flts) if not self.row_exist(tbl_nm, pkey_flts): raise DBException(f'row in table "{tbl_nm}" with filters "{pkey_flts}" does not exist') sql_rows = self.apply_basic_filters(tbl_nm, pkey_flts).all() rows = [] for row in sql_rows: row_dict = { 
str(k): v for k, v in dict(row).items() } # convert sqlalchemy key objects to str for yaml self._process_columns(row_dict, tbl_nm, self.col_prcs, 'process_out') rows.append(row_dict) return rows def read_row(self, tbl_nm: str, pkey_flts: Dict) -> Dict: rows = self.read_rows(tbl_nm, pkey_flts) if len(rows) != 1: raise DBException(f'expected 1 row from table "{tbl_nm}" with filters "{pkey_flts}", got {len(rows)}') return rows[0] def delete_rows(self, tbl_nm: str, pkey_flts: Dict) -> int: active_logger.info(f'deleting rows from table "{tbl_nm}" with filters: "{pkey_flts}"') q = self.apply_basic_filters(tbl_nm, pkey_flts) ret = q.delete(synchronize_session='fetch') self.session.commit() return ret def order_query(self, query: Query, cols, order_col: str, order_asc: bool): """apply ordering based on column of cte""" if order_col not in cols: raise DBException(f'cannot order by column "{order_col}", does not exist in CTE') order_func = sqlalchemy.asc if order_asc else sqlalchemy.desc return query.order_by(order_func(cols[order_col])) class DBCache(DBBase): def __init__(self, cache_root, cache_processors=None, **kwargs): super().__init__(**kwargs) self.cache_root = path.abspath(path.expandvars(cache_root)) if not path.isdir(self.cache_root): active_logger.info(f'creating cache root directory at: "{self.cache_root}"') os.makedirs(self.cache_root) else: active_logger.info(f'existing cache root directory found at: "{self.cache_root}"') self.cache_prcs = cache_processors or CACHE_PROCESSORS def cache_tbl(self, tbl_nm) -> str: return path.join(self.cache_root, tbl_nm) def cache_dir(self, tbl_nm: str, pkey_flts: Dict) -> str: return path.join(self.cache_tbl(tbl_nm), *pkey_flts.values()) def cache_col(self, tbl_nm: str, pkey_flts: Dict, col: str) -> str: return path.join(self.cache_dir(tbl_nm, pkey_flts), col) def clear_cache(self, tbl_nm: str, pkey_flts: Dict): active_logger.info(f'clearing cache from table "{tbl_nm}" with filters "{pkey_flts}"') p = self.cache_dir(tbl_nm, pkey_flts) if not path.exists(p): active_logger.info(f'path "{p}" does not exist, skipping') else: if not path.isdir(p): raise DBException(f'path "{p}" is not a directory') active_logger.info(f'removing cache dir: "{p}"') os.rmdir(p) def write_to_cache(self, tbl_nm: str, pkey_flts: Dict, data: Dict): self._validate_pkeys(tbl_nm, pkey_flts) self._validate_tbl(tbl_nm) d = self.cache_dir(tbl_nm, pkey_flts) active_logger.info(f'writing cache to path: "{d}"') if path.exists(d): active_logger.info('path already exists, exiting...') return os.makedirs(d, exist_ok=True) self._process_columns(data, tbl_nm, self.cache_prcs, 'process_out') for k in pkey_flts.keys(): data.pop(k, None) for col in data.keys(): if data[col] is None: active_logger.warning(f'column "{col}" value is none, skipping') else: p = self.cache_col(tbl_nm, pkey_flts, col) active_logger.info(f'writing column "{col}" to file: "{p}"') with open(p, 'w') as f: f.write(data[col]) def read_to_cache(self, tbl_nm: str, pkey_flts: Dict): active_logger.info(f'reading table "{tbl_nm}" row to cache with filters "{pkey_flts}"') data = self.read_row(tbl_nm, pkey_flts) self.write_to_cache(tbl_nm, pkey_flts, data) def insert_from_cache(self, tbl_nm, pkey_flts: Dict): active_logger.info(f'insert row to table "{tbl_nm}" from cache with filters "{pkey_flts}"') self._validate_pkeys(tbl_nm, pkey_flts) self._validate_tbl(tbl_nm) d = self.cache_dir(tbl_nm, pkey_flts) active_logger.info(f'getting files from cache directory: "{d}"') if not path.isdir(d): raise DBException(f'expected to be directory: 
"{d}"') data = pkey_flts.copy() _, _, files = next(os.walk(d)) self._validate_cols(tbl_nm, files) # files should match column names for fnm in files: fp = self.cache_col(tbl_nm, pkey_flts, fnm) active_logger.info(f'reading column data from file: "{fp}"') with open(fp, 'r') as f: data[fnm] = f.read() self._process_columns(data, tbl_nm, self.cache_prcs, 'process_in') self.insert_row(tbl_nm, data) def _cache_pkeys(self, tbl_nm: str): """ get list of primary key filters from nested dirs in cache """ pkey_names = tuple(x.name for x in self.tables[tbl_nm].primary_key) def _get_pkeys(_dir: str, _base_pkey: Dict, _lvl) -> List: if not path.isdir(_dir): return [] _, dirnames, _ = next(os.walk(_dir)) return [_base_pkey | {pkey_names[_lvl]: d} for d in dirnames] lvl = 0 flts = [{}] while lvl < len(pkey_names): flts_out = [] for f in flts: d = self.cache_dir(tbl_nm, f) flts_out += _get_pkeys(d, f, lvl) flts = flts_out lvl += 1 return flts def scan_cache(self, tbl_nm: str, post_insert: Optional[Callable[[str, Dict], None]] = None) -> List[Dict]: tbl_root = self.cache_tbl(tbl_nm) active_logger.info(f'scanning for cached rows for table "{tbl_nm}" to insert in "{tbl_root}"') flts = self._cache_pkeys(tbl_nm) added_pkeys = [] for pkey_filters in flts: if self.row_exist(tbl_nm, pkey_filters): active_logger.info(f'row "{pkey_filters}" already exists in database, skipping...') else: self.insert_from_cache(tbl_nm, pkey_filters) added_pkeys.append(pkey_filters) if post_insert is not None: post_insert(tbl_nm, pkey_filters) return added_pkeys def wipe_cache(self) -> Tuple[int, int]: active_logger.info(f'clearing cache root at "{self.cache_root}"') _, dirnames, filenames = next(os.walk(self.cache_root)) for fnm in filenames: p = path.join(self.cache_root, fnm) os.remove(p) for dnm in dirnames: p = path.join(self.cache_root, dnm) shutil.rmtree(p) return len(filenames), len(dirnames) class QueryFilter(TypedDict): value: object field: str op: str def apply_filter_spec(tbl: Table, q: Query, filters_spec: List[QueryFilter]) -> Query: """sqlalchemy_filters `apply_filters` function doesn't work with Sqlalchemy V1.14 so i've bodged it myself until they sort it out""" conditions = [ SqlOperator.OPERATORS[f['op']](tbl.columns[f['field']], f['value']) for f in filters_spec ] return q.filter(*conditions) class BettingDB: """ Betting database handler Manages session that connects to remote SQL ase for querying "Historic" markets to are files downloaded directly from betfair's historical data website "Recorded" markets are files from betfair markets recorded through a python script locally, which are recorded with the accompanying market catalogue file """ def __init__(self, **kwargs): self._dbc = DBCache(**kwargs) def read(self, tbl_nm: str, pkey_flts: Dict): return self._dbc.read_row(tbl_nm, pkey_flts) def close(self): self._dbc.session.close() def meta_serialise(self, market_info: Dict) -> None: """run caching serialisation on market information retrieved from 'marketmeta' database""" self._dbc._process_columns(market_info, 'marketmeta', self._dbc.cache_prcs, 'process_out') def meta_de_serialise(self, market_info: Dict) -> None: """run caching de-serialisation on market information that has been serialised""" self._dbc._process_columns(market_info, 'marketmeta', self._dbc.cache_prcs, 'process_in') @staticmethod def get_meta(first_book: MarketBook, cat: MarketCatalogue = None) -> Dict: """ Get metadata corresponding to the "Meta" table in the betting database for a given betfair Market Parameters ---------- first_book : first 
MarketBook for betfair Market cat : if market is recorded and not historic, this needs to be passed to get venue and runner names Returns dict of metadata ------- """ mktdef: MarketDefinition = first_book.market_definition mktid = first_book.market_id init_time = first_book.publish_time pre_off = mktdef.market_time - init_time metadata = { 'market_id': mktid, 'sport_id': mktdef.event_type_id, 'market_time': mktdef.market_time, 'market_type': mktdef.market_type, 'betting_type': mktdef.betting_type, 'country_code': mktdef.country_code, 'event_id': mktdef.event_id, 'event_name': mktdef.event_name, # historical 'timezone': mktdef.timezone, 'venue': mktdef.venue, 'init_time': init_time, 'pre_off': pre_off, 'format': 'historic', } if cat is not None: metadata['event_name'] = cat.event.name metadata['venue'] = cat.event.venue metadata['format'] = 'recorded' return metadata @staticmethod def get_first_book(file_path: str) -> Optional[MarketBook]: """ read the first line in a historical/streaming file and get the MarketBook parsed object, without reading or processing the rest of the file """ with open(file_path) as f: l = f.readline() q = Queue() # stop it winging about stream latency by using infinity as max latency listener = StreamListener(q, max_latency=sys.float_info.max) listener.register_stream(0, 'marketSubscription') listener.on_data(l) return listener.output_queue.get()[0] def insert_market_meta(self, market_id: str): active_logger.info(f'creating metadata database entry for market "{market_id}"') pkey_flts = {'market_id': market_id} self._dbc.read_to_cache('marketstream', pkey_flts) stream_path = self._dbc.cache_col('marketstream', pkey_flts, 'stream_updates') bk = self.get_first_book(stream_path) cat = None cat_path = self._dbc.cache_col('marketstream', pkey_flts, 'catalogue') if path.exists(cat_path): if path.getsize(cat_path): with open(cat_path, 'r') as f: cat_dict = json.loads(f.read()) try: cat = MarketCatalogue(**cat_dict) except TypeError as e: raise DBException(f'failed to create market catalogue: {e}') if cat is None: names = {r.selection_id: r.name for r in bk.market_definition.runners} else: names = {r.selection_id: r.runner_name for r in cat.runners} for runner_id, name in names.items(): active_logger.info(f'creating row for market "{market_id}", runner "{runner_id}", name "{name}"') self._dbc.insert_row('marketrunners', { 'market_id': market_id, 'runner_id': runner_id, 'runner_name': name }) meta_data = self.get_meta(bk, cat) self._dbc.insert_row('marketmeta', meta_data) def insert_strategy_runners(self, pkey_filters, profit_func: Callable[[str], Dict]): p = self._dbc.cache_col('strategyupdates', pkey_filters, 'strategy_updates') if not path.isfile(p): raise DBException(f'expected strategy update file at "{p}"') runner_profits = profit_func(p) for k, v in runner_profits.items(): self._dbc.insert_row('strategyrunners', pkey_filters | { 'runner_id': k, 'profit': v }) def wipe_cache(self) -> Tuple[int, int]: return self._dbc.wipe_cache() def scan_mkt_cache(self) -> List[Dict]: """ scan marketstream cache files - insert into database if not exist and add corresponding marketmeta and runner rows """ def mkt_post_insert(tbl_name, pkey_flts): if tbl_name != 'marketstream': raise DBException(f'expected "marketstream" table') self.insert_market_meta(pkey_flts['market_id']) return self._dbc.scan_cache('marketstream', mkt_post_insert) def scan_strat_cache(self, profit_func: Callable[[str], Dict]) -> List[Dict]: """ scan strategy cache files - insert into database if not exist """ def 
strat_post_insert(tbl_nm, pkey_flts): self.insert_strategy_runners(pkey_flts, profit_func) added_keys = self._dbc.scan_cache('strategymeta') self._dbc.scan_cache('strategyupdates', strat_post_insert) return added_keys def write_strat_info(self, strategy_id, type: str, name: str, exec_time: datetime, info: dict): data = { 'type': type, 'name': name, 'exec_time': exec_time, 'info': info } self._dbc.write_to_cache( tbl_nm='strategymeta', pkey_flts={ 'strategy_id': str(strategy_id) }, data=data ) def path_mkt_usr_updates(self, market_id) -> str: return self._dbc.cache_col( tbl_nm='marketstream', pkey_flts={ 'market_id': market_id }, col='user_data' ) def path_mkt_cat(self, market_id) -> str: return self._dbc.cache_col( tbl_nm='marketstream', pkey_flts={ 'market_id': market_id }, col='catalogue', ) def path_mkt_updates(self, market_id) -> str: return self._dbc.cache_col( tbl_nm='marketstream', pkey_flts={ 'market_id': market_id }, col='stream_updates', ) def path_strat_features(self, market_id, strategy_id) -> str: return self._dbc.cache_col( tbl_nm='strategyupdates', pkey_flts={ 'strategy_id': str(strategy_id), 'market_id': market_id, }, col='strategy_features' ) def path_strat_updates(self, market_id, strategy_id) -> str: return self._dbc.cache_col( tbl_nm='strategyupdates', pkey_flts={ 'strategy_id': str(strategy_id), 'market_id': market_id }, col='strategy_updates' ) def paths_market_updates(self, filter_spec: List[QueryFilter], limit=200): tbl = self._dbc.tables['marketmeta'] q = self._dbc.session.query(tbl) q_flt = apply_filter_spec(tbl, q, filter_spec) rows = q_flt.limit(limit).all() update_paths = [] for row in rows: mkt_flt = {'market_id': row.market_id} self._dbc.read_to_cache('marketstream', mkt_flt) p = self._dbc.cache_col('marketstream', mkt_flt, 'stream_updates') if not path.isfile(p): raise DBException(f'expected file at stream update path: "{p}"') update_paths.append(p) return update_paths def rows_runners(self, market_id, strategy_id) -> List[Dict]: """ get filters rows of runners, joined with profit column from strategy """ sr = self._dbc.tables['strategyrunners'] cte_strat = self._dbc.session.query( sr.columns['runner_id'], sr.columns['profit'].label('runner_profit') ).filter( sr.columns['strategy_id'] == strategy_id, sr.columns['market_id'] == market_id ).cte() rn = self._dbc.tables['marketrunners'] rows = self._dbc.session.query( rn, cte_strat.c['runner_profit'], ).join( cte_strat, rn.columns['runner_id'] == cte_strat.c['runner_id'], isouter=True, ).filter( rn.columns['market_id'] == market_id ).all() return [dict(row) for row in rows] def rows_market(self, cte, col_names, max_rows, order_col=None, order_asc=False) -> List[Dict]: cols = [cte.c[nm] for nm in col_names] q = self._dbc.session.query(*cols) if order_col is not None: q = self._dbc.order_query(q, cte.c, order_col, order_asc) rows = q.limit(max_rows).all() return [dict(row) for row in rows] # TODO - implement in UI def rows_strategy(self, max_rows) -> List[Dict]: shn = self._dbc.session sm = self._dbc.tables['strategymeta'] sr = self._dbc.tables['strategyrunners'] p_cte = shn.query( sr.columns['strategy_id'], func.sum(sr.columns['profit']).label('total_profit') ).group_by(sr.columns['strategy_id']).cte() m_cte = shn.query(sr.c['strategy_id'], sr.c['market_id']).distinct().cte() m_cte = shn.query( m_cte.c['strategy_id'], func.count(m_cte.c['market_id']).label('n_markets') ).group_by(m_cte.c['strategy_id']).cte() q = shn.query(sm, p_cte.c['total_profit'], m_cte.c['n_markets']).join( p_cte, sm.c['strategy_id'] == 
p_cte.c['strategy_id'], isouter=True ).join( m_cte, sm.c['strategy_id'] == m_cte.c['strategy_id'], isouter=True ) return [dict(row) for row in q.limit(max_rows).all()] def filters_labels(self, filters: DBFilterHandler, cte) -> List[List[Dict[str, Any]]]: return filters.filters_labels(self._dbc.session, self._dbc.tables, cte) def cte_count(self, cte: CTE) -> int: return self._dbc.session.query(cte).count() def strategy_count(self) -> int: return self._dbc.session.query(self._dbc.tables['strategymeta']).count() def strategy_delete(self, strategy_id) -> Tuple[int, int ,int]: strategy_id = str(strategy_id) active_logger.info(f'attempting to delete strategy: "{strategy_id}"') pkey_flt = {'strategy_id': strategy_id} if not self._dbc.row_exist('strategymeta', pkey_flt): raise DBException(f'strategy does not exist, using filters: "{pkey_flt}"') if not strategy_id: raise DBException(f'trying to delete strategy where ID passed is blank!') rows = self._dbc.read_rows('strategymeta', pkey_flt) if len(rows) != 1: raise DBException(f'expected 1 strategy meta row with filter: "{pkey_flt}"') n_runners = self._dbc.delete_rows('strategyrunners', pkey_flt) active_logger.info(f'deleted {n_runners} rows from "strategyrunners" table') n_mkts = self._dbc.delete_rows('strategyupdates', pkey_flt) active_logger.info(f'deleted {n_mkts} rows from "strategyupdates" table') n_meta = self._dbc.delete_rows('strategymeta', pkey_flt) active_logger.info(f'deleted {n_meta} rows from "strategymeta" table') return n_meta, n_mkts, n_runners def filters_strat_cte(self, strat_filters: DBFilterHandler) -> CTE: """ get filtered database strategy common table expression (CTE) """ strat_meta = self._dbc.tables['strategymeta'] q = self._dbc.session.query(strat_meta).filter( *strat_filters.filters_conditions(strat_meta) ) return q.cte() def filters_mkt_cte(self, strategy_id, column_filters: List[ColumnElement]) -> CTE: meta = self._dbc.tables['marketmeta'] sr = self._dbc.tables['strategyrunners'] if strategy_id: strat_cte = self._dbc.session.query( sr.columns['market_id'], sql_sum(sr.columns['profit']).label('market_profit') ).filter( sr.columns['strategy_id'] == strategy_id ).group_by( sr.columns['market_id'] ).cte() q = self._dbc.session.query( meta, strat_cte.c['market_profit'] ).join( strat_cte, meta.columns['market_id'] == strat_cte.c['market_id'] ) else: q = self._dbc.session.query( meta, sqlalchemy.null().label('market_profit') ) q = q.filter(*column_filters) return q.cte() def cache_strat_updates(self, strategy_id, market_id): pkey_flts = { 'strategy_id': str(strategy_id), 'market_id': market_id } self._dbc.read_to_cache('strategyupdates', pkey_flts) def cache_strat_meta(self, strategy_id): pkey_flt = {'strategy_id': strategy_id} self._dbc.read_to_cache('strategymeta', pkey_flt) def cache_mkt_stream(self, market_id): pkey_flt = {'market_id': market_id} self._dbc.read_to_cache('marketstream', pkey_flt) def read_mkt_meta(self, market_id) -> Dict: pkey_flt = {'market_id': market_id} return self._dbc.read_row('marketmeta', pkey_flt) def _lost_ids(self, t1: Table, t2, id_col: str): """ get a query for where table `t1` has rows that are not reflected in table `t2`, joined by a column with name specified by `id_col`. table `t2` can be a 1-to-1 mapping of rows from `t1` or 1 to many. E.g. 
if `t1` had an id column of 'sample_id_col' and some values [1,2,3], and `t2` had hundreds of rows but only with 'sample_id_col' equal to 1 or 2, then the function would return the 'sample_id_col' value of 3 """ cte = self._dbc.session.query( t2.columns[id_col] ).group_by(t2.columns[id_col]).cte() return self._dbc.session.query( t1.columns[id_col], cte.c[id_col] ).join( cte, t1.columns[id_col] == cte.c[id_col], isouter=True ).filter(cte.c[id_col] == None) def health_check(self): mkt_stm = self._dbc.tables['marketstream'] mkt_met = self._dbc.tables['marketmeta'] mkt_run = self._dbc.tables['marketrunners'] # market stream/meta row counts n_mkt = self._dbc.session.query(mkt_stm).count() active_logger.info(f'{n_mkt} market stream rows') n_met = self._dbc.session.query(mkt_met).count() active_logger.info(f'{n_met} market meta rows') # market stream rows without corresponding market meta row q = self._lost_ids(mkt_stm, mkt_met, 'market_id') for row in q.all(): active_logger.error(f'market "{row[0]}" does not have a meta row') # market runner meta row count nrun = self._dbc.session.query(mkt_run).count() active_logger.info(f'{nrun} market runner rows') # market stream rows without any corresponding runner rows q = self._lost_ids(mkt_stm, mkt_run, 'market_id') for row in q.all(): active_logger.error(f'market "{row[0]}" does not have any runner rows') srt_met = self._dbc.tables['strategymeta'] srt_run = self._dbc.tables['strategyrunners'] srt_udt = self._dbc.tables['strategyupdates'] # strategy meta & strategy market update row counts n_srtmet = self._dbc.session.query(srt_met).count() active_logger.info(f'{n_srtmet} strategy meta rows found') n_srtudt = self._dbc.session.query(srt_udt).count() active_logger.info(f'{n_srtudt} strategy market update rows found') # strategy meta rows without any strategy update rows q = self._lost_ids(srt_met, srt_udt, 'strategy_id') for row in q.all(): active_logger.error(f'strategy "{row[0]}" does not have any market updates') # strategy runner row count n_srtrun = self._dbc.session.query(srt_run).count() active_logger.info(f'{n_srtrun} strategy runner rows found') # strategy meta rows without any strategy runner rows q = self._lost_ids(srt_met, srt_run, 'strategy_id') for row in q.all(): active_logger.error(f'strategy "{row[0]}" does not have any runner rows')
"""rest_vk_api URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf.urls import url from main import views urlpatterns = [ url(r'^users/(?P<user_ids>[0-9]+).*', views.get_user, name='get_users'), url(r'^status$', views.status, name='status'), ]
#!/usr/bin/env python

from testWrt import testsetup
from testWrt.lib import SSHOpenWrt

if __name__ == "__main__":
    ts = testsetup.create_generic()
    device = SSHOpenWrt(hostname="192.168.1.1", password="test")
    ret = device.portscan(22)
    print(ret)
import csv
import logging
from datetime import datetime, timedelta
from typing import Any, Dict, Optional

from scrapy import Spider
from sqlalchemy.dialects.postgresql import insert

from opennem.core.normalizers import normalize_duid
from opennem.db import SessionLocal, get_database_engine
from opennem.db.models.opennem import FacilityScada
from opennem.pipelines.nem.opennem import unit_scada_generate_facility_scada
from opennem.schema.network import NetworkWEM
from opennem.utils.dates import parse_date
from opennem.utils.pipelines import check_spider_pipeline

logger = logging.getLogger(__name__)


class WemStoreFacilityScada(object):
    @check_spider_pipeline
    def process_item(
        self, item: Dict[str, Any], spider: Optional[Spider] = None
    ) -> Dict[str, Any]:
        if "content" not in item:
            logger.error("No item content, skipping store facility scada")
            return item

        csvreader = csv.DictReader(item["content"].split("\n"))

        item["table_schema"] = FacilityScada
        item["update_fields"] = ["generated", "eoi_quantity"]
        item["records"] = unit_scada_generate_facility_scada(
            csvreader,
            spider,
            interval_field="Trading Interval",
            facility_code_field="Facility Code",
            power_field="EOI Quantity (MW)",
            energy_field="Energy Generated (MWh)",
            network=NetworkWEM,
        )
        item["content"] = None

        return item


class WemStoreFacilityIntervals(object):
    @check_spider_pipeline
    def process_item(
        self, item: Dict[str, Any], spider: Optional[Spider] = None
    ) -> Dict[str, Any]:
        if "content" not in item:
            logger.error("No item content, skipping store facility scada")
            return item

        csvreader = csv.DictReader(item["content"].split("\n"))

        item["table_schema"] = FacilityScada
        item["update_fields"] = ["generated"]
        item["records"] = unit_scada_generate_facility_scada(
            csvreader,
            spider,
            interval_field="PERIOD",
            facility_code_field="FACILITY_CODE",
            power_field="ACTUAL_MW",
            network=NetworkWEM,
        )
        item["content"] = None

        return item


class WemStoreLiveFacilityScada(object):
    """
    Store live facility scada data.
@NOTE no longer used """ @check_spider_pipeline def process_item(self, item: Dict[str, Any], spider: Optional[Spider] = None) -> int: session = SessionLocal() engine = get_database_engine() csvreader = csv.DictReader(item["content"].split("\n")) records_to_store = [] last_asat = None for row in csvreader: # @TODO MAX_GEN_CAPACITY # facility_capacity = row["MAX_GEN_CAPACITY"] if row["AS_AT"] != "": last_asat = parse_date(row["AS_AT"], network=NetworkWEM, dayfirst=False) if not last_asat or type(last_asat) is not datetime: logger.error("Invalid row or no datetime") continue # We need to pivot the table since columns are time intervals for i in range(1, 48): column = f"I{i:02}" if column not in row: logger.error("Do not have data for interval {}".format(column)) continue if i > 0: interval = last_asat - timedelta(minutes=(i - 1) * 30) else: interval = last_asat facility_code = normalize_duid(row["FACILITY_CODE"]) val = None try: val = float(row[column]) / 2 or None except ValueError: pass records_to_store.append( { "created_by": spider.name, "network_id": "WEM", "trading_interval": interval, "facility_code": facility_code, "eoi_quantity": val, } ) stmt = insert(FacilityScada).values(records_to_store) stmt.bind = engine stmt = stmt.on_conflict_do_update( index_elements=["trading_interval", "network_id", "facility_code", "is_forecast"], set_={ # "updated_by": stmt.excluded.created_by, "eoi_quantity": stmt.excluded.eoi_quantity, }, ) try: session.execute(stmt) session.commit() except Exception as e: logger.error("Error inserting records") logger.error(e) finally: session.close() return len(records_to_store)
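# --- Illustrative sketch (not part of the original module) ---
# The pipeline above relies on PostgreSQL's "INSERT ... ON CONFLICT DO UPDATE"
# via SQLAlchemy. A minimal, self-contained sketch of the same upsert pattern
# against a hypothetical table (table and column names are assumptions, not
# part of opennem) might look like this:
from sqlalchemy import Column, DateTime, MetaData, Numeric, String, Table
from sqlalchemy.dialects.postgresql import insert as pg_insert

metadata = MetaData()
demo_scada = Table(
    "demo_scada", metadata,
    Column("trading_interval", DateTime, primary_key=True),
    Column("facility_code", String, primary_key=True),
    Column("eoi_quantity", Numeric),
)


def upsert_rows(engine, rows):
    # Insert new rows; on primary-key conflict, overwrite the quantity only.
    stmt = pg_insert(demo_scada).values(rows)
    stmt = stmt.on_conflict_do_update(
        index_elements=["trading_interval", "facility_code"],
        set_={"eoi_quantity": stmt.excluded.eoi_quantity},
    )
    with engine.begin() as conn:
        conn.execute(stmt)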
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals from wtforms.ext.sqlalchemy.fields import QuerySelectField from wtforms.fields import BooleanField, TextAreaField from wtforms.fields.html5 import URLField from wtforms.fields.simple import HiddenField, StringField from wtforms.validators import DataRequired, Optional, ValidationError from indico.core.db import db from indico.core.db.sqlalchemy.protection import ProtectionMode from indico.modules.attachments.models.folders import AttachmentFolder from indico.modules.attachments.util import get_default_folder_names from indico.util.i18n import _ from indico.web.flask.util import url_for from indico.web.forms.base import IndicoForm, generated_data from indico.web.forms.fields import (AccessControlListField, EditableFileField, FileField, IndicoDateField, IndicoRadioField, IndicoSelectMultipleCheckboxField) from indico.web.forms.validators import HiddenUnless, UsedIf from indico.web.forms.widgets import SwitchWidget, TypeaheadWidget class AttachmentFormBase(IndicoForm): protected = BooleanField(_("Protected"), widget=SwitchWidget()) folder = QuerySelectField(_("Folder"), allow_blank=True, blank_text=_("No folder selected"), get_label='title', description=_("Adding materials to folders allow grouping and easier permission " "management.")) acl = AccessControlListField(_("Access control list"), [UsedIf(lambda form, field: form.protected.data)], allow_groups=True, allow_external_users=True, allow_event_roles=True, allow_category_roles=True, allow_registration_forms=True, event=lambda form: form.event, default_text=_('Restrict access to this material'), description=_("The list of users and groups allowed to access the material")) def __init__(self, *args, **kwargs): linked_object = kwargs.pop('linked_object') self.event = getattr(linked_object, 'event', None) # not present in categories super(AttachmentFormBase, self).__init__(*args, **kwargs) self.folder.query = (AttachmentFolder .find(object=linked_object, is_default=False, is_deleted=False) .order_by(db.func.lower(AttachmentFolder.title))) @generated_data def protection_mode(self): return ProtectionMode.protected if self.protected.data else ProtectionMode.inheriting class EditAttachmentFormBase(AttachmentFormBase): title = StringField(_("Title"), [DataRequired()]) description = TextAreaField(_("Description")) class AddAttachmentFilesForm(AttachmentFormBase): files = FileField(_("Files"), multiple_files=True) def _get_file_data(attachment): file = attachment.file return { 'url': url_for('attachments.download', attachment, filename=file.filename, from_preview='1'), 'filename': file.filename, 'size': file.size, 'content_type': file.content_type } class EditAttachmentFileForm(EditAttachmentFormBase): file = EditableFileField(_("File"), add_remove_links=False, get_metadata=_get_file_data, description=_("Already uploaded file. 
Replace it by adding a new file.")) class AttachmentLinkFormMixin(object): title = StringField(_("Title"), [DataRequired()]) link_url = URLField(_("URL"), [DataRequired()]) class AddAttachmentLinkForm(AttachmentLinkFormMixin, AttachmentFormBase): pass class EditAttachmentLinkForm(AttachmentLinkFormMixin, EditAttachmentFormBase): pass class AttachmentFolderForm(IndicoForm): title = HiddenField(_("Name"), [DataRequired()], widget=TypeaheadWidget(), description=_("The name of the folder.")) description = TextAreaField(_("Description"), description=_("Description of the folder and its content")) protected = BooleanField(_("Protected"), widget=SwitchWidget()) acl = AccessControlListField(_("Access control list"), [UsedIf(lambda form, field: form.protected.data)], allow_groups=True, allow_external_users=True, allow_event_roles=True, allow_category_roles=True, allow_registration_forms=True, event=lambda form: form.event, default_text=_('Restrict access to this folder'), description=_("The list of users and groups allowed to access the folder")) is_always_visible = BooleanField(_("Always Visible"), [HiddenUnless('is_hidden', value=False)], widget=SwitchWidget(), description=_("By default, folders are always visible, even if a user cannot " "access them. You can disable this behavior here, hiding the folder " "for anyone who does not have permission to access it.")) is_hidden = BooleanField(_("Always hidden"), [HiddenUnless('is_always_visible', value=False)], widget=SwitchWidget(), description=_("Always hide the folder and its contents from public display areas of " "the event. You can use this for folders to store non-image files used " "e.g. in download links. The access permissions still apply.")) def __init__(self, *args, **kwargs): self.linked_object = kwargs.pop('linked_object') self.event = getattr(self.linked_object, 'event', None) # not present in categories super(AttachmentFolderForm, self).__init__(*args, **kwargs) self.title.choices = self._get_title_suggestions() def _get_title_suggestions(self): query = db.session.query(AttachmentFolder.title).filter_by(is_deleted=False, is_default=False, object=self.linked_object) existing = set(x[0] for x in query) suggestions = set(get_default_folder_names()) - existing if self.title.data: suggestions.add(self.title.data) return sorted(suggestions) def validate_is_always_visible(self, field): if self.is_always_visible.data and self.is_hidden.data: raise ValidationError('These two options cannot be used at the same time') validate_is_hidden = validate_is_always_visible @generated_data def protection_mode(self): return ProtectionMode.protected if self.protected.data else ProtectionMode.inheriting class AttachmentPackageForm(IndicoForm): added_since = IndicoDateField(_('Added Since'), [Optional()], description=_('Include only attachments uploaded after this date')) filter_type = IndicoRadioField(_('Include'), [DataRequired()]) sessions = IndicoSelectMultipleCheckboxField(_('Sessions'), [UsedIf(lambda form, _: form.filter_type.data == 'sessions'), DataRequired()], description=_('Include materials from selected sessions'), coerce=int) contributions = IndicoSelectMultipleCheckboxField(_('Contributions'), [UsedIf(lambda form, _: form.filter_type.data == 'contributions'), DataRequired()], description=_('Include materials from selected contributions'), coerce=int) dates = IndicoSelectMultipleCheckboxField(_('Events scheduled on'), [UsedIf(lambda form, _: form.filter_type.data == 'dates'), DataRequired()], description=_('Include materials from 
sessions/contributions scheduled ' 'on the selected dates'))
# -*- coding: utf-8 -*- """ Created on Thu Feb 6 20:55:32 2020 @author: arosso """ from recipe_scrapers import scrape_me # give the url as a string, it can be url from any site listed below # scraper = scrape_me('http://allrecipes.com/Recipe/Apple-Cake-Iv/Detail.aspx') scraper = scrape_me('https://www.101cookbooks.com/instant-pot-mushroom-stroganoff/') dict_recipe = dict() dict_recipe['title'] = scraper.title() dict_recipe['total_time'] = scraper.total_time() dict_recipe['yields'] = scraper.yields() dict_recipe['ingredients'] = scraper.ingredients() dict_recipe['instructions'] = scraper.instructions() #dict_recipe['image'] = scraper.image() #dict_recipe['links'] = scraper.links() print(dict_recipe)
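Since the fields collected above are plain strings and lists, the resulting dict maps directly to JSON. A small follow-on sketch (the output path is illustrative, and `dict_recipe` is assumed to come from the script above):

import json

with open('recipe.json', 'w') as fh:
    json.dump(dict_recipe, fh, indent=2, ensure_ascii=False)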
import numpy as np import pandas as pd from sklearn.cluster import KMeans import itertools import findspark import pyspark from pyspark.sql.functions import pandas_udf, PandasUDFType from pyspark.sql.types import * import time def simulate_sbm_dc_data(sbm_matrix, sample_size=1000, partition_num=10, cluster_num=3): """ :param sbm_matrix: :param sample_size: :param partition_num: :param cluster_num: :return: """ if (sbm_matrix.shape[0] != cluster_num) | \ (sbm_matrix.shape[1] != cluster_num) | \ (sbm_matrix.shape[0] != sbm_matrix.shape[1]): raise Exception("sbm_matrix shape Error or the Shape is not equal to Cluster_num") else: data_index = [x for x in range(sample_size)] data_cluster = np.random.randint(0, cluster_num, sample_size).tolist() index_cluster = dict(zip(data_index, data_cluster)) X = np.empty(shape=[0, 3], dtype=int) X = np.append(X, [[0, -1, np.random.randint(0, partition_num, 1)[0]]], axis=0) for i in range(1, sample_size): p_num = np.random.randint(0, partition_num, 1)[0] X = np.append(X, [[i, -1, p_num]], axis=0) # to avoid node lost for j in range(i): if np.random.binomial(1, sbm_matrix[index_cluster[i], index_cluster[j]], 1): X = np.append(X, [[i, j, p_num]], axis=0) data_pdf = pd.DataFrame(X, columns=["IndexNum1"] + ["IndexNum2"] + ["PartitionID"]) return data_pdf, index_cluster def get_laplace_matrix(adjacency_matrix, position="master", regularization=False): """ :param adjacency_matrix: 邻接矩阵(方阵或长矩阵) :param position: master或worker :param regularization: 是否进行正则化 :return: 拉普拉斯矩阵 """ if regularization: if position == "master": degree = np.sum(adjacency_matrix, axis=1) d = np.diag((degree + np.mean(degree)) ** (-0.5)) # 得到度矩阵 return np.dot(np.dot(d, adjacency_matrix), d) elif position == "worker": # 2020.7.18 for test out_degree = np.sum(adjacency_matrix, axis=1) out_degree_matrix = np.diag((out_degree + np.mean(out_degree)) ** (-0.5)) for i in range(out_degree_matrix.shape[0]): if out_degree_matrix[i, i] == np.infty: out_degree_matrix[i, i] = 1000 in_degree = np.sum(adjacency_matrix, axis=0) in_degree_matrix = np.diag((in_degree + np.mean(in_degree)) ** (-0.5)) ### laplace_matrix = np.dot(np.dot(out_degree_matrix, adjacency_matrix), in_degree_matrix) return laplace_matrix # D = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5)) # F = np.diag(np.sum(adjacency_matrix, axis=0) ** (-0.5)) # return np.dot(np.dot(D, adjacency_matrix), F) # 得到度矩阵 else: raise Exception("Input Error: worker or master is expected but {} are given".format(position)) else: if position == "master": d = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5)) # 得到度矩阵 return np.dot(np.dot(d, adjacency_matrix), d) elif position == "worker": out_degree_matrix = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5)) for i in range(out_degree_matrix.shape[0]): if out_degree_matrix[i, i] == np.infty: out_degree_matrix[i, i] = 10000 in_degree_matrix = np.diag(np.sum(adjacency_matrix, axis=0) ** (-0.5)) laplace_matrix = np.dot(np.dot(out_degree_matrix, adjacency_matrix), in_degree_matrix) return laplace_matrix # D = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5)) # F = np.diag(np.sum(adjacency_matrix, axis=0) ** (-0.5)) # return np.dot(np.dot(D, adjacency_matrix), F) # 得到度矩阵 else: raise Exception("Input Error: worker or master is expected but {} are given".format(position)) def get_spectral(laplace_matrix, k, normalization=False, method='svd'): """ :param laplace_matrix: 拉普拉斯矩阵 :param k: 截取SVD后的前k个向量 :param normalization: 是否归一化 :param method: 选择用奇异值分解(SVD)还是特征值分解(EVD) :return: 得到的谱 """ if method == 'svd': u, _, _ = 
np.linalg.svd(laplace_matrix) spectral = u[:, list(range(k))] if normalization: row_len = len(u) # 行数 for i in range(row_len): norm2 = np.linalg.norm(spectral[i]) if norm2: spectral[i] = spectral[i] / np.linalg.norm(spectral[i]) elif method == 'evd': e_vals, e_vecs = np.linalg.eig(laplace_matrix) sorted_indices = np.argsort(e_vals) spectral = e_vecs[:, sorted_indices[:-k-1:-1]] if normalization: row_len = len(e_vecs) # 行数 for i in range(row_len): norm2 = np.linalg.norm(spectral[i]) if norm2: spectral[i] = spectral[i] / np.linalg.norm(spectral[i]) else: raise ValueError("method must be 'svd' or 'evd' but {} is given".format(method)) return spectral def worker_clustering(worker_df, cluster_num): """ :param worker_df: :param method: :param cluster_num: :return: """ node_list = list(set(worker_df["IndexNum1"].tolist())) node_num = len(node_list) index_list = [x for x in range(node_num)] node2index = dict(zip(node_list, index_list)) adj_matrix = np.zeros((node_num, node_num), dtype=int) for i in range(node_num): adj_matrix[i][i] = 10 for row in worker_df.itertuples(index=False, name='Pandas'): item1 = getattr(row, "IndexNum1") item2 = getattr(row, "IndexNum2") if (item2 in node_list) & (item2 != -1): adj_matrix[node2index[item1]][node2index[item2]] = 1 adj_matrix[node2index[item2]][node2index[item1]] = 1 # first, get the laplace matrix laplace_matrix = get_laplace_matrix(adj_matrix, position='master', regularization=False) # second, get the spectral spectral = get_spectral(laplace_matrix, cluster_num, normalization=False, method='svd') # third, do k-means in spectral model = KMeans(n_clusters=cluster_num) model_fit = model.fit(spectral) # do k_means in spectral_transpose # cluster_center = model_fit.cluster_centers_ # center points cluster_label = list(model_fit.labels_) # labels (cluster information) # return worker_num = worker_df["PartitionID"].tolist()[0] out_df = pd.DataFrame({"PartitionID": [worker_num for _ in range(len(node_list))], "IndexNum": node_list, "ClusterExp": cluster_label}) return out_df def get_accurate(clustering_res_df, cluster_number, error=False): """ :param clustering_res_df: a pandas DataFrame about clustering result :param cluster_number: the number of the cluster (the first column is the index, the second column is the right information, the third column is the clustering information) :param error: if error=True, then return the error rate, else, return the accuracy rate :return: the clustering accuracy """ if clustering_res_df.shape[1] != 3: raise Exception("Shape Error: the input DataFrame's column number is not 3") real_dict = {} clustering_dict = {} for i in range(cluster_number): real_df = clustering_res_df.loc[clustering_res_df['ClusterInfo'] == i] clustering_df = clustering_res_df.loc[clustering_res_df['ClusterExp'] == i] real_dict[i] = real_df['IndexNum'].tolist() clustering_dict[i] = clustering_df['IndexNum'].tolist() accuracy_matrix = np.zeros((cluster_number, cluster_number)) for i in range(cluster_number): for j in range(cluster_number): accuracy_matrix[i][j] = len(set(real_dict[i]).intersection(set(clustering_dict[j]))) # for test # print("The accuracy matrix is: \n", accuracy_matrix) case_iterator = itertools.permutations(range(cluster_number), cluster_number) accurate = 0 for item in case_iterator: acc = sum([accuracy_matrix[i][item[i]] for i in range(cluster_number)]) if acc > accurate: accurate = acc if not error: return accurate / clustering_res_df.shape[0] else: return 1 - accurate / clustering_res_df.shape[0] # TODO some SBM matrix sbm_matrix1 = 
np.array([[0.7, 0.45, 0.45], [0.45, 0.7, 0.45], [0.45, 0.45, 0.7]]) sbm_matrix2 = np.array([[0.8, 0.4, 0.4], [0.4, 0.8, 0.4], [0.4, 0.4, 0.8]]) sbm_matrix3 = np.array([[0.6, 0.45, 0.45], [0.45, 0.6, 0.45], [0.45, 0.45, 0.6]]) sbm_matrix4 = np.array([[0.2, 0.1, 0.1], [0.1, 0.2, 0.1], [0.1, 0.1, 0.2]]) if __name__ == '__main__': # Model Settings sbm_matrix = sbm_matrix4 sample_size = 1000 master_num = 100 worker_per_sub = 20 partition_num = 50 cluster_num = 3 a, b = simulate_sbm_dc_data(sbm_matrix) c = worker_clustering(a, 3) real_label = [] for row in c.itertuples(index=False, name='Pandas'): item = getattr(row, "IndexNum") real_label.append(b[item]) c["ClusterInfo"] = real_label print(get_accurate(c, 3)) print(c) # print(a)
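A minimal, self-contained sketch of the label-permutation matching that get_accurate performs above: predicted cluster ids are only meaningful up to a relabelling, so accuracy is taken as the best agreement over all permutations of the predicted labels (the labels below are illustrative, not part of the original script).

import itertools
import numpy as np

true_labels = [0, 0, 1, 1, 2, 2]
pred_labels = [2, 2, 0, 0, 1, 1]   # same partition, different cluster ids
k = 3

# Confusion counts between true and predicted cluster ids
confusion = np.zeros((k, k))
for t, p in zip(true_labels, pred_labels):
    confusion[t, p] += 1

# Best agreement over all relabellings of the predicted ids
best = max(sum(confusion[i, perm[i]] for i in range(k))
           for perm in itertools.permutations(range(k)))
print(best / len(true_labels))   # 1.0 -> perfect clustering up to relabelling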
from __future__ import absolute_import import six from string import Formatter class dontexplodedict(object): """ A dictionary that won't throw a KeyError and will return back a sensible default value to be used in string formatting. """ def __init__(self, d=None): self.data = d or {} def __getitem__(self, key): return self.data.get(key, '') class EventError(object): INVALID_DATA = 'invalid_data' INVALID_ATTRIBUTE = 'invalid_attribute' VALUE_TOO_LONG = 'value_too_long' UNKNOWN_ERROR = 'unknown_error' SECURITY_VIOLATION = 'security_violation' RESTRICTED_IP = 'restricted_ip' JS_GENERIC_FETCH_ERROR = 'js_generic_fetch_error' # deprecated in favor of FETCH_GENERIC_ERROR FETCH_GENERIC_ERROR = 'fetch_generic_error' JS_INVALID_HTTP_CODE = 'js_invalid_http_code' # deprecated in favor of FETCH_INVALID_HTTP_CODE FETCH_INVALID_HTTP_CODE = 'fetch_invalid_http_code' JS_INVALID_CONTENT = 'js_invalid_content' JS_NO_COLUMN = 'js_no_column' JS_MISSING_SOURCE = 'js_no_source' JS_INVALID_SOURCEMAP = 'js_invalid_source' JS_TOO_MANY_REMOTE_SOURCES = 'js_too_many_sources' JS_INVALID_SOURCE_ENCODING = 'js_invalid_source_encoding' FETCH_INVALID_ENCODING = 'fetch_invalid_source_encoding' JS_INVALID_SOURCEMAP_LOCATION = 'js_invalid_sourcemap_location' JS_TOO_LARGE = 'js_too_large' # deprecated in favor of FETCH_TOO_LARGE FETCH_TOO_LARGE = 'fetch_too_large' JS_FETCH_TIMEOUT = 'js_fetch_timeout' # deprecated in favor of FETCH_TIMEOUT FETCH_TIMEOUT = 'fetch_timeout' NATIVE_NO_CRASHED_THREAD = 'native_no_crashed_thread' NATIVE_INTERNAL_FAILURE = 'native_internal_failure' NATIVE_NO_SYMSYND = 'native_no_symsynd' NATIVE_BAD_DSYM = 'native_bad_dsym' NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM = 'native_optionally_bundled_dsym' NATIVE_MISSING_DSYM = 'native_missing_dsym' NATIVE_MISSING_SYSTEM_DSYM = 'native_missing_system_dsym' NATIVE_MISSING_SYMBOL = 'native_missing_symbol' NATIVE_SIMULATOR_FRAME = 'native_simulator_frame' NATIVE_UNKNOWN_IMAGE = 'native_unknown_image' PROGUARD_MISSING_MAPPING = 'proguard_missing_mapping' PROGUARD_MISSING_LINENO = 'proguard_missing_lineno' _messages = { INVALID_DATA: u'Discarded invalid value for parameter \'{name}\'', INVALID_ATTRIBUTE: u'Discarded invalid parameter \'{name}\'', VALUE_TOO_LONG: u'Discarded value for \'{name}\' due to exceeding maximum length', UNKNOWN_ERROR: u'Unknown error', SECURITY_VIOLATION: u'Cannot fetch resource due to security violation on {url}', RESTRICTED_IP: u'Cannot fetch resource due to restricted IP address on {url}', # deprecated in favor of FETCH_GENERIC_ERROR JS_GENERIC_FETCH_ERROR: u'Unable to fetch resource: {url}', FETCH_GENERIC_ERROR: u'Unable to fetch resource: {url}', JS_INVALID_HTTP_CODE: u'HTTP returned {value} response on {url}', # deprecated in favor of FETCH_INVALID_HTTP_CODE FETCH_INVALID_HTTP_CODE: u'HTTP returned {value} response on {url}', JS_INVALID_CONTENT: u'Source file was not JavaScript: {url}', JS_NO_COLUMN: u'Cannot expand sourcemap due to no column information for {url}', JS_MISSING_SOURCE: u'Source code was not found for {url}', JS_INVALID_SOURCEMAP: u'Sourcemap was invalid or not parseable: {url}', JS_TOO_MANY_REMOTE_SOURCES: u'The maximum number of remote source requests was made', JS_INVALID_SOURCE_ENCODING: u'Source file was not \'{value}\' encoding: {url}', FETCH_INVALID_ENCODING: u'Source file was not \'{value}\' encoding: {url}', JS_INVALID_SOURCEMAP_LOCATION: u'Invalid location in sourcemap: ({column}, {row})', # deprecated in favor of FETCH_TOO_LARGE JS_TOO_LARGE: u'Remote file too large: ({max_size:g}MB, {url})', 
        FETCH_TOO_LARGE: u'Remote file too large: ({max_size:g}MB, {url})',
        # deprecated in favor of FETCH_TIMEOUT
        JS_FETCH_TIMEOUT: u'Remote file took too long to load: ({timeout}s, {url})',
        FETCH_TIMEOUT: u'Remote file took too long to load: ({timeout}s, {url})',
        NATIVE_NO_CRASHED_THREAD: u'No crashed thread found in crash report',
        NATIVE_INTERNAL_FAILURE: u'Internal failure when attempting to symbolicate: {error}',
        NATIVE_NO_SYMSYND: u'The symbolizer is not configured for this system.',
        NATIVE_BAD_DSYM: u'The debug symbol file used was broken.',
        NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM: u'An optional debug symbol file was missing.',
        NATIVE_MISSING_DSYM: u'A required debug symbol file was missing.',
        NATIVE_MISSING_SYSTEM_DSYM: u'A system debug symbol file was missing.',
        NATIVE_MISSING_SYMBOL: u'Unable to resolve a symbol.',
        NATIVE_SIMULATOR_FRAME: u'Encountered an unprocessable simulator frame.',
        NATIVE_UNKNOWN_IMAGE: u'A binary image is referenced that is unknown.',
        PROGUARD_MISSING_MAPPING: u'A proguard mapping file was missing.',
        PROGUARD_MISSING_LINENO: u'A proguard mapping file does not contain line info.',
    }

    @classmethod
    def get_message(cls, data):
        return Formatter().vformat(
            cls._messages[data['type']],
            [],
            dontexplodedict(data),
        )

    def to_dict(self):
        return {k: v for k, v in six.iteritems(self) if k != 'type'}
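The dontexplodedict helper above only matters in combination with string.Formatter.vformat: missing interpolation keys render as empty strings instead of raising KeyError. A standalone sketch of that behaviour (the class and values below are illustrative re-implementations, not imports from the module above):

from string import Formatter

class SafeDict(object):
    """Returns '' for missing keys, mirroring dontexplodedict."""
    def __init__(self, d=None):
        self.data = d or {}

    def __getitem__(self, key):
        return self.data.get(key, '')

template = u'Unable to fetch resource: {url}'
print(Formatter().vformat(template, [], SafeDict({'url': 'http://example.com'})))
print(Formatter().vformat(template, [], SafeDict({})))  # no KeyError, url is simply blank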
import os.path as pt import numpy as np import torchvision.transforms as transforms import torch from torch.utils.data import DataLoader from torchvision.datasets import EMNIST def ceil(x: float): return int(np.ceil(x)) class MyEMNIST(EMNIST): """ Reimplements get_item to transform tensor input to pil image before applying transformation. """ def __getitem__(self, index): img, target = self.data[index], self.targets[index] # doing this so that it is consistent with all other datasets # to return a PIL Image img = transforms.ToPILImage()(img) if self.target_transform is not None: target = self.target_transform(target) if self.transform is not None: img = self.transform(img) return img, target class OEEMNIST(EMNIST): def __init__(self, size: torch.Size, root: str = None, split='letters', limit_var=20): # split = Train """ Outlier Exposure dataset for EMNIST. :param size: size of the samples in n x c x h x w, samples will be resized to h x w. If n is larger than the number of samples available in EMNIST, dataset will be enlarged by repetitions to fit n. This is important as exactly n images are extracted per iteration of the data_loader. For online supervision n should be set to 1 because only one sample is extracted at a time. :param root: root directory where data is found or is to be downloaded to. :param split: The dataset has 6 different splits: ``byclass``, ``bymerge``, ``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies which one to use. :param limit_var: limits the number of different samples, i.e. randomly chooses limit_var many samples from all available ones to be the training data. """ assert len(size) == 3 and size[1] == size[2] root = pt.join(root, 'emnist', ) transform = transforms.Compose([ transforms.Resize((size[1], size[2])), transforms.ToTensor() ]) super().__init__(root, split, transform=transform, download=True) self.size = size self.data = self.data.transpose(1, 2) self.idx_to_class = {v: k for k, v in self.class_to_idx.items()} if limit_var is not None and limit_var < len(self): picks = np.random.choice(np.arange(self.data.size(0)), size=limit_var, replace=False) self.data = self.data[picks] self.targets = self.targets[picks] if limit_var is not None and limit_var > len(self): print( 'OEEMNIST shall be limited to {} samples, but Cifar100 contains only {} samples, thus using all.' .format(limit_var, len(self)) ) if len(self) < size[0]: rep = ceil(size[0] / len(self)) old = len(self) self.data = self.data.repeat(rep, 1, 1) self.targets = self.targets.repeat(rep) if rep != size[0] / old: import warnings warnings.warn( 'OEEMNIST has been limited to {} samples. ' 'Due to the requested size of {}, the dataset will be enlarged. ' 'But {} repetitions will make some samples appear more often than others in the dataset, ' 'because the final size after repetitions is {}, which is cut to {}' .format(limit_var, size[0], rep, len(self), size[0]) ) def data_loader(self): return DataLoader(dataset=self, batch_size=self.size[0], shuffle=True, num_workers=0) def __getitem__(self, index): sample, target = super().__getitem__(index) sample = sample.squeeze().mul(255).byte() return sample
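A small sketch of the enlargement arithmetic described in the OEEMNIST docstring: after limiting the data to limit_var samples, it is repeated ceil(n / available) times so that at least n samples exist for each draw of the data loader (the numbers below are illustrative).

import numpy as np

def repetitions_needed(requested_n, available):
    # mirrors rep = ceil(size[0] / len(self)) in OEEMNIST.__init__
    return int(np.ceil(requested_n / available))

print(repetitions_needed(1, 20))    # 1 -> online supervision (n=1) needs no enlargement
print(repetitions_needed(128, 20))  # 7 -> 140 samples available for batches of 128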
import numpy as np import torch import torch.nn as nn from rgb_stacking.utils.utils import init class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class Sum(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): return torch.sum(x, self.dim) class Mean(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): return torch.mean(x, self.dim) def init_rec(rec): for name, param in rec.named_parameters(): if 'bias' in name: nn.init.constant_(param, 0) elif 'weight' in name: nn.init.orthogonal_(param) return rec def init_(m): return init(m, nn.init.orthogonal_, lambda x: nn.init. constant_(x, 0), np.sqrt(2))
#!/usr/bin/env python # coding=utf-8 # Stan 2018-08-04 import sys if sys.version_info >= (3,): class aStr(): def __str__(self): return self.__unicode__() def cmp(a, b): return (a > b) - (a < b) # range = range def b(s): return s.encode('utf-8') def u(s): return s.decode('utf-8') # bytes = bytes unicode = str string_types = str, numeric_types = int, float, complex simple_types = int, float, complex, str, bytearray collections_types = list, tuple, set, frozenset all_types = (int, float, complex, str, bytearray, list, tuple, set, frozenset, dict) else: class aStr(): def __str__(self): return self.__unicode__().encode('utf-8') # cmp = cmp range = xrange def b(s): return s def u(s): return s bytes = str # unicode = unicode string_types = basestring, numeric_types = int, long, float, complex simple_types = int, long, float, complex, basestring, bytearray collections_types = list, tuple, set, frozenset all_types = (int, long, float, complex, basestring, bytearray, list, tuple, set, frozenset, dict)
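A short sketch of how the shim's type tuples are typically consumed for dispatch. This is a standalone Python 3 example; the tuples are copied from the Python 3 branch above rather than imported.

string_types = str,
numeric_types = int, float, complex
collections_types = list, tuple, set, frozenset

def describe(obj):
    # isinstance against the shim's tuples works identically on Python 2 and 3
    if isinstance(obj, string_types):
        return "text"
    if isinstance(obj, numeric_types):
        return "number"
    if isinstance(obj, collections_types):
        return "collection of %d items" % len(obj)
    return "other"

print(describe("abc"))
print(describe(3.5))
print(describe([1, 2]))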
# A four-digit integer is given. Find the sum of even digits in it.
# Create a variable "var_int" and assign it a four-digit integer value.
# Create a variable "sum_even" and assign it 0.
# Find the sum of the even digits in the variable "var_int".
var_int = 1184
sum_even = 0

x1 = var_int % 10
var_int //= 10
sum_even += (x1 + 1) % 2 * x1

x2 = var_int % 10
var_int //= 10
sum_even += (x2 + 1) % 2 * x2

x3 = var_int % 10
var_int //= 10
sum_even += (x3 + 1) % 2 * x3

x4 = var_int % 10
var_int //= 10
sum_even += (x4 + 1) % 2 * x4

print(sum_even)
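The same task written with a digit loop, as a sketch: (x + 1) % 2 * x in the script above is just "x if x is even else 0", so the four unrolled steps generalise to any integer.

def sum_even_digits(n):
    total = 0
    while n:
        d = n % 10
        if d % 2 == 0:      # equivalent to adding (d + 1) % 2 * d
            total += d
        n //= 10
    return total

print(sum_even_digits(1184))  # 12 (8 + 4)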
# Permafrost Forms from django.conf import settings from django.contrib.auth.models import Permission from django.contrib.sites.models import Site from django.core.exceptions import ValidationError from django.forms import ModelForm from django.forms.fields import CharField, ChoiceField, BooleanField from django.forms.models import ModelMultipleChoiceField from django.forms.widgets import CheckboxInput from django.utils.translation import ugettext_lazy as _ from .models import PermafrostRole, get_optional_by_category, get_choices CHOICES = [('', _("Choose Role Type"))] + get_choices() LABELS = { 'name': _('Role Name'), 'category': _('Role Type') } def assemble_optiongroups_for_widget(permissions): choices = [] optgroups = {} if permissions: for perm in permissions: if perm.content_type.name in optgroups: optgroups[perm.content_type.name].append((perm.pk, perm.name,)) else: optgroups[perm.content_type.name] = [(perm.pk, perm.name,)] for model_name, options in optgroups.items(): choices.append([model_name, options]) return choices def bootstrappify(fields): for field in fields: widget = fields[field].widget if not isinstance(widget, CheckboxInput): if 'class' in widget.attrs: widget.attrs['class'] = widget.attrs['class'] + " form-control" else: widget.attrs.update({'class':'form-control'}) class SelectPermafrostRoleTypeForm(ModelForm): name = CharField(required=False) description = CharField(required=False) category = ChoiceField(choices=CHOICES) class Meta: model = PermafrostRole fields = ('name', 'description', 'category',) labels = LABELS def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) bootstrappify(self.fields) class PermafrostRoleCreateForm(ModelForm): permissions = ModelMultipleChoiceField(queryset=Permission.objects.all(), required=False) class Meta: model = PermafrostRole fields = ('name', 'description', 'category', 'permissions') labels = LABELS def __init__(self, *args, **kwargs): self.site = kwargs.pop('site', Site.objects.get_current()) super().__init__(*args, **kwargs) self.fields['category'].choices = CHOICES category = self.initial.get( 'category', self.data.get('category', None) ) if self.instance: category = self.instance.category if self.instance.category else category if category: all_optional_permissions = get_optional_by_category(category=category) ids = [perm.pk for perm in all_optional_permissions] self.fields['permissions'].queryset = Permission.objects.filter(id__in=ids) bootstrappify(self.fields) def save(self, commit=True): self.instance.site = self.site instance = super().save(commit) category = instance.category if 'permissions' in self.cleaned_data: perm_ids = [] if category: perm_ids = self.cleaned_data['permissions'] if perm_ids: instance.permissions_set(Permission.objects.filter(id__in=perm_ids)) else: instance.permissions_clear() return instance def clean_name(self): name = self.cleaned_data['name'] name_exists = False if self.instance: ## on update check if name change exists if 'name' in self.changed_data: name_exists = PermafrostRole.objects.filter( name=name, site=self.site, ).exclude(pk=self.instance.pk).first() else: try: name_exists = PermafrostRole.objects.get( name=name, site=self.site ) except PermafrostRole.DoesNotExist: pass if name_exists: raise ValidationError('Role with this name already exists') # Always return field return name class PermafrostRoleUpdateForm(PermafrostRoleCreateForm): """ Form used to display role detail Only allowed to edit optional permissions, name and description Category and required permissions 
stay locked """ deleted = BooleanField(required=False) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['category'].widget.attrs.update({'readonly': True, 'disabled': True}) self.fields['category'].disabled = True self.fields['category'].required = False self.fields['category'].choices = [choice for choice in CHOICES if choice[0] == self.instance.category] self.fields['category'].initial = self.instance.category ## limit choices to saved category self.fields['deleted'].initial = self.instance.deleted def save(self, commit=True): if self.cleaned_data['deleted']: self.instance.deleted = self.cleaned_data['deleted'] instance = super().save(commit) return instance
import os
import numpy as np
import pandas as pd
from qlib.data.dataset.processor import Processor
from qlib.data.dataset.utils import fetch_df_by_index
from typing import Dict


class HighFreqTrans(Processor):
    def __init__(self, dtype: str = "bool"):
        self.dtype = dtype

    def fit(self, df_features):
        pass

    def __call__(self, df_features):
        if self.dtype == "bool":
            return df_features.astype(np.int8)
        else:
            return df_features.astype(np.float32)


class HighFreqNorm(Processor):
    def __init__(
        self,
        fit_start_time: pd.Timestamp,
        fit_end_time: pd.Timestamp,
        feature_save_dir: str,
        norm_groups: Dict[str, int],
    ):
        self.fit_start_time = fit_start_time
        self.fit_end_time = fit_end_time
        self.feature_save_dir = feature_save_dir
        self.norm_groups = norm_groups

    def fit(self, df_features) -> None:
        # Statistics are computed once over the fit window and cached on disk;
        # skip recomputation if they already exist.
        if os.path.exists(self.feature_save_dir) and len(os.listdir(self.feature_save_dir)) != 0:
            return
        os.makedirs(self.feature_save_dir)
        fetch_df = fetch_df_by_index(df_features, slice(self.fit_start_time, self.fit_end_time), level="datetime")
        del df_features
        index = 0
        names = {}
        for name, dim in self.norm_groups.items():
            names[name] = slice(index, index + dim)
            index += dim
        for name, name_val in names.items():
            df_values = fetch_df.iloc(axis=1)[name_val].values
            if name.endswith("volume"):
                df_values = np.log1p(df_values)
            self.feature_mean = np.nanmean(df_values)
            np.save(self.feature_save_dir + name + "_mean.npy", self.feature_mean)
            df_values = df_values - self.feature_mean
            self.feature_std = np.nanstd(np.absolute(df_values))
            np.save(self.feature_save_dir + name + "_std.npy", self.feature_std)
            df_values = df_values / self.feature_std
            np.save(self.feature_save_dir + name + "_vmax.npy", np.nanmax(df_values))
            np.save(self.feature_save_dir + name + "_vmin.npy", np.nanmin(df_values))
        return

    def __call__(self, df_features):
        if "date" in df_features:
            # DataFrame.droplevel has no `inplace` argument, so rebind the result instead.
            df_features = df_features.droplevel("date")
        df_values = df_features.values
        index = 0
        names = {}
        for name, dim in self.norm_groups.items():
            names[name] = slice(index, index + dim)
            index += dim
        for name, name_val in names.items():
            feature_mean = np.load(self.feature_save_dir + name + "_mean.npy")
            feature_std = np.load(self.feature_save_dir + name + "_std.npy")
            if name.endswith("volume"):
                df_values[:, name_val] = np.log1p(df_values[:, name_val])
            df_values[:, name_val] -= feature_mean
            df_values[:, name_val] /= feature_std
        df_features = pd.DataFrame(data=df_values, index=df_features.index, columns=df_features.columns)
        return df_features.fillna(0)
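A standalone sketch of how HighFreqNorm's norm_groups maps contiguous column blocks to slices and normalises volume-like blocks on a log1p scale. The group names, dimensions and data below are assumptions chosen for illustration.

import numpy as np

norm_groups = {"price": 4, "volume": 2}      # hypothetical layout: 4 price columns, 2 volume columns
index, slices = 0, {}
for name, dim in norm_groups.items():
    slices[name] = slice(index, index + dim)
    index += dim

values = np.random.rand(8, 6)
for name, cols in slices.items():
    block = values[:, cols]
    if name.endswith("volume"):
        block = np.log1p(block)              # volume features are log-scaled first
    block = block - np.nanmean(block)        # centre, then scale by std of absolute values
    block = block / np.nanstd(np.absolute(block))
    values[:, cols] = block
print(slices, values.shape)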
import torch import torch.nn as nn use_cuda = torch.cuda.is_available() class CNNClassifier(nn.Module): def __init__(self, channel, SHHS=False): super(CNNClassifier, self).__init__() conv1 = nn.Conv2d(1, 10, (1, 200)) pool1 = nn.MaxPool2d((1, 2)) if channel == 1: conv2 = nn.Conv2d(10, 20, (1, 32)) conv3 = nn.Conv2d(20, 30, (1, 128)) conv4 = nn.Conv2d(30, 40, (1, 512)) freq = 1 else: conv2 = nn.Conv2d(10, 20, (2, 32)) conv3 = nn.Conv2d(20, 30, (2, 128)) conv4 = nn.Conv2d(30, 40, (2, 512)) freq=channel-3 pool2 = nn.MaxPool2d((1, 2)) self.conv_module = nn.Sequential(conv1, nn.ReLU(), pool1, conv2, nn.ReLU(), conv3, nn.ReLU(), conv4, nn.ReLU(), pool2) if SHHS: fc1 = nn.Linear(freq * 40 * 553, 100) else: fc1 = nn.Linear(freq*40*365, 100) fc2 = nn.Linear(100, 5) self.fc_module = nn.Sequential(fc1, nn.ReLU(), fc2) if use_cuda: self.conv_module = self.conv_module.cuda() self.fc_module = self.fc_module.cuda() def forward(self, x, isfc): out = self.conv_module(x) dim = 1 for d in out.size()[1:]: dim *= d if isfc: out = out.view(-1, dim) out = self.fc_module(out) else: out = out.permute(0, 3, 2, 1).reshape([-1, 200, 73]) return out
from math import sqrt # function with int parameter def my_function(a: str): print(a) my_function(3) # function with type annotation def my_function2(a: str) -> str: return a print(my_function2(3)) # import sqrt from math and use it print(sqrt(9.4323)) # import alias from math # from math import sqrt as square_root # function with list parameter def my_function3(a: list): for i in a: print(i) my_function3([1, 2, 3, 4, 5]) # function with dictionary parameter def my_function4(a: dict): for key, value in a.items(): print(key, value) my_function4({'a': 1, 'b': 2, 'c': 3}) # function with tuple parameter def my_function5(a: tuple): for i in a: print(i) my_function5(('a', 'b', 'c', 'd')) # function with set parameter def my_function6(a: set): for i in a: print(i) my_function6({'a', 'b', 'c', 'd'}) # function with function parameter def my_function7(a: callable): a() # make an http request async async def my_function8(a: callable): a() # my_function8(lambda: print('hello'))
import cv_datetime_utils import cv2 as cv import numpy as np import matplotlib.pyplot as plt import scipy.optimize import json import os def compose_transformations( rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2): rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3) translation_vector_1 = np.asarray(translation_vector_1).reshape(3) rotation_vector_2 = np.asarray(rotation_vector_2).reshape(3) translation_vector_2 = np.asarray(translation_vector_2).reshape(3) rotation_vector_composed, translation_vector_composed = cv.composeRT( rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2)[:2] rotation_vector_composed = np.squeeze(rotation_vector_composed) translation_vector_composed = np.squeeze(translation_vector_composed) return rotation_vector_composed, translation_vector_composed def invert_transformation( rotation_vector, translation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) new_rotation_vector, new_translation_vector = compose_transformations( np.array([0.0, 0.0, 0.0]), -translation_vector, -rotation_vector, np.array([0.0, 0.0, 0.0])) new_rotation_vector = np.squeeze(new_rotation_vector) new_translation_vector = np.squeeze(new_translation_vector) return new_rotation_vector, new_translation_vector def quaternion_vector_to_rotation_vector(quaternion_vector): quaternion_vector = np.asarray(quaternion_vector).reshape(4) spatial_vector = quaternion_vector[1:] qw = quaternion_vector[0] spatial_vector_length = np.linalg.norm(spatial_vector) unit_vector = spatial_vector/spatial_vector_length theta = 2*np.arctan2(spatial_vector_length, qw) rotation_vector = theta*unit_vector return rotation_vector def quaternion_vector_to_rotation_matrix(quaternion_vector): quaternion_tuple = tuple(np.asarray(quaternion_vector).reshape(4)) qw, qx, qy, qz = quaternion_tuple R = np.array([ [qw**2 + qx**2 - qy**2 - qz**2, 2*(qx*qy - qw*qz), 2*(qw*qy + qx*qz)], [2*(qx*qy + qw*qz), qw**2 - qx**2 + qy**2 - qz**2, 2*(qy*qz - qw*qx)], [2*(qx*qz - qw*qy), 2*(qw*qx + qy*qz), qw**2 - qx**2 - qy**2 + qz**2] ]) return R def rotation_vector_to_rotation_matrix(rotation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) rotation_matrix = cv.Rodrigues(rotation_vector)[0] return rotation_matrix def transform_object_points( object_points, rotation_vector=np.array([0.0, 0.0, 0.0]), translation_vector=np.array([0.0, 0.0, 0.0])): object_points = np.asarray(object_points) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector) if object_points.size == 0: return object_points object_points = object_points.reshape((-1, 3)) rotation_vector = rotation_vector.reshape(3) translation_vector = translation_vector.reshape(3) transformed_points = np.add( np.matmul( cv.Rodrigues(rotation_vector)[0], object_points.T).T, translation_vector.reshape((1, 3))) transformed_points = np.squeeze(transformed_points) return transformed_points def generate_camera_pose( camera_position=np.array([0.0, 0.0, 0.0]), yaw=0.0, pitch=0.0, roll=0.0): # yaw: 0.0 points north (along the positive y-axis), positive angles rotate counter-clockwise # pitch: 0.0 is level with the ground, positive angles rotate upward # roll: 0.0 is level with the ground, positive angles rotate clockwise # All angles in radians camera_position = np.asarray(camera_position).reshape(3) # First: Move the camera to the specified position rotation_vector_1 = np.array([0.0, 0.0, 0.0]) 
translation_vector_1 = -camera_position # Second: Rotate the camera so when we lower to the specified inclination, it will point in the specified compass direction rotation_vector_2 = np.array([0.0, 0.0, -(yaw - np.pi / 2)]) translation_vector_2 = np.array([0.0, 0.0, 0.0]) # Third: Lower to the specified inclination rotation_vector_2_3 = np.array([(np.pi / 2 - pitch), 0.0, 0.0]) translation_vector_2_3 = np.array([0.0, 0.0, 0.0]) # Fourth: Roll the camera by the specified angle rotation_vector_2_3_4 = np.array([0.0, 0.0, -roll]) translation_vector_2_3_4 = np.array([0.0, 0.0, 0.0]) # Combine these four moves rotation_vector_1_2, translation_vector_1_2 = compose_transformations( rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2) rotation_vector_1_2_3, translation_vector_1_2_3 = compose_transformations( rotation_vector_1_2, translation_vector_1_2, rotation_vector_2_3, translation_vector_2_3) rotation_vector, translation_vector = compose_transformations( rotation_vector_1_2_3, translation_vector_1_2_3, rotation_vector_2_3_4, translation_vector_2_3_4) rotation_vector = np.squeeze(rotation_vector) translation_vector = np.squeeze(translation_vector) return rotation_vector, translation_vector def extract_camera_position( rotation_vector, translation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) new_rotation_vector, new_translation_vector = compose_transformations( rotation_vector, translation_vector, -rotation_vector, np.array([0.0, 0.0, 0.0])) camera_position = -np.squeeze(new_translation_vector) return camera_position def extract_camera_position_rotation_matrix(rotation_matrix, translation_vector): rotation_matrix = np.asarray(rotation_matrix).reshape((3,3)) translation_vector = np.asarray(translation_vector).reshape(3) position = np.matmul(rotation_matrix.T, -translation_vector.T) return position def extract_camera_direction( rotation_vector, translation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) camera_direction = np.matmul( cv.Rodrigues(-rotation_vector)[0], np.array([[0.0], [0.0], [1.0]])) camera_direction = np.squeeze(camera_direction) return camera_direction def reconstruct_z_rotation(x, y): if x >= 0.0 and y >= 0.0: return np.arctan(y / x) if x >= 0.0 and y < 0.0: return np.arctan(y / x) + 2 * np.pi return np.arctan(y / x) + np.pi # Currently unused; needs to be fixed up for cases in which x and/or y are close # to zero def extract_yaw_from_camera_direction( camera_direction): camera_direction = np.asarray(camera_direction).reshape(3) yaw = reconstruct_z_rotation( camera_direction[0], camera_direction[1]) return yaw def generate_camera_matrix( focal_length, principal_point): focal_length = np.asarray(focal_length).reshape(2) principal_point = np.asarray(principal_point).reshape(2) camera_matrix = np.array([ [focal_length[0], 0, principal_point[0]], [0, focal_length[1], principal_point[1]], [0, 0, 1.0]]) return camera_matrix def generate_projection_matrix( camera_matrix, rotation_vector, translation_vector): camera_matrix = np.asarray(camera_matrix).reshape((3, 3)) rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) projection_matrix = np.matmul( camera_matrix, np.concatenate(( cv.Rodrigues(rotation_vector)[0], translation_vector.reshape((3, 1))), axis=1)) return(projection_matrix) def ground_grid_camera_view( image_width, 
image_height, rotation_vector, translation_vector, camera_matrix, distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]), fill_image=False, step=0.1 ): grid_corners = ground_rectangle_camera_view( image_width=image_width, image_height=image_height, rotation_vector=rotation_vector, translation_vector=translation_vector, camera_matrix=camera_matrix, distortion_coefficients=distortion_coefficients, fill_image=fill_image ) grid_points = generate_ground_grid( grid_corners=grid_corners, step=step ) return grid_points def ground_rectangle_camera_view( image_width, image_height, rotation_vector, translation_vector, camera_matrix, distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]), fill_image=False ): image_points = np.array([ [0.0, 0.0], [image_width, 0.0], [image_width, image_height], [0.0, image_height] ]) ground_points=np.empty((4, 3)) for i in range(4): ground_points[i] = ground_point( image_point=image_points[i], rotation_vector=rotation_vector, translation_vector=translation_vector, camera_matrix=camera_matrix, distortion_coefficients=distortion_coefficients ) x_values_sorted = np.sort(ground_points[:, 0]) y_values_sorted = np.sort(ground_points[:, 1]) if fill_image: x_min = x_values_sorted[0] x_max = x_values_sorted[3] y_min = y_values_sorted[0] y_max = y_values_sorted[3] else: x_min = x_values_sorted[1] x_max = x_values_sorted[2] y_min = y_values_sorted[1] y_max = y_values_sorted[2] return np.array([ [x_min, y_min], [x_max, y_max] ]) def ground_point( image_point, rotation_vector, translation_vector, camera_matrix, distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]) ): image_point = np.asarray(image_point) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector) camera_matrix = np.asarray(camera_matrix) distortion_coefficients = np.asarray(distortion_coefficients) image_point = image_point.reshape((2)) rotation_vector = rotation_vector.reshape(3) translation_vector = translation_vector.reshape(3) camera_matrix = camera_matrix.reshape((3, 3)) image_point_undistorted = cv.undistortPoints( image_point, camera_matrix, distortion_coefficients, P=camera_matrix ) image_point_undistorted = np.squeeze(image_point_undistorted) camera_position = np.matmul( cv.Rodrigues(-rotation_vector)[0], -translation_vector.T ).T camera_point_homogeneous = np.matmul( np.linalg.inv(camera_matrix), np.array([image_point_undistorted[0], image_point_undistorted[1], 1.0]).T ).T camera_direction = np.matmul( cv.Rodrigues(-rotation_vector)[0], camera_point_homogeneous.T ).T theta = -camera_position[2]/camera_direction[2] ground_point = camera_position + theta*camera_direction return ground_point def generate_ground_grid( grid_corners, step=0.1 ): x_grid, y_grid = np.meshgrid( np.arange(grid_corners[0, 0], grid_corners[1, 0], step=step), np.arange(grid_corners[0, 1], grid_corners[1, 1], step=step) ) grid = np.stack((x_grid, y_grid, np.full_like(x_grid, 0.0)), axis=-1) points = grid.reshape((-1, 3)) return points def project_points( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients, remove_behind_camera=False, remove_outside_frame=False, image_corners=None ): object_points = np.asarray(object_points).reshape((-1, 3)) rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) camera_matrix = np.asarray(camera_matrix).reshape((3, 3)) distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients)) if object_points.size == 0: return np.zeros((0, 2)) image_points = 
cv.projectPoints( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients )[0] if remove_behind_camera: behind_camera_boolean = behind_camera( object_points, rotation_vector, translation_vector ) image_points[behind_camera_boolean] = np.array([np.nan, np.nan]) if remove_outside_frame: outside_frame_boolean = outside_frame( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients, image_corners ) image_points[outside_frame_boolean] = np.array([np.nan, np.nan]) image_points = np.squeeze(image_points) return image_points def behind_camera( object_points, rotation_vector, translation_vector): object_points = np.asarray(object_points) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector) if object_points.size == 0: return np.zeros((0, 2)) object_points = object_points.reshape((-1, 3)) rotation_vector = rotation_vector.reshape(3) translation_vector = translation_vector.reshape(3) object_points_transformed = transform_object_points( object_points, rotation_vector, translation_vector ) behind_camera_boolean = (object_points_transformed <= 0)[..., 2] return behind_camera_boolean def outside_frame( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients, image_corners ): object_points = np.asarray(object_points).reshape((-1, 3)) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector).reshape(3) camera_matrix = np.asarray(camera_matrix).reshape((3,3)) distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients)) image_corners = np.asarray(image_corners).reshape((2,2)) if object_points.size == 0: return np.zeros((0, 2)) image_points = cv.projectPoints( object_points, rotation_vector, translation_vector, camera_matrix, np.array([0.0, 0.0, 0.0, 0.0]) )[0] image_points = image_points.reshape((-1, 2)) outside_frame_boolean = ( (image_points[:, 0] < image_corners[0, 0]) | (image_points[:, 0] > image_corners[1, 0]) | (image_points[:, 1] < image_corners[0, 1]) | (image_points[:, 1] > image_corners[1, 1]) ) return outside_frame_boolean def undistort_points( image_points, camera_matrix, distortion_coefficients): image_points = np.asarray(image_points) camera_matrix = np.asarray(camera_matrix) distortion_coefficients = np.asarray(distortion_coefficients) if image_points.size == 0: return image_points image_points = image_points.reshape((-1, 1, 2)) camera_matrix = camera_matrix.reshape((3, 3)) undistorted_points = cv.undistortPoints( image_points, camera_matrix, distortion_coefficients, P=camera_matrix) undistorted_points = np.squeeze(undistorted_points) return undistorted_points def estimate_camera_pose_from_image_points( image_points_1, image_points_2, camera_matrix, rotation_vector_1=np.array([0.0, 0.0, 0.0]), translation_vector_1=np.array([0.0, 0.0, 0.0]), distance_between_cameras=1.0): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) if image_points_1.size == 0 or image_points_2.size == 0: raise ValueError('One or both sets of image points appear to be empty') image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) 
rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) essential_matrix, mask = cv.findEssentialMat( image_points_1, image_points_2, camera_matrix) relative_rotation_matrix, relative_translation_vector = cv.recoverPose( essential_matrix, image_points_1, image_points_2, camera_matrix, mask=mask)[1:3] relative_rotation_vector = cv.Rodrigues(relative_rotation_matrix)[0] relative_translation_vector = relative_translation_vector * distance_between_cameras rotation_vector_2, translation_vector_2 = compose_transformations( rotation_vector_1, translation_vector_1, relative_rotation_vector, relative_translation_vector) rotation_vector_2 = np.squeeze(rotation_vector_2) translation_vector_2 = np.squeeze(translation_vector_2) return rotation_vector_2, translation_vector_2 def reconstruct_object_points_from_camera_poses( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) rotation_vector_2 = np.asarray(rotation_vector_2) translation_vector_2 = np.asarray(translation_vector_2) if image_points_1.size == 0 or image_points_2.size == 0: return np.zeros((0, 3)) image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) rotation_vector_2 = rotation_vector_2.reshape(3) translation_vector_2 = translation_vector_2.reshape(3) projection_matrix_1 = generate_projection_matrix( camera_matrix, rotation_vector_1, translation_vector_1) projection_matrix_2 = generate_projection_matrix( camera_matrix, rotation_vector_2, translation_vector_2) object_points_homogeneous = cv.triangulatePoints( projection_matrix_1, projection_matrix_2, image_points_1.T, image_points_2.T) object_points = cv.convertPointsFromHomogeneous( object_points_homogeneous.T) object_points = np.squeeze(object_points) return object_points def reconstruct_object_points_from_relative_camera_pose( image_points_1, image_points_2, camera_matrix, relative_rotation_vector, relative_translation_vector, rotation_vector_1=np.array([[0.0], [0.0], [0.0]]), translation_vector_1=np.array([[0.0], [0.0], [0.0]]), distance_between_cameras=1.0): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) relative_rotation_vector = np.asarray(relative_rotation_vector) relative_translation_vector = np.asarray(relative_translation_vector) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) if image_points_1.size == 0 or image_points_2.size == 0: return np.zeros((0, 3)) image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) relative_rotation_vector = relative_rotation_vector.reshape(3) relative_translation_vector = relative_translation_vector.reshape(3) rotation_vector_1 = 
rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) rotation_vector_2, translation_vector_2 = cv.composeRT( rotation_vector_1, translation_vector_1, relative_rotation_vector, relative_translation_vector * distance_between_cameras)[:2] object_points = reconstruct_object_points_from_camera_poses( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2) return object_points def reconstruct_object_points_from_image_points( image_points_1, image_points_2, camera_matrix, rotation_vector_1=np.array([[0.0], [0.0], [0.0]]), translation_vector_1=np.array([[0.0], [0.0], [0.0]]), distance_between_cameras=1.0): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) if image_points_1.size == 0 or image_points_2.size == 0: return np.zeros((0, 3)) image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) rotation_vector_2, translation_vector_2 = estimate_camera_pose_from_image_points( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, distance_between_cameras) object_points = reconstruct_object_points_from_camera_poses( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2) return object_points def estimate_camera_pose_from_plane_object_points( input_object_points, height, origin_index, x_axis_index, y_reference_point, y_reference_point_sign, distance_calibration_indices, calibration_distance): input_object_points = np.asarray(input_object_points) if input_object_points.size == 0: raise ValueError('Obect point array appears to be empty') input_object_points = input_object_points.reshape((-1, 3)) scale_factor = np.divide( calibration_distance, np.linalg.norm( np.subtract( input_object_points[distance_calibration_indices[0]], input_object_points[distance_calibration_indices[1]]))) object_points_1 = np.multiply( input_object_points, scale_factor) def objective_function(parameters): rotation_x = parameters[0] rotation_y = parameters[1] translation_z = parameters[2] object_points_transformed = transform_object_points( object_points_1, np.array([rotation_x, rotation_y, 0.0]), np.array([0.0, 0.0, translation_z])) return np.sum(np.square(object_points_transformed[:, 2] - height)) optimization_solution = scipy.optimize.minimize( objective_function, np.array([0.0, 0.0, 0.0])) rotation_x_a = optimization_solution['x'][0] rotation_y_a = optimization_solution['x'][1] translation_z_a = optimization_solution['x'][2] rotation_x_rotation_y_a_norm = np.linalg.norm([rotation_x_a, rotation_y_a]) rotation_x_b = rotation_x_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm) rotation_y_b = rotation_y_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm) translation_z_b = - translation_z_a rotation_vector_2_a = np.array([rotation_x_a, rotation_y_a, 0.0]) translation_vector_2_a = np.array([0.0, 0.0, translation_z_a]) object_points_2_a = transform_object_points( object_points_1, 
rotation_vector_2_a, translation_vector_2_a) rotation_vector_2_b = np.array([rotation_x_b, rotation_y_b, 0.0]) translation_vector_2_b = np.array([0.0, 0.0, translation_z_b]) object_points_2_b = transform_object_points( object_points_1, rotation_vector_2_b, translation_vector_2_b) sign_a = np.sign( np.cross( np.subtract( object_points_2_a[x_axis_index], object_points_2_a[origin_index]), np.subtract( object_points_2_a[y_reference_point], object_points_2_a[origin_index]))[2]) sign_b = np.sign( np.cross( np.subtract( object_points_2_b[x_axis_index], object_points_2_b[origin_index]), np.subtract( object_points_2_b[y_reference_point], object_points_2_b[origin_index]))[2]) if sign_a == y_reference_point_sign: rotation_vector_2 = rotation_vector_2_a translation_vector_2 = translation_vector_2_a object_points_2 = object_points_2_a else: rotation_vector_2 = rotation_vector_2_b translation_vector_2 = translation_vector_2_b object_points_2 = object_points_2_b xy_shift = - object_points_2[origin_index, :2] rotation_vector_3 = np.array([0.0, 0.0, 0.0]) translation_vector_3 = np.array([xy_shift[0], xy_shift[1], 0.0]) object_points_3 = transform_object_points( object_points_2, rotation_vector_3, translation_vector_3) final_z_rotation = - reconstruct_z_rotation( object_points_3[x_axis_index, 0], object_points_3[x_axis_index, 1]) rotation_vector_4 = np.array([0.0, 0.0, final_z_rotation]) translation_vector_4 = np.array([0.0, 0.0, 0.0]) object_points_4 = transform_object_points( object_points_3, rotation_vector_4, translation_vector_4) rotation_vector_2_3, translation_vector_2_3 = compose_transformations( rotation_vector_2, translation_vector_2, rotation_vector_3, translation_vector_3) rotation_vector_2_3_4, translation_vector_2_3_4 = compose_transformations( rotation_vector_2_3, translation_vector_2_3, rotation_vector_4, translation_vector_4) camera_rotation_vector, camera_translation_vector = invert_transformation( rotation_vector_2_3_4, translation_vector_2_3_4) return camera_rotation_vector, camera_translation_vector, scale_factor, object_points_4 def estimate_camera_poses_from_plane_image_points( image_points_1, image_points_2, camera_matrix, height, origin_index, x_axis_index, y_reference_point, y_reference_point_sign, distance_calibration_indices, calibration_distance): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) if image_points_1.size == 0 or image_points_2.size == 0: raise ValueError('One or both sets of image points appear to be empty') image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) relative_rotation_vector, relative_translation_vector = estimate_camera_pose_from_image_points( image_points_1, image_points_2, camera_matrix) input_object_points = reconstruct_object_points_from_image_points( image_points_1, image_points_2, camera_matrix) rotation_vector_1, translation_vector_1, scale_factor = estimate_camera_pose_from_plane_object_points( input_object_points, height, origin_index, x_axis_index, y_reference_point, y_reference_point_sign, distance_calibration_indices, calibration_distance)[:3] rotation_vector_2, translation_vector_2 = compose_transformations( rotation_vector_1, translation_vector_1, relative_rotation_vector, relative_translation_vector * scale_factor) return rotation_vector_1, 
translation_vector_1, rotation_vector_2, translation_vector_2
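A minimal usage sketch tying generate_camera_pose, generate_camera_matrix and project_points together, assuming the functions defined above are in scope (e.g. run in the same module); the camera position, angles and intrinsics are made-up values.

import numpy as np

# Camera 3 m above the origin, facing north (+y), tilted 30 degrees toward the ground
rotation_vector, translation_vector = generate_camera_pose(
    camera_position=np.array([0.0, 0.0, 3.0]),
    yaw=0.0,
    pitch=-np.pi / 6,
    roll=0.0)

camera_matrix = generate_camera_matrix(
    focal_length=[1400.0, 1400.0],
    principal_point=[960.0, 540.0])

# Project a point on the ground 4 m north of the origin into the image
image_point = project_points(
    object_points=np.array([[0.0, 4.0, 0.0]]),
    rotation_vector=rotation_vector,
    translation_vector=translation_vector,
    camera_matrix=camera_matrix,
    distortion_coefficients=np.zeros(4))
print(image_point)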
""" Client Run by the evaluator, sends a TLS Client Hello with the ESNI extension, followed by two test packets. """ import argparse import binascii as bi import os import socket import time socket.setdefaulttimeout(1) from plugins.plugin_client import ClientPlugin class ESNIClient(ClientPlugin): """ Defines the ESNI client. """ name = "esni" def __init__(self, args): """ Initializes the esni client. """ ClientPlugin.__init__(self) self.args = args @staticmethod def get_args(command): """ Defines required args for this plugin """ super_args = ClientPlugin.get_args(command) parser = argparse.ArgumentParser(description='ESNI Client') parser.add_argument('--server', action='store', help="server to connect to") args, _ = parser.parse_known_args(command) args = vars(args) super_args.update(args) return super_args def run(self, args, logger, engine=None): """ Try to make a forbidden GET request to the server. """ fitness = 0 port = int(args["port"]) server = args["server"] # Client Hello with the ESNI extension msg = b'16030103ae010003aa0303d992f9c22fbe7a7cdbc9619924bd9cc13c057f5f3da1829426cb0944292705152033c5be80af6de7633e07680125e27e3f7b80ff5e9b3cbe5278434c90b9e0e5fa0024130113031302c02bc02fcca9cca8c02cc030c00ac009c013c014009c009d002f0035000a0100033d00170000ff01000100000a000e000c001d00170018001901000101000b000201000010000e000c02683208687474702f312e310005000501000000000033006b0069001d002019570ada256d971048b34d3e9ff5607588bf10cfb6c064fc45a0fc401d9a7c470017004104ea047fd2e0fc3314de4bf03ee6205134f0d15c07f62b77625a95dc194ce8fb88cc16e53c8b400ba463915b87480b247851c095abdb0d3d5d5b14dd77dcd73750002b00050403040303000d0018001604030503060308040805080604010501060102030201002d00020101ffce016e1301001d00203652aaf122dc47dcf9fa8c37377476d050e54119adfb518f7aabd842ac97d23b00205a30e70593f57708370310ecf7054e488a62eb11e01fd059851c442d453d15c5012441910eec152c4df5ff28bf5cddb1a2e54e8595197e3dc36325145ad50a7842eb3860c8fc6ac5c1794017101365c6122abb3b81f31f5f4204eebb244252d22600734424d875948657b892d3aab3310491aff3b5126f1186bd9c321fb446cf2a41985dd206364ea28c3f8aafeafc62e039f157c3f2703a35448d2d16dcf2d5055ce58c024a5b4eb780fc5128af4ba4e90d6eef1b3cf30a5b2000448d65d6af4fffabeb91e1ed2093fdcc6ffd87ceb94429864ddb657e6316654631193fd25840e51645e1708d351140dd6eeefb80ddbaebb250b2975a1d5f291d99f89de4553d083f1b9820a3ee6976357cff433b7eb77febb3eb0db012154154d3e19b4409f8afa11aa1baeb0b7663d97f0caca2b11ed971fc574588e76a37aa4259593fe8e07fbbca27fa001c00024001002900eb00c600c07f87fafe9de4168227aeec4540f1aaeae43ff61a353f5480420ac3c33f90003fe6f501080bf04f22576a0cc1db8dc83d37b25859a81ce0277364a1794cde1c60f3b94175477beff56db7f9e2b83b31383b7d8b5da20834fb0a63d7ba2e42ad3dfa21666ed8621f34273ac5c273d7f492750e3df3bae36e398ddf83d4a7c36f639087f14eb1f7bfb2c7c0c736d69bcdbf21158c07b7088b95e5bcd08138d6b511f6492d7d93bb3729641519097b970cfeffa5882c67111dcf5d7966a1c58b4edb6e8c905a002120e47ccba37d89e4c1d979c6ef954d1cd946eff0d3119aa2b4d6411138aec74579' try: client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.settimeout(5) client.connect((server, port)) client.sendall(bi.unhexlify(msg)) time.sleep(2) client.sendall(b"test packet") time.sleep(2) client.sendall(b"test packet 2") server_data = client.recv(1024) logger.debug("Data recieved: %s", server_data.decode('utf-8', 'ignore')) fitness += 100 client.close() except socket.timeout: # Happens on connect, not sendall logger.debug("Client: Timeout") fitness -= 110 except socket.error as exc: fitness -= 100 logger.exception("Socket error caught in client esni test.") except Exception: 
logger.exception("Exception caught in client esni test.") fitness = -120 finally: logger.debug("Client finished esni test.") return fitness * 4
""" Dump/export our own data to a local file. Script is installed as `location_dump`. """ import argparse import os import os.path import sys from sqlalchemy import text from ichnaea.db import ( configure_db, db_worker_session, ) from ichnaea.geocalc import bbox from ichnaea.log import ( configure_logging, LOGGER, ) from ichnaea.models import ( BlueShard, CellShard, WifiShard, ) from ichnaea import util def where_area(lat, lon, radius): # Construct a where clause based on a bounding box around the given # center point. if lat is None or lon is None or radius is None: return None max_lat, min_lat, max_lon, min_lon = bbox(lat, lon, radius) return '`lat` <= %s and `lat` >= %s and `lon` <= %s and `lon` >= %s' % ( round(max_lat, 5), round(min_lat, 5), round(max_lon, 5), round(min_lon, 5)) def dump_model(shard_model, session, fd, where=None): fd.write(shard_model.export_header() + '\n') for model in shard_model.shards().values(): LOGGER.info('Exporting table: %s', model.__tablename__) stmt = model.export_stmt() if where: stmt = stmt.replace(' WHERE ', ' WHERE %s AND ' % where) stmt = text(stmt) min_key = '' limit = 25000 while True: rows = session.execute( stmt.bindparams( export_key=min_key, limit=limit )).fetchall() if rows: buf = '\n'.join([row.export_value for row in rows]) if buf: buf += '\n' fd.write(buf) min_key = rows[-1].export_key else: break def dump_file(datatype, session, filename, lat=None, lon=None, radius=None): model = { 'blue': BlueShard, 'cell': CellShard, 'wifi': WifiShard, } where = where_area(lat, lon, radius) with util.gzip_open(filename, 'w') as fd: dump_model(model[datatype], session, fd, where=where) return 0 def main(argv, _db=None, _dump_file=dump_file): parser = argparse.ArgumentParser( prog=argv[0], description='Dump/export data.') parser.add_argument('--datatype', required=True, help='Type of the data file, blue, cell or wifi') parser.add_argument('--filename', required=True, help='Path to the csv.gz export file.') parser.add_argument('--lat', default=None, help='The center latitude of the desired area.') parser.add_argument('--lon', default=None, help='The center longitude of the desired area.') parser.add_argument('--radius', default=None, help='The radius of the desired area.') args = parser.parse_args(argv[1:]) if not args.filename: # pragma: no cover parser.print_help() return 1 filename = os.path.abspath(os.path.expanduser(args.filename)) if os.path.isfile(filename): # pragma: no cover print('File already exists.') return 1 datatype = args.datatype if datatype not in ('blue', 'cell', 'wifi'): # pragma: no cover print('Unknown data type.') return 1 lat, lon, radius = (None, None, None) if (args.lat is not None and args.lon is not None and args.radius is not None): lat = float(args.lat) lon = float(args.lon) radius = int(args.radius) configure_logging() db = configure_db('ro', transport='sync', _db=_db) with db_worker_session(db, commit=False) as session: exit_code = _dump_file( datatype, session, filename, lat=lat, lon=lon, radius=radius) return exit_code def console_entry(): # pragma: no cover sys.exit(main(sys.argv))
#!/usr/bin/python
import serial
import time

# Read gas and fire sensor levels from a serial-attached device.
ser = serial.Serial(
    port='/dev/ttyACM1',
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS
)

while True:
    ser.flush()
    line = ser.readline().decode().strip()
    # Each reading is expected as "<gas>,<fire>"; skip anything else.
    if "," not in line:
        continue
    gas, fire = line.split(",", 1)
    print("gas-level: ", gas)
    print("fire-level: ", fire)
    time.sleep(1)
""" Convert an RDF graph into an image for displaying in the notebook, via GraphViz It has two parts: - conversion from rdf into dot language. Code based in rdflib.utils.rdf2dot - rendering of the dot graph into an image. Code based on ipython-hierarchymagic, which in turn bases it from Sphinx See https://github.com/tkf/ipython-hierarchymagic License for RDFLIB ------------------ Copyright (c) 2002-2015, RDFLib Team See CONTRIBUTORS and http://github.com/RDFLib/rdflib All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Daniel Krech nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License for ipython-hierarchymagic ---------------------------------- ipython-hierarchymagic is licensed under the term of the Simplified BSD License (BSD 2-clause license), as follows: Copyright (c) 2012 Takafumi Arakaki All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
License for Sphinx ------------------ `run_dot` function and `HierarchyMagic._class_name` method in this extension heavily based on Sphinx code `sphinx.ext.graphviz.render_dot` and `InheritanceGraph.class_name`. Copyright notice for Sphinx can be found below. Copyright (c) 2007-2011 by the Sphinx team (see AUTHORS file). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import errno import base64 import re from io import StringIO import rdflib from .utils import escape import logging LOG = logging.getLogger(__name__) # ------------------------------------------------------------------------ LABEL_PROPERTIES = [ rdflib.RDFS.label, rdflib.URIRef('http://schema.org/name'), rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#label'), rdflib.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'), rdflib.URIRef("http://purl.org/dc/elements/1.1/title"), rdflib.URIRef("http://xmlns.com/foaf/0.1/name"), rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"), rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org"), ] def label(x, gr, preferred_languages=None): ''' @param x: graph entity @param gr (Graph): RDF graph @param preferred_languages (iterable): list of preferred language codes for the labels. Return the best available label in the graph for the passed entity. If a set of preferred languages is given, try them in order. If none is found, an arbitrary language will be chosen ''' # Find all labels & their language labels = {l.language: l for labelProp in LABEL_PROPERTIES for l in gr.objects(x, labelProp)} #LOG.debug("LABELS %s %s", labels, preferred_languages) #return repr(preferred_languages) + repr(labels) if labels: # Search the preferred language if preferred_languages is not None: for l in preferred_languages: if l in labels: return labels[l] # If not found, return an arbitrary language return labels.popitem()[1] # No labels available. 
Try to generate a QNAME, or else, the string itself try: return gr.namespace_manager.compute_qname(x)[2].replace('_', ' ') except Exception: # Attempt to extract the trailing part of an URI m = re.search('([^/]+)$', x) return m.group(1).replace('_', ' ') if m else x def rdf2dot(g, stream, opts={}): ''' Convert the RDF graph to DOT Write the dot output to the stream ''' LOG.debug("RDF2DOT: %s", opts) accept_lang = opts.get('lang', []) do_literal = opts.get('literal') nodes = {} def node_id(x): if x not in nodes: nodes[x] = "node%d" % len(nodes) return nodes[x] def qname(x, g): try: q = g.compute_qname(x) return q[0] + ":" + q[2] except Exception: return x def accept(node): if isinstance(node, (rdflib.URIRef, rdflib.BNode)): return True if not do_literal: return False return (not accept_lang) or (node.language in accept_lang) stream.write(u'digraph { \n node [ fontname="DejaVu Sans,Tahoma,Geneva,sans-serif" ] ; \n') # Write all edges. In the process make a list of all nodes for s, p, o in g: # skip triples for labels if p == rdflib.RDFS.label: continue # Create a link if both objects are graph nodes # (or, if literals are also included, if their languages match) if not (accept(s) and accept(o)): continue # add the nodes to the list sn = node_id(s) on = node_id(o) # add the link q = qname(p, g) if isinstance(p, rdflib.URIRef): opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s", href="%s", target="_other" ] ;\n' % (sn, on, q, p) else: opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s" ] ;\n' % (sn, on, q) stream.write(opstr) # Write all nodes for u, n in nodes.items(): lbl = escape(label(u, g, accept_lang), True) if isinstance(u, rdflib.URIRef): opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s", href="%s", target=_other ] \n' % (n, 'blue', lbl, u) else: opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s" ] \n' % (n, 'black', lbl) stream.write(u"# %s %s\n" % (u, n)) stream.write(opstr) stream.write(u'}\n') # ------------------------------------------------------------------------ EPIPE = getattr(errno, 'EPIPE', 0) EINVAL = getattr(errno, 'EINVAL', 0) def run_dot(code, fmt='svg', gv_options=[], **kwargs): ''' Run GraphViz on the buffer holding the graph ''' LOG.debug("rundot fmt=%s options=%s", fmt, gv_options) # mostly copied from sphinx.ext.graphviz.render_dot import os from subprocess import Popen, PIPE dot_args = [kwargs.get('prg', 'dot')] + gv_options + ['-T', fmt] if os.name == 'nt': # Avoid opening shell window. 
# * https://github.com/tkf/ipython-hierarchymagic/issues/1 # * http://stackoverflow.com/a/2935727/727827 p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000) else: p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE) wentwrong = False try: # Graphviz may close standard input when an error occurs, # resulting in a broken pipe on communicate() stdout, stderr = p.communicate(code.encode('utf-8')) except OSError as err: if err.errno != EPIPE: raise wentwrong = True except IOError as err: if err.errno != EINVAL: raise wentwrong = True if wentwrong: # in this case, read the standard output and standard error streams # directly, to get the error message(s) stdout, stderr = p.stdout.read(), p.stderr.read() p.wait() if p.returncode != 0: raise RuntimeError(u'dot exited with error:\n[stderr]\n{0}' .format(stderr.decode('utf-8'))) return stdout # ------------------------------------------------------------------------ def draw_graph(g, fmt='svg', prg='dot', options={}): ''' Draw an RDF graph as an image ''' # Convert RDF to Graphviz buf = StringIO() rdf2dot(g, buf, options) gv_options = options.get('graphviz', []) if fmt == 'png': gv_options += ['-Gdpi=220', '-Gsize=25,10!'] metadata = {"width": 5500, "height": 2200, "unconfined": True} #import codecs #with codecs.open('/tmp/sparqlkernel-img.dot','w',encoding='utf-8') as f: # f.write( buf.getvalue() ) # Now use Graphviz to generate the graph image = run_dot(buf.getvalue(), fmt=fmt, options=gv_options, prg=prg) #with open('/tmp/sparqlkernel-img.'+fmt,'w') as f: # f.write( image ) # Return it if fmt == 'png': return {'image/png': base64.b64encode(image).decode('ascii')}, \ {'image/png': metadata} elif fmt == 'svg': img = image.decode('utf-8').replace('<svg', '<svg class="unconfined"', 1) return {'image/svg+xml': img}, \ {'unconfined': True}
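# ---------------------------------------------------------------------------
# Illustrative usage sketch (hedged): build a tiny RDF graph and render it
# with draw_graph() above. Requires the GraphViz `dot` binary on the PATH;
# the triples and URIs are invented purely for illustration.
# ---------------------------------------------------------------------------
def _example_render():  # pragma: no cover
    g = rdflib.Graph()
    alice = rdflib.URIRef("http://example.org/alice")
    bob = rdflib.URIRef("http://example.org/bob")
    knows = rdflib.URIRef("http://xmlns.com/foaf/0.1/knows")
    g.add((alice, knows, bob))
    g.add((alice, rdflib.RDFS.label, rdflib.Literal("Alice", lang="en")))
    # draw_graph() returns a (data, metadata) pair keyed by MIME type,
    # ready to be used as a Jupyter display_data payload.
    data, metadata = draw_graph(g, fmt="svg", options={"lang": ["en"]})
    return data["image/svg+xml"]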
""" LUME-Genesis primary class """ from genesis import archive, lattice, parsers, tools, writers import h5py import tempfile from time import time import shutil import os def find_genesis2_executable(genesis_exe=None, verbose=False): """ Searches for the genesis2 executable. """ if genesis_exe: exe = tools.full_path(genesis_exe) if os.path.exists(exe): if verbose: print(f'Using user provided executable: {exe}') return exe else: raise ValueError(f'Genesis executable does not exist: {exe}') for exe in [tools.full_path('$GENESIS_BIN'), shutil.which('genesis2')]: if os.path.exists(exe): if verbose: print(f'Using found executable: {exe}') return exe raise ValueError('No Genesisi executable found') class Genesis: """ LUME-Genesis class to parse input, run genesis, and parse output. By default, a temporary directory is created for working. """ def __init__(self, input_file=None, genesis_bin=None, use_tempdir=True, workdir=None, verbose=False ): # Save init self.original_input_file = input_file self.use_tempdir = use_tempdir self.workdir = workdir if workdir: assert os.path.exists(workdir), 'workdir does not exist: '+workdir self.verbose=verbose self.genesis_bin = find_genesis2_executable(genesis_bin, verbose=verbose) self.binary_prefixes = [] # For example, ['mpirun', '-n', '2'] self.finished = False # self.output = {} # self.timeout = None # Run control self.finished = False self.configured = False if input_file: self.load_input(input_file) self.configure() else: self.vprint('Warning: Input file does not exist. Not configured. Please call .load_input(input_file) and .configure()') def configure(self): self.configure_genesis(workdir=self.workdir) def configure_genesis(self, input_filePath=None, workdir=None): """ Configures working directory. """ if input_filePath: self.load_input(input_filePath) # Set paths if self.use_tempdir: # Need to attach this to the object. Otherwise it will go out of scope. 
self.tempdir = tempfile.TemporaryDirectory(dir=self.workdir) self.path = self.tempdir.name else: if workdir: self.path = workdir self.tempdir = None else: # Work in place self.path = self.original_path # Make full path self.input_file = os.path.join(self.path, 'genesis.in') self.vprint('Configured to run in:', self.path) self.configured = True # Conveniences @property def beam(self): return self.input['beam'] @property def lattice(self): try: return self.input['lattice'] except: print('No lattice found, assuming lattice is defined in input file.') return None @property def param(self): return self.input['param'] def load_input(self, filePath): """ Loads existing input file, with lattice """ assert os.path.exists(filePath), f'Input file does not exist: {filePath}' f = tools.full_path(filePath) self.original_path, self.input_file = os.path.split(f) # Get original path, name of main input self.input = { 'beam':None } d = self.input main = parsers.parse_main_inputfile(filePath) d['param'] = main if main['beamfile'] != '': fname = main['beamfile'] d['beam'] = parsers.parse_beam_file(main['beamfile'], verbose=self.verbose) # Use this new name main['beamfile'] = parsers.POSSIBLE_INPUT_FILES['beamfile'] else: d['beam'] = None if main['maginfile'] != '': self.load_lattice(filePath=main['maginfile'], verbose=self.verbose) # Use this new name main['maginfile'] = parsers.POSSIBLE_INPUT_FILES['maginfile'] else: main['lattice'] = None def load_output(self, filePath=None): if not filePath: fname = os.path.join(self.path, self.param['outputfile']) else: fname = filePath if os.path.exists(fname): self.output.update(parsers.parse_genesis_out(fname)) self.vprint('Loaded output:', fname) # Final field dflfile = fname+'.dfl' if os.path.exists(dflfile): self.output['data']['dfl'] = parsers.parse_genesis_dfl(dflfile, self.param['ncar']) self.vprint('Loaded dfl:', dflfile) # Field history fldfile = fname+'.fld' if os.path.exists(fldfile): # Time independent is just one slice if self['itdp'] == 0: nslice = 1 else: nslice = self.param['nslice'] self.output['data']['fld'] = parsers.parse_genesis_fld(fldfile, self.param['ncar'], nslice) self.vprint('Loaded fld:', fldfile) # Final particles dpafile = fname+'.dpa' if os.path.exists(dpafile): self.output['data']['dpa'] = parsers.parse_genesis_dpa(dpafile, self.param['npart']) self.vprint('Loaded dpa:', dpafile) # Particle history parfile = fname+'.par' if os.path.exists(parfile): self.output['data']['par'] = parsers.parse_genesis_dpa(parfile, self.param['npart']) self.vprint('Loaded par:', parfile) # def load_lattice(self, filePath=None, verbose=False): """ loads an original Genesis-style lattice into a standard_lattice """ if not filePath: fname = os.path.join(self.path, self.param['maginfile']) else: fname = filePath self.vprint('loading lattice: ', fname) lat = parsers.parse_genesis_lattice(fname) # Standardize lat['eles'] = lattice.standard_eles_from_eles(lat['eles']) self.input['lattice'] = lat def write_beam(self, filePath=None): if not self.beam: return if not filePath: filePath = os.path.join(self.path, self.param['beamfile']) writers.write_beam_file(filePath, self.beam, verbose=self.verbose) def write_input(self): """ Writes all input files """ self.write_input_file() self.write_beam() self.write_lattice() # Write the run script self.get_run_script() def write_input_file(self): """ Write parameters to main .in file """ lines = tools.namelist_lines(self.param, start='$newrun', end='$end') with open(self.input_file, 'w') as f: for line in lines: 
f.write(line+'\n') def write_lattice(self): if not self.lattice: self.input['lattice'] = None else: filePath = os.path.join(self.path, self.param['maginfile']) print(self.path, self.param['maginfile']) lattice.write_lattice(filePath, self.lattice) self.vprint('Lattice written:', filePath) def write_wavefront(self, h5=None): """ Write an openPMD wavefront from the dfl """ if not h5: h5 = 'genesis_wavefront_'+self.fingerprint()+'.h5' if isinstance(h5, str): fname = os.path.expandvars(h5) g = h5py.File(fname, 'w') self.vprint(f'Writing wavefront (dfl data) to file {fname}') else: g = h5 dfl = self.output['data']['dfl'] param = self.output['param'] writers.write_openpmd_wavefront_h5(g, dfl=dfl, param=param) return h5 def get_run_script(self, write_to_path=True): """ Assembles the run script. Optionally writes a file 'run' with this line to path. """ _, infile = os.path.split(self.input_file) runscript = [self.genesis_bin, infile] # Allow for MPI commands if len(self.binary_prefixes) > 0: runscript = self.binary_prefixes + runscript if write_to_path: filename = os.path.join(self.path, 'run') with open(filename, 'w') as f: f.write(' '.join(runscript)) tools.make_executable(filename) return runscript def run(self): if not self.configured: print('not configured to run') return self.run_genesis(verbose=self.verbose, timeout=self.timeout) def run_genesis(self, verbose=False, parse_output=True, timeout=None): # Check that binary exists self.genesis_bin = tools.full_path(self.genesis_bin) assert os.path.exists(self.genesis_bin), 'Genesis binary does not exist: '+ self.genesis_bin # Clear old output self.output = {} run_info = self.output['run_info'] = {} t1 = time() run_info['start_time'] = t1 # Move to local directory # Save init dir init_dir = os.getcwd() self.vprint('init dir: ', init_dir) os.chdir(self.path) # Debugging self.vprint('Running genesis in '+os.getcwd()) # Write all input self.write_input() runscript = self.get_run_script() run_info['run_script'] = ' '.join(runscript) try: if timeout: res = tools.execute2(runscript, timeout=timeout) log = res['log'] self.error = res['error'] run_info['why_error'] = res['why_error'] else: # Interactive output, for Jupyter log = [] for path in tools.execute(runscript): self.vprint(path, end="") log.append(path) self.log = log self.error = False if parse_output: self.load_output() except Exception as ex: print('Run Aborted', ex) self.error = True run_info['why_error'] = str(ex) finally: run_info['run_time'] = time() - t1 run_info['run_error'] = self.error # Return to init_dir os.chdir(init_dir) self.finished = True def fingerprint(self): """ Data fingerprint using the input. """ return tools.fingerprint(self.input) def vprint(self, *args, **kwargs): # Verbose print if self.verbose: print(*args, **kwargs) def input_twiss(self): betax = self['rxbeam']**2 * self['gamma0'] / self['emitx'] betay = self['rybeam']**2 * self['gamma0'] / self['emity'] alphax = self['alphax'] alphay = self['alphay'] return {'betax':betax, 'betay':betay, 'alphax':alphax, 'alphay':alphay} def archive(self, h5=None): """ Archive all data to an h5 handle or filename. If no file is given, a file based on the fingerprint will be created. 
""" if not h5: h5 = 'genesis_'+self.fingerprint()+'.h5' if isinstance(h5, str): fname = os.path.expandvars(h5) g = h5py.File(fname, 'w') self.vprint(f'Archiving to file {fname}') else: g = h5 # Write basic attributes archive.genesis_init(g) # All input archive.write_input_h5(g, self.input, name='input') # All output archive.write_output_h5(g, self.output, name='output', verbose=self.verbose) return h5 def load_archive(self, h5, configure=True): """ Loads input and output from archived h5 file. See: Genesis.archive """ if isinstance(h5, str): fname = os.path.expandvars(h5) g = h5py.File(fname, 'r') glist = archive.find_genesis_archives(g) n = len(glist) if n == 0: # legacy: try top level message = 'legacy' elif n == 1: gname = glist[0] message = f'group {gname} from' g = g[gname] else: raise ValueError(f'Multiple archives found in file {fname}: {glist}') self.vprint(f'Reading {message} archive file {h5}') else: g = h5 self.input = archive.read_input_h5(g['input']) self.output = archive.read_output_h5(g['output'], verbose=self.verbose) self.vprint('Loaded from archive. Note: Must reconfigure to run again.') self.configured = False if configure: self.configure() def copy(self): """ Returns a deep copy of this object. If a tempdir is being used, will clear this and deconfigure. """ G2 = deepcopy(self) # Clear this if G2.use_tempdir: G2.path = None G2.configured = False return G2 def __getitem__(self, key): """ Convenience syntax to get an attribute See: __setitem__ """ if key in self.param: return self.param[key] raise ValueError(f'{key} does not exist in input param') def __setitem__(self, key, item): """ Convenience syntax to set input parameters Example: G['ncar'] = 251 """ if key in self.param: self.param[key] = item else: raise ValueError(f'{key} does not exist in input param') def __str__(self): path = self.path s = '' if self.finished: s += 'Genesis finished in '+path elif self.configured: s += 'Genesis configured in '+path else: s += 'Genesis not configured.' return s
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import absolute_import from telemetry.core import util util.AddDirToPythonPath( util.GetTelemetryDir(), 'third_party', 'websocket-client') from websocket import create_connection # pylint: disable=W0611 from websocket import WebSocketException # pylint: disable=W0611 from websocket import WebSocketTimeoutException # pylint: disable=W0611
# -*- coding: utf-8 -*- import pytest from wemake_python_styleguide.violations.best_practices import ( BaseExceptionViolation, ) from wemake_python_styleguide.visitors.ast.keywords import ( WrongExceptionTypeVisitor, ) use_base_exception = """ try: execute() except BaseException: raise """ use_except_exception = """ try: 1 / 0 except Exception: raise """ use_bare_except = """ try: 1 / 0 except: raise """ @pytest.mark.parametrize('code', [ use_base_exception, ]) def test_use_base_exception( assert_errors, parse_ast_tree, code, default_options, ): """Testing that `except BaseException:` is restricted.""" tree = parse_ast_tree(code) visitor = WrongExceptionTypeVisitor(default_options, tree=tree) visitor.run() assert_errors(visitor, [BaseExceptionViolation]) @pytest.mark.parametrize('code', [ use_except_exception, use_bare_except, ]) def test_use_exception( assert_errors, parse_ast_tree, code, default_options, ): """Testing that `except Exception:` and `except:` are allowed.""" tree = parse_ast_tree(code) visitor = WrongExceptionTypeVisitor(default_options, tree=tree) visitor.run() assert_errors(visitor, [])
from .functional import *
from . import ac
from . import q_learning
from . import rnnq_learning

AC = ac.ActorCritic
MFAC = ac.MFAC
IL = q_learning.DQN
MFQ = q_learning.MFQ
POMFQ = q_learning.POMFQ
rnnIL = rnnq_learning.DQN
rnnMFQ = rnnq_learning.MFQ


def spawn_ai(algo_name, sess, env, handle, human_name, max_steps):
    """Instantiate the requested algorithm for one agent group."""
    if algo_name == 'mfq':
        model = MFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'mfac':
        model = MFAC(sess, human_name, handle, env)
    elif algo_name == 'ac':
        model = AC(sess, human_name, handle, env)
    elif algo_name == 'il':
        model = IL(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'rnnIL':
        model = rnnIL(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'rnnMFQ':
        model = rnnMFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'pomfq':
        model = POMFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
    else:
        # Fail fast instead of returning an unbound name for unknown algorithms.
        raise ValueError('Unknown algorithm: {}'.format(algo_name))
    return model
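# ---------------------------------------------------------------------------
# Illustrative usage sketch (hedged): spawning one model per agent group inside
# an existing TensorFlow session. The environment, handles and group names are
# placeholders for whatever MAgent-style setup the caller has; only the
# spawn_ai() signature above is assumed.
# ---------------------------------------------------------------------------
def _example_spawn(sess, env, handles):  # pragma: no cover
    # Two groups, e.g. predator/prey, trained with mean-field Q-learning.
    return [
        spawn_ai('mfq', sess, env, handles[0], 'predator', max_steps=400),
        spawn_ai('mfq', sess, env, handles[1], 'prey', max_steps=400),
    ]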
from lib.types import IStdin, IStdout def main(stdin: IStdin, stdout: IStdout): stdout.write('*** You are a student at PWN_University and you are all set to graduate at the end of the semester. Unfortunately the night before graduation you learned you were going to fail your last class and now you’re afraid the school wont let you graduate. Luckily you have a friend in IT and after hearing of your situation he casually sends you a message with the IP address for one of the schools secure servers. Your goal is to hack into the server and figure out a way to change your grade! ***\n') stdout.write('\n') stdout.write('You are requesting access to an offical PWN_University server. Only authorised individuals are allowed further.\n') stdout.write('\n') stdout.write('*** You remember one of your IT friends who works for the university keeps their username encoded on their desk incase they forget the spelling. So you go to their desk and find out its MTMzN3VzZXI= ***\n') stdout.write('\n') stdout.write('Enter your username: ') stdout.flush() username = stdin.readline().strip('\n') if username == '1337user': stdout.write('\n') stdout.write('*** You then remember there was a data breach of all university passwords. Luckily PWN_University does not store their passwords in plain text, but rather in MD5 hashes. You navigate to the one associated with your friends username and it is 90f2c9c53f66540e67349e0ab83d8cd0 ***\n') stdout.write('\n') stdout.write('Now please enter your password: ') stdout.flush() password = stdin.readline().strip('\n') if password == 'p@ssword': stdout.write('Login Successful!\n') stdout.write('\n') stdout.write('*** Now that you have logged into the server you remember your IT friend implying that the database of grades is a mysql databse. Maybe you should try changing directories to where that is commonly stored (please use the full path) ***\n') stdout.write('\n') stdout.write('~$ ') stdout.flush() path = stdin.readline().strip('\n') if path == 'cd /var/lib/mysql': stdout.write('\n') stdout.write('*** Wow it looks like your getting close you are now in the mysql directory. You run some SQL queries on the grades database and are able to select the string that says \'PWNER1337 has a F\'. All you have to do is replace F with an A (type in the SQL command to do this bellow) ***\n') stdout.write('\n') stdout.write('mysql> ') stdout.flush() sql = stdin.readline().strip('\n') #if sql == 'REPLACE(\'PWNER1337 has a F\', \'F\', \'A\');': if 'REPLACE' in sql and 'PWNER1337' in sql and 'F' in sql and 'A' in sql: stdout.write('\n') stdout.write('*** Congratulations you changed your grade from an F to an A. Unfortunatly the university caught you in the act, but because you were able to hack PWN_University they decided to let you graduate after all! ***\n') stdout.write('\n') stdout.write('*** Present this flag to the challenge oragnizer to claim your prize! flag{CI_NETSEC_1ST_COMP}\n') else : stdout.write('\n') stdout.write('*** Oh no looks like you entered the wrong SQL command maybe you should try reconnecting to the server and try another answer... ***\n') else : stdout.write('\n') stdout.write('*** Oh no looks like you entered the wrong path maybe you should try reconnecting to the server and try another answer... ***\n') else : stdout.write('\n') stdout.write('Thats not the correct password access denied!\n') stdout.write('*** Oh no looks like your access was denied maybe you should try reconnecting to the server and try another answer... 
***\n') else : stdout.write('\n') stdout.write('Thats not a valid username access denied!\n') stdout.write('*** Oh no looks like your access was denied maybe you should try reconnecting to the server and try another answer... ***\n')
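# ---------------------------------------------------------------------------
# Illustrative helper sketch (hedged, not part of the challenge above): the two
# "recon" steps the story describes are plain base64 decoding and an MD5
# comparison. The candidate list is up to the player; the walkthrough only
# implies that 'p@ssword' hashes to the value found in the breach data.
# ---------------------------------------------------------------------------
import base64
import hashlib

def recover_username(encoded: str) -> str:
    # 'MTMzN3VzZXI=' decodes to '1337user'
    return base64.b64decode(encoded).decode()

def crack_md5(target_hash: str, candidates) -> str:
    # Return the first candidate whose MD5 digest matches the leaked hash.
    for candidate in candidates:
        if hashlib.md5(candidate.encode()).hexdigest() == target_hash:
            return candidate
    return ""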
"""Constants for the ISY994 Platform.""" import logging from homeassistant.components.binary_sensor import ( DEVICE_CLASS_BATTERY, DEVICE_CLASS_COLD, DEVICE_CLASS_DOOR, DEVICE_CLASS_GAS, DEVICE_CLASS_HEAT, DEVICE_CLASS_MOISTURE, DEVICE_CLASS_MOTION, DEVICE_CLASS_OPENING, DEVICE_CLASS_PROBLEM, DEVICE_CLASS_SAFETY, DEVICE_CLASS_SMOKE, DEVICE_CLASS_SOUND, DEVICE_CLASS_VIBRATION, DOMAIN as BINARY_SENSOR, ) from homeassistant.components.climate.const import ( CURRENT_HVAC_COOL, CURRENT_HVAC_FAN, CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, DOMAIN as CLIMATE, FAN_AUTO, FAN_HIGH, FAN_MEDIUM, FAN_ON, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, PRESET_AWAY, PRESET_BOOST, ) from homeassistant.components.cover import DOMAIN as COVER from homeassistant.components.fan import DOMAIN as FAN from homeassistant.components.light import DOMAIN as LIGHT from homeassistant.components.lock import DOMAIN as LOCK from homeassistant.components.sensor import DOMAIN as SENSOR from homeassistant.components.switch import DOMAIN as SWITCH from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ENERGY_KILO_WATT_HOUR, FREQUENCY_HERTZ, LENGTH_CENTIMETERS, LENGTH_FEET, LENGTH_INCHES, LENGTH_KILOMETERS, LENGTH_METERS, LENGTH_MILES, MASS_KILOGRAMS, MASS_POUNDS, POWER_WATT, PRESSURE_INHG, SERVICE_LOCK, SERVICE_UNLOCK, SPEED_KILOMETERS_PER_HOUR, SPEED_METERS_PER_SECOND, SPEED_MILES_PER_HOUR, STATE_CLOSED, STATE_CLOSING, STATE_LOCKED, STATE_OFF, STATE_ON, STATE_OPEN, STATE_OPENING, STATE_PROBLEM, STATE_UNKNOWN, STATE_UNLOCKED, TEMP_CELSIUS, TEMP_FAHRENHEIT, TEMP_KELVIN, TIME_DAYS, TIME_HOURS, TIME_MILLISECONDS, TIME_MINUTES, TIME_MONTHS, TIME_SECONDS, TIME_YEARS, UNIT_PERCENTAGE, UV_INDEX, VOLT, VOLUME_GALLONS, VOLUME_LITERS, ) _LOGGER = logging.getLogger(__package__) DOMAIN = "isy994" MANUFACTURER = "Universal Devices, Inc" CONF_IGNORE_STRING = "ignore_string" CONF_SENSOR_STRING = "sensor_string" CONF_VAR_SENSOR_STRING = "variable_sensor_string" CONF_TLS_VER = "tls" CONF_RESTORE_LIGHT_STATE = "restore_light_state" DEFAULT_IGNORE_STRING = "{IGNORE ME}" DEFAULT_SENSOR_STRING = "sensor" DEFAULT_RESTORE_LIGHT_STATE = False DEFAULT_TLS_VERSION = 1.1 DEFAULT_PROGRAM_STRING = "HA." DEFAULT_VAR_SENSOR_STRING = "HA." KEY_ACTIONS = "actions" KEY_STATUS = "status" SUPPORTED_PLATFORMS = [BINARY_SENSOR, SENSOR, LOCK, FAN, COVER, LIGHT, SWITCH, CLIMATE] SUPPORTED_PROGRAM_PLATFORMS = [BINARY_SENSOR, LOCK, FAN, COVER, SWITCH] SUPPORTED_BIN_SENS_CLASSES = ["moisture", "opening", "motion", "climate"] # ISY Scenes are more like Switches than Home Assistant Scenes # (they can turn off, and report their state) ISY_GROUP_PLATFORM = SWITCH ISY994_ISY = "isy" ISY994_NODES = "isy994_nodes" ISY994_PROGRAMS = "isy994_programs" ISY994_VARIABLES = "isy994_variables" FILTER_UOM = "uom" FILTER_STATES = "states" FILTER_NODE_DEF_ID = "node_def_id" FILTER_INSTEON_TYPE = "insteon_type" FILTER_ZWAVE_CAT = "zwave_cat" # Special Subnodes for some Insteon Devices SUBNODE_CLIMATE_COOL = 2 SUBNODE_CLIMATE_HEAT = 3 SUBNODE_DUSK_DAWN = 2 SUBNODE_EZIO2X4_SENSORS = [9, 10, 11, 12] SUBNODE_FANLINC_LIGHT = 1 SUBNODE_HEARTBEAT = 4 SUBNODE_IOLINC_RELAY = 2 SUBNODE_LOW_BATTERY = 3 SUBNODE_MOTION_DISABLED = (13, 19) # Int->13 or Hex->0xD depending on firmware SUBNODE_NEGATIVE = 2 SUBNODE_TAMPER = (10, 16) # Int->10 or Hex->0xA depending on firmware # Generic Insteon Type Categories for Filters TYPE_CATEGORY_CONTROLLERS = "0." TYPE_CATEGORY_DIMMABLE = "1." TYPE_CATEGORY_SWITCHED = "2." 
TYPE_CATEGORY_IRRIGATION = "4." TYPE_CATEGORY_CLIMATE = "5." TYPE_CATEGORY_POOL_CTL = "6." TYPE_CATEGORY_SENSOR_ACTUATORS = "7." TYPE_CATEGORY_ENERGY_MGMT = "9." TYPE_CATEGORY_COVER = "14." TYPE_CATEGORY_LOCK = "15." TYPE_CATEGORY_SAFETY = "16." TYPE_CATEGORY_X10 = "113." TYPE_EZIO2X4 = "7.3.255." TYPE_INSTEON_MOTION = ("16.1.", "16.22.") UNDO_UPDATE_LISTENER = "undo_update_listener" # Used for discovery UDN_UUID_PREFIX = "uuid:" ISY_URL_POSTFIX = "/desc" # Do not use the Home Assistant consts for the states here - we're matching exact API # responses, not using them for Home Assistant states # Insteon Types: https://www.universal-devices.com/developers/wsdk/5.0.4/1_fam.xml # Z-Wave Categories: https://www.universal-devices.com/developers/wsdk/5.0.4/4_fam.xml NODE_FILTERS = { BINARY_SENSOR: { FILTER_UOM: [], FILTER_STATES: [], FILTER_NODE_DEF_ID: [ "BinaryAlarm", "BinaryAlarm_ADV", "BinaryControl", "BinaryControl_ADV", "EZIO2x4_Input", "EZRAIN_Input", "OnOffControl", "OnOffControl_ADV", ], FILTER_INSTEON_TYPE: [ "7.0.", "7.13.", TYPE_CATEGORY_SAFETY, ], # Does a startswith() match; include the dot FILTER_ZWAVE_CAT: (["104", "112", "138"] + list(map(str, range(148, 180)))), }, SENSOR: { # This is just a more-readable way of including MOST uoms between 1-100 # (Remember that range() is non-inclusive of the stop value) FILTER_UOM: ( ["1"] + list(map(str, range(3, 11))) + list(map(str, range(12, 51))) + list(map(str, range(52, 66))) + list(map(str, range(69, 78))) + ["79"] + list(map(str, range(82, 97))) ), FILTER_STATES: [], FILTER_NODE_DEF_ID: [ "IMETER_SOLO", "EZIO2x4_Input_ADV", "KeypadButton", "KeypadButton_ADV", "RemoteLinc2", "RemoteLinc2_ADV", ], FILTER_INSTEON_TYPE: ["0.16.", "0.17.", "0.18.", "9.0.", "9.7."], FILTER_ZWAVE_CAT: (["118", "143"] + list(map(str, range(180, 185)))), }, LOCK: { FILTER_UOM: ["11"], FILTER_STATES: ["locked", "unlocked"], FILTER_NODE_DEF_ID: ["DoorLock"], FILTER_INSTEON_TYPE: [TYPE_CATEGORY_LOCK, "4.64."], FILTER_ZWAVE_CAT: ["111"], }, FAN: { FILTER_UOM: [], FILTER_STATES: ["off", "low", "med", "high"], FILTER_NODE_DEF_ID: ["FanLincMotor"], FILTER_INSTEON_TYPE: ["1.46."], FILTER_ZWAVE_CAT: [], }, COVER: { FILTER_UOM: ["97"], FILTER_STATES: ["open", "closed", "closing", "opening", "stopped"], FILTER_NODE_DEF_ID: [], FILTER_INSTEON_TYPE: [], FILTER_ZWAVE_CAT: [], }, LIGHT: { FILTER_UOM: ["51"], FILTER_STATES: ["on", "off", "%"], FILTER_NODE_DEF_ID: [ "BallastRelayLampSwitch", "BallastRelayLampSwitch_ADV", "DimmerLampOnly", "DimmerLampSwitch", "DimmerLampSwitch_ADV", "DimmerSwitchOnly", "DimmerSwitchOnly_ADV", "KeypadDimmer", "KeypadDimmer_ADV", ], FILTER_INSTEON_TYPE: [TYPE_CATEGORY_DIMMABLE], FILTER_ZWAVE_CAT: ["109", "119"], }, SWITCH: { FILTER_UOM: ["2", "78"], FILTER_STATES: ["on", "off"], FILTER_NODE_DEF_ID: [ "AlertModuleArmed", "AlertModuleSiren", "AlertModuleSiren_ADV", "EZIO2x4_Output", "EZRAIN_Output", "KeypadRelay", "KeypadRelay_ADV", "RelayLampOnly", "RelayLampOnly_ADV", "RelayLampSwitch", "RelayLampSwitch_ADV", "RelaySwitchOnlyPlusQuery", "RelaySwitchOnlyPlusQuery_ADV", "Siren", "Siren_ADV", "X10", ], FILTER_INSTEON_TYPE: [ TYPE_CATEGORY_SWITCHED, "7.3.255.", "9.10.", "9.11.", TYPE_CATEGORY_X10, ], FILTER_ZWAVE_CAT: ["121", "122", "123", "137", "141", "147"], }, CLIMATE: { FILTER_UOM: ["2"], FILTER_STATES: ["heating", "cooling", "idle", "fan_only", "off"], FILTER_NODE_DEF_ID: ["TempLinc", "Thermostat"], FILTER_INSTEON_TYPE: ["4.8", TYPE_CATEGORY_CLIMATE], FILTER_ZWAVE_CAT: ["140"], }, } UOM_ISYV4_DEGREES = "degrees" UOM_ISYV4_NONE = "n/a" 
UOM_ISY_CELSIUS = 1 UOM_ISY_FAHRENHEIT = 2 UOM_DOUBLE_TEMP = "101" UOM_HVAC_ACTIONS = "66" UOM_HVAC_MODE_GENERIC = "67" UOM_HVAC_MODE_INSTEON = "98" UOM_FAN_MODES = "99" UOM_INDEX = "25" UOM_ON_OFF = "2" UOM_FRIENDLY_NAME = { "1": "A", "3": f"btu/{TIME_HOURS}", "4": TEMP_CELSIUS, "5": LENGTH_CENTIMETERS, "6": "ft³", "7": f"ft³/{TIME_MINUTES}", "8": "m³", "9": TIME_DAYS, "10": TIME_DAYS, "12": "dB", "13": "dB A", "14": DEGREE, "16": "macroseismic", "17": TEMP_FAHRENHEIT, "18": LENGTH_FEET, "19": TIME_HOURS, "20": TIME_HOURS, "21": "%AH", "22": "%RH", "23": PRESSURE_INHG, "24": f"{LENGTH_INCHES}/{TIME_HOURS}", UOM_INDEX: "index", # Index type. Use "node.formatted" for value "26": TEMP_KELVIN, "27": "keyword", "28": MASS_KILOGRAMS, "29": "kV", "30": "kW", "31": "kPa", "32": SPEED_KILOMETERS_PER_HOUR, "33": ENERGY_KILO_WATT_HOUR, "34": "liedu", "35": VOLUME_LITERS, "36": "lx", "37": "mercalli", "38": LENGTH_METERS, "39": f"{LENGTH_METERS}³/{TIME_HOURS}", "40": SPEED_METERS_PER_SECOND, "41": "mA", "42": TIME_MILLISECONDS, "43": "mV", "44": TIME_MINUTES, "45": TIME_MINUTES, "46": f"mm/{TIME_HOURS}", "47": TIME_MONTHS, "48": SPEED_MILES_PER_HOUR, "49": SPEED_METERS_PER_SECOND, "50": "Ω", "51": UNIT_PERCENTAGE, "52": MASS_POUNDS, "53": "pf", "54": CONCENTRATION_PARTS_PER_MILLION, "55": "pulse count", "57": TIME_SECONDS, "58": TIME_SECONDS, "59": "S/m", "60": "m_b", "61": "M_L", "62": "M_w", "63": "M_S", "64": "shindo", "65": "SML", "69": VOLUME_GALLONS, "71": UV_INDEX, "72": VOLT, "73": POWER_WATT, "74": f"{POWER_WATT}/{LENGTH_METERS}²", "75": "weekday", "76": DEGREE, "77": TIME_YEARS, "82": "mm", "83": LENGTH_KILOMETERS, "85": "Ω", "86": "kΩ", "87": f"{LENGTH_METERS}³/{LENGTH_METERS}³", "88": "Water activity", "89": "RPM", "90": FREQUENCY_HERTZ, "91": DEGREE, "92": f"{DEGREE} South", "100": "", # Range 0-255, no unit. UOM_DOUBLE_TEMP: UOM_DOUBLE_TEMP, "102": "kWs", "103": "$", "104": "¢", "105": LENGTH_INCHES, "106": f"mm/{TIME_DAYS}", "107": "", # raw 1-byte unsigned value "108": "", # raw 2-byte unsigned value "109": "", # raw 3-byte unsigned value "110": "", # raw 4-byte unsigned value "111": "", # raw 1-byte signed value "112": "", # raw 2-byte signed value "113": "", # raw 3-byte signed value "114": "", # raw 4-byte signed value "116": LENGTH_MILES, "117": "mbar", "118": "hPa", "119": f"{POWER_WATT}{TIME_HOURS}", "120": f"{LENGTH_INCHES}/{TIME_DAYS}", } UOM_TO_STATES = { "11": { # Deadbolt Status 0: STATE_UNLOCKED, 100: STATE_LOCKED, 101: STATE_UNKNOWN, 102: STATE_PROBLEM, }, "15": { # Door Lock Alarm 1: "master code changed", 2: "tamper code entry limit", 3: "escutcheon removed", 4: "key/manually locked", 5: "locked by touch", 6: "key/manually unlocked", 7: "remote locking jammed bolt", 8: "remotely locked", 9: "remotely unlocked", 10: "deadbolt jammed", 11: "battery too low to operate", 12: "critical low battery", 13: "low battery", 14: "automatically locked", 15: "automatic locking jammed bolt", 16: "remotely power cycled", 17: "lock handling complete", 19: "user deleted", 20: "user added", 21: "duplicate pin", 22: "jammed bolt by locking with keypad", 23: "locked by keypad", 24: "unlocked by keypad", 25: "keypad attempt outside schedule", 26: "hardware failure", 27: "factory reset", }, UOM_HVAC_ACTIONS: { # Thermostat Heat/Cool State 0: CURRENT_HVAC_IDLE, 1: CURRENT_HVAC_HEAT, 2: CURRENT_HVAC_COOL, 3: CURRENT_HVAC_FAN, 4: CURRENT_HVAC_HEAT, # Pending Heat 5: CURRENT_HVAC_COOL, # Pending Cool # >6 defined in ISY but not implemented, leaving for future expanision. 
6: CURRENT_HVAC_IDLE, 7: CURRENT_HVAC_HEAT, 8: CURRENT_HVAC_HEAT, 9: CURRENT_HVAC_COOL, 10: CURRENT_HVAC_HEAT, 11: CURRENT_HVAC_HEAT, }, UOM_HVAC_MODE_GENERIC: { # Thermostat Mode 0: HVAC_MODE_OFF, 1: HVAC_MODE_HEAT, 2: HVAC_MODE_COOL, 3: HVAC_MODE_AUTO, 4: PRESET_BOOST, 5: "resume", 6: HVAC_MODE_FAN_ONLY, 7: "furnace", 8: HVAC_MODE_DRY, 9: "moist air", 10: "auto changeover", 11: "energy save heat", 12: "energy save cool", 13: PRESET_AWAY, 14: HVAC_MODE_AUTO, 15: HVAC_MODE_AUTO, 16: HVAC_MODE_AUTO, }, "68": { # Thermostat Fan Mode 0: FAN_AUTO, 1: FAN_ON, 2: FAN_HIGH, # Auto High 3: FAN_HIGH, 4: FAN_MEDIUM, # Auto Medium 5: FAN_MEDIUM, 6: "circulation", 7: "humidity circulation", }, "78": {0: STATE_OFF, 100: STATE_ON}, # 0-Off 100-On "79": {0: STATE_OPEN, 100: STATE_CLOSED}, # 0-Open 100-Close "80": { # Thermostat Fan Run State 0: STATE_OFF, 1: STATE_ON, 2: "on high", 3: "on medium", 4: "circulation", 5: "humidity circulation", 6: "right/left circulation", 7: "up/down circulation", 8: "quiet circulation", }, "84": {0: SERVICE_LOCK, 1: SERVICE_UNLOCK}, # Secure Mode "93": { # Power Management Alarm 1: "power applied", 2: "ac mains disconnected", 3: "ac mains reconnected", 4: "surge detection", 5: "volt drop or drift", 6: "over current detected", 7: "over voltage detected", 8: "over load detected", 9: "load error", 10: "replace battery soon", 11: "replace battery now", 12: "battery is charging", 13: "battery is fully charged", 14: "charge battery soon", 15: "charge battery now", }, "94": { # Appliance Alarm 1: "program started", 2: "program in progress", 3: "program completed", 4: "replace main filter", 5: "failure to set target temperature", 6: "supplying water", 7: "water supply failure", 8: "boiling", 9: "boiling failure", 10: "washing", 11: "washing failure", 12: "rinsing", 13: "rinsing failure", 14: "draining", 15: "draining failure", 16: "spinning", 17: "spinning failure", 18: "drying", 19: "drying failure", 20: "fan failure", 21: "compressor failure", }, "95": { # Home Health Alarm 1: "leaving bed", 2: "sitting on bed", 3: "lying on bed", 4: "posture changed", 5: "sitting on edge of bed", }, "96": { # VOC Level 1: "clean", 2: "slightly polluted", 3: "moderately polluted", 4: "highly polluted", }, "97": { # Barrier Status **{ 0: STATE_CLOSED, 100: STATE_OPEN, 101: STATE_UNKNOWN, 102: "stopped", 103: STATE_CLOSING, 104: STATE_OPENING, }, **{ b: f"{b} %" for a, b in enumerate(list(range(1, 100))) }, # 1-99 are percentage open }, UOM_HVAC_MODE_INSTEON: { # Insteon Thermostat Mode 0: HVAC_MODE_OFF, 1: HVAC_MODE_HEAT, 2: HVAC_MODE_COOL, 3: HVAC_MODE_HEAT_COOL, 4: HVAC_MODE_FAN_ONLY, 5: HVAC_MODE_AUTO, # Program Auto 6: HVAC_MODE_AUTO, # Program Heat-Set @ Local Device Only 7: HVAC_MODE_AUTO, # Program Cool-Set @ Local Device Only }, UOM_FAN_MODES: {7: FAN_ON, 8: FAN_AUTO}, # Insteon Thermostat Fan Mode "115": { # Most recent On style action taken for lamp control 0: "on", 1: "off", 2: "fade up", 3: "fade down", 4: "fade stop", 5: "fast on", 6: "fast off", 7: "triple press on", 8: "triple press off", 9: "4x press on", 10: "4x press off", 11: "5x press on", 12: "5x press off", }, } ISY_HVAC_MODES = [ HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO, HVAC_MODE_FAN_ONLY, ] HA_HVAC_TO_ISY = { HVAC_MODE_OFF: "off", HVAC_MODE_HEAT: "heat", HVAC_MODE_COOL: "cool", HVAC_MODE_HEAT_COOL: "auto", HVAC_MODE_FAN_ONLY: "fan_only", HVAC_MODE_AUTO: "program_auto", } HA_FAN_TO_ISY = {FAN_ON: "on", FAN_AUTO: "auto"} BINARY_SENSOR_DEVICE_TYPES_ISY = { DEVICE_CLASS_MOISTURE: 
["16.8.", "16.13.", "16.14."], DEVICE_CLASS_OPENING: [ "16.9.", "16.6.", "16.7.", "16.2.", "16.17.", "16.20.", "16.21.", ], DEVICE_CLASS_MOTION: ["16.1.", "16.4.", "16.5.", "16.3.", "16.22."], } BINARY_SENSOR_DEVICE_TYPES_ZWAVE = { DEVICE_CLASS_SAFETY: ["137", "172", "176", "177", "178"], DEVICE_CLASS_SMOKE: ["138", "156"], DEVICE_CLASS_PROBLEM: ["148", "149", "157", "158", "164", "174", "175"], DEVICE_CLASS_GAS: ["150", "151"], DEVICE_CLASS_SOUND: ["153"], DEVICE_CLASS_COLD: ["152", "168"], DEVICE_CLASS_HEAT: ["154", "166", "167"], DEVICE_CLASS_MOISTURE: ["159", "169"], DEVICE_CLASS_DOOR: ["160"], DEVICE_CLASS_BATTERY: ["162"], DEVICE_CLASS_MOTION: ["155"], DEVICE_CLASS_VIBRATION: ["173"], }
from scfmsp.controlflowanalysis.StatusRegister import StatusRegister from scfmsp.controlflowanalysis.instructions.AbstractInstructionBranching import AbstractInstructionBranching class InstructionJz(AbstractInstructionBranching): name = 'jz' def get_execution_time(self): return 2 def get_branching_condition_domain(self, ac): return ac.sra.get(StatusRegister.ZERO)
# Import the Twython class from twython import Twython import json import os import pandas as pd from tqdm import tqdm try: os.remove('twitter_dataset.csv') except OSError: pass def main(): old_df = pd.read_csv('data/twitter_dataset_2.csv', lineterminator='\n') #first load the dictonary with the top used english words with open('improved_dict.txt') as d: word_list = d.read() words = word_list.split('\n') # Dictonary structure with the fields that we are interested in acquire from the tweets dict_ = {'user': [], 'text': [], 'hashtags': [], 'mentions': [] } # Instantiate an object python_tweets = Twython('9Tz9FnZ1PR9AcEvudwC7hqOod', #API Key 'Z7upFmGJZE3oAfcb2ZUmRdEeBJJkkYTQ86PuB3iKgWqXFdMFNo') #API Secret #each query has a target word queries = [] for w in words: query = {'q': w, #the query word 'result_type': 'recent', 'count': 100, #100 tweets, which is the maximum limit admitted by Twitter 'lang': 'en', #we are interested only in english tweets } queries.append(query) #perform the queries to get the tweet and map the JSON in our dictonary for q in tqdm(queries[:50]): for status in python_tweets.search(**q)['statuses']: dict_['user'].append(status['user']['screen_name']) #username dict_['text'].append(status['text']) #content of the tweet #this is necessary cuz the hashtags may be null or there can be more than one #this can easily be done with this magical regular expression ht = [d['text'] for d in status['entities']['hashtags'] if 'text' in d] #list of hashtags dict_['hashtags'].append(ht) #same thing for the mentions ment = [d['screen_name'] for d in status['entities']['user_mentions'] if 'screen_name' in d] #list of mentions dict_['mentions'].append(ment) # Structure data in a pandas DataFrame for easier manipulation df = pd.DataFrame(dict_) df = df.append(old_df) df.to_csv('data/twitter_dataset_2.csv', index=False, encoding='utf-8') if __name__ == '__main__': main() from time import sleep while True: sleep(1200) main()
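# ---------------------------------------------------------------------------
# Illustrative sketch (hedged): the hashtag/mention extraction above is a plain
# list comprehension over the tweet's 'entities' payload rather than a regular
# expression. Pulled out as a standalone helper, with a made-up status dict so
# the expected shape of the data is explicit.
# ---------------------------------------------------------------------------
def extract_entities(status):
    hashtags = [d['text'] for d in status['entities']['hashtags'] if 'text' in d]
    mentions = [d['screen_name'] for d in status['entities']['user_mentions'] if 'screen_name' in d]
    return hashtags, mentions

# Example payload (shape only, values invented):
# status = {'entities': {'hashtags': [{'text': 'python'}],
#                        'user_mentions': [{'screen_name': 'someone'}]}}
# extract_entities(status) -> (['python'], ['someone'])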
""" grdfilter - Filter a grid in the space (or time) domain. """ from pygmt.clib import Session from pygmt.helpers import ( GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, ) from pygmt.io import load_dataarray @fmt_docstring @use_alias( D="distance", F="filter", G="outgrid", I="spacing", N="nans", R="region", T="toggle", V="verbose", f="coltypes", r="registration", ) @kwargs_to_strings(I="sequence", R="sequence") def grdfilter(grid, **kwargs): r""" Filter a grid in the space (or time) domain. Filter a grid file in the time domain using one of the selected convolution or non-convolution isotropic or rectangular filters and compute distances using Cartesian or Spherical geometries. The output grid file can optionally be generated as a sub-region of the input (via ``region``) and/or with new increment (via ``spacing``) or registration (via ``toggle``). In this way, one may have "extra space" in the input data so that the edges will not be used and the output can be within one half-width of the input edges. If the filter is low-pass, then the output may be less frequently sampled than the input. Full option list at :gmt-docs:`grdfilter.html` {aliases} Parameters ---------- grid : str or xarray.DataArray The file name of the input grid or the grid loaded as a DataArray. outgrid : str or None The name of the output netCDF file with extension .nc to store the grid in. filter : str **b**\|\ **c**\|\ **g**\|\ **o**\|\ **m**\|\ **p**\|\ **h**\ *xwidth*\ [/*width2*\][*modifiers*]. Name of filter type you which to apply, followed by the width: b: Box Car c: Cosine Arch g: Gaussian o: Operator m: Median p: Maximum Likelihood probability h: histogram distance : str Distance *flag* tells how grid (x,y) relates to filter width as follows: p: grid (px,py) with *width* an odd number of pixels; Cartesian distances. 0: grid (x,y) same units as *width*, Cartesian distances. 1: grid (x,y) in degrees, *width* in kilometers, Cartesian distances. 2: grid (x,y) in degrees, *width* in km, dx scaled by cos(middle y), Cartesian distances. The above options are fastest because they allow weight matrix to be computed only once. The next three options are slower because they recompute weights for each latitude. 3: grid (x,y) in degrees, *width* in km, dx scaled by cosine(y), Cartesian distance calculation. 4: grid (x,y) in degrees, *width* in km, Spherical distance calculation. 5: grid (x,y) in Mercator ``projection='m1'`` img units, *width* in km, Spherical distance calculation. {I} nans : str or float **i**\|\ **p**\|\ **r**. Determine how NaN-values in the input grid affects the filtered output. {R} toggle : bool Toggle the node registration for the output grid so as to become the opposite of the input grid. [Default gives the same registration as the input grid]. {V} {f} {r} Returns ------- ret: xarray.DataArray or None Return type depends on whether the ``outgrid`` parameter is set: - :class:`xarray.DataArray` if ``outgrid`` is not set - None if ``outgrid`` is set (grid output will be stored in file set by ``outgrid``) Example ------- >>> import os >>> import pygmt >>> # Apply a filter of 600km (full width) to the @earth_relief_30m file >>> # and return a filtered field (saved as netcdf) >>> pygmt.grdfilter( ... grid="@earth_relief_30m", ... filter="m600", ... distance="4", ... region=[150, 250, 10, 40], ... spacing=0.5, ... outgrid="filtered_pacific.nc", ... 
) >>> os.remove("filtered_pacific.nc") # cleanup file >>> # Apply a gaussian smoothing filter of 600 km in the input data array, >>> # and returns a filtered data array with the smoothed field. >>> grid = pygmt.datasets.load_earth_relief() >>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4") """ with GMTTempFile(suffix=".nc") as tmpfile: with Session() as lib: file_context = lib.virtualfile_from_data(check_kind="raster", data=grid) with file_context as infile: if (outgrid := kwargs.get("G")) is None: kwargs["G"] = outgrid = tmpfile.name # output to tmpfile lib.call_module("grdfilter", build_arg_string(kwargs, infile=infile)) return load_dataarray(outgrid) if outgrid == tmpfile.name else None
"""Data structure of RSS and useful functions. """ # # Copyright (c) 2005-2020 shinGETsu Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # import html import re import cgi from .template import Template class Item: """One item.""" title = "" link = "" description = "" date = 0 # Seconds from 1970-01-01T00:00 def __init__(self, link="", title="", date=0, creator='', subject=None, description="", content=""): """Constructor.""" del_eos = re.compile(r'[\r\n]*') self.link = link self.date = date self.creator = creator if subject: self.subject = subject else: self.subject = [] self.title = del_eos.sub('', title) self.description = del_eos.sub('', description) self.content = content class RSS(dict): """RSS. It is the dictionary which key is URI. """ encode = "utf-8" lang = "en" title = "" parent = "" # Place where is documents or RSS link = "" # URI of main page uri = "" # URI of RSS description = "" def __init__(self, encode="utf-8", lang="en", title="", parent="", link="", uri="", description="", xsl=""): """Constructor.""" self.encode = encode self.lang = lang self.title = title self.description = description self.parent = parent self.xsl = xsl if parent and parent[-1] != "/": parent += "/" self.parent += "/" if link != "": self.link = link else: self.link = parent if uri != "": self.uri = uri else: self.uri = parent + "rss.xml" def append(self, link, title = "", date = 0, creator = '', subject = None, description = "", content = "", abs = False): """Add an item.""" if not abs: link = self.parent + link item = Item(link, title = title, date = date, creator = creator, subject = subject, description = description, content = content) self[link] = item def keys(self): """List of links sorted by date.""" links = list(dict.keys(self)) links.sort(key=lambda x: self[x].date, reverse=True) return links def __iter__(self): return iter(list(self.keys())) def make_rss1(rss): '''Generate RSS 1.0. ''' def w3cdate(date): from time import strftime, gmtime return strftime('%Y-%m-%dT%H:%M:%SZ', gmtime(date)) var = { 'rss': rss, 'feed': [rss[uri] for uri in rss], 'w3cdate': w3cdate, 'escape': html.escape, } return Template().display('rss1', var)
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) from .kern import Kern import numpy as np from ...core.parameterization import Param from paramz.transformations import Logexp from paramz.caching import Cache_this class Static(Kern): def __init__(self, input_dim, variance, active_dims, name): super(Static, self).__init__(input_dim, active_dims, name) self.variance = Param('variance', variance, Logexp()) self.link_parameters(self.variance) def _to_dict(self): input_dict = super(Static, self)._to_dict() input_dict["variance"] = self.variance.values.tolist() return input_dict def Kdiag(self, X): ret = np.empty((X.shape[0],), dtype=np.float64) ret[:] = self.variance return ret def gradients_X(self, dL_dK, X, X2=None): return np.zeros(X.shape) def gradients_X_diag(self, dL_dKdiag, X): return np.zeros(X.shape) def gradients_XX(self, dL_dK, X, X2=None): if X2 is None: X2 = X return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]), dtype=np.float64) def gradients_XX_diag(self, dL_dKdiag, X, cov=False): return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64) def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): return np.zeros(Z.shape) def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape) def psi0(self, Z, variational_posterior): return self.Kdiag(variational_posterior.mean) def psi1(self, Z, variational_posterior): return self.K(variational_posterior.mean, Z) def psi2(self, Z, variational_posterior): K = self.K(variational_posterior.mean, Z) return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inherriting classes def input_sensitivity(self, summarize=True): if summarize: return super(Static, self).input_sensitivity(summarize=summarize) else: return np.ones(self.input_dim) * self.variance class White(Static): def __init__(self, input_dim, variance=1., active_dims=None, name='white'): super(White, self).__init__(input_dim, variance, active_dims, name) def K(self, X, X2=None): if X2 is None: return np.eye(X.shape[0])*self.variance else: return np.zeros((X.shape[0], X2.shape[0])) def psi2(self, Z, variational_posterior): return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64) def psi2n(self, Z, variational_posterior): return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64) def update_gradients_full(self, dL_dK, X, X2=None): if X2 is None: self.variance.gradient = np.trace(dL_dK) else: self.variance.gradient = 0. def update_gradients_diag(self, dL_dKdiag, X): self.variance.gradient = dL_dKdiag.sum() def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): self.variance.gradient = dL_dpsi0.sum() class WhiteHeteroscedastic(Static): def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'): """ A heteroscedastic White kernel (nugget/noise). It defines one variance (nugget) per input sample. Prediction excludes any noise learnt by this Kernel, so be careful using this kernel. 
You can plot the errors learnt by this kernel by something similar as: plt.errorbar(m.X, m.Y, yerr=2*np.sqrt(m.kern.white.variance)) """ super(Static, self).__init__(input_dim, active_dims, name) self.variance = Param('variance', np.ones(num_data) * variance, Logexp()) self.link_parameters(self.variance) def Kdiag(self, X): if X.shape[0] == self.variance.shape[0]: # If the input has the same number of samples as # the number of variances, we return the variances return self.variance return 0. def K(self, X, X2=None): if X2 is None and X.shape[0] == self.variance.shape[0]: return np.eye(X.shape[0]) * self.variance else: return 0. def psi2(self, Z, variational_posterior): return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64) def psi2n(self, Z, variational_posterior): return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64) def update_gradients_full(self, dL_dK, X, X2=None): if X2 is None: self.variance.gradient = np.diagonal(dL_dK) else: self.variance.gradient = 0. def update_gradients_diag(self, dL_dKdiag, X): self.variance.gradient = dL_dKdiag def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): self.variance.gradient = dL_dpsi0 class Bias(Static): def __init__(self, input_dim, variance=1., active_dims=None, name='bias'): super(Bias, self).__init__(input_dim, variance, active_dims, name) def to_dict(self): input_dict = super(Bias, self)._to_dict() input_dict["class"] = "GPy.kern.Bias" return input_dict @staticmethod def _from_dict(kernel_class, input_dict): useGPU = input_dict.pop('useGPU', None) return Bias(**input_dict) def K(self, X, X2=None): shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0]) return np.full(shape, self.variance, dtype=np.float64) def update_gradients_full(self, dL_dK, X, X2=None): self.variance.gradient = dL_dK.sum() def update_gradients_diag(self, dL_dKdiag, X): self.variance.gradient = dL_dKdiag.sum() def psi2(self, Z, variational_posterior): return np.full((Z.shape[0], Z.shape[0]), self.variance*self.variance*variational_posterior.shape[0], dtype=np.float64) def psi2n(self, Z, variational_posterior): ret = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64) ret[:] = self.variance*self.variance return ret def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): if dL_dpsi2.ndim == 2: self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0]) else: self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()) class Fixed(Static): def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'): """ :param input_dim: the number of input dimensions :type input_dim: int :param variance: the variance of the kernel :type variance: float """ super(Fixed, self).__init__(input_dim, variance, active_dims, name) self.fixed_K = covariance_matrix def K(self, X, X2): if X2 is None: return self.variance * self.fixed_K else: return np.zeros((X.shape[0], X2.shape[0])) def Kdiag(self, X): return self.variance * self.fixed_K.diagonal() def update_gradients_full(self, dL_dK, X, X2=None): if X2 is None: self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K) else: self.variance.gradient = 0 def update_gradients_diag(self, dL_dKdiag, X): self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K)) def psi2(self, Z, variational_posterior): return np.zeros((Z.shape[0], Z.shape[0]), 
dtype=np.float64) def psi2n(self, Z, variational_posterior): return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64) def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): self.variance.gradient = dL_dpsi0.sum() class Precomputed(Fixed): def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'): """ Class for precomputed kernels, indexed by columns in X Usage example: import numpy as np from GPy.models import GPClassification from GPy.kern import Precomputed from sklearn.cross_validation import LeaveOneOut n = 10 d = 100 X = np.arange(n).reshape((n,1)) # column vector of indices y = 2*np.random.binomial(1,0.5,(n,1))-1 X0 = np.random.randn(n,d) k = np.dot(X0,X0.T) kern = Precomputed(1,k) # k is a n x n covariance matrix cv = LeaveOneOut(n) ypred = y.copy() for train, test in cv: m = GPClassification(X[train], y[train], kernel=kern) m.optimize() ypred[test] = 2*(m.predict(X[test])[0]>0.5)-1 :param input_dim: the number of input dimensions :type input_dim: int :param variance: the variance of the kernel :type variance: float """ assert input_dim==1, "Precomputed only implemented in one dimension. Use multiple Precomputed kernels to have more dimensions by making use of active_dims" super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name) @Cache_this(limit=2) def _index(self, X, X2): if X2 is None: i1 = i2 = X.astype('int').flat else: i1, i2 = X.astype('int').flat, X2.astype('int').flat return self.fixed_K[i1,:][:,i2] def K(self, X, X2=None): return self.variance * self._index(X, X2) def Kdiag(self, X): return self.variance * self._index(X,None).diagonal() def update_gradients_full(self, dL_dK, X, X2=None): self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2)) def update_gradients_diag(self, dL_dKdiag, X): self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))
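# A minimal usage sketch of the static kernels defined above, assuming a standard
# GPy installation that exposes them as GPy.kern.White / GPy.kern.Bias; the data
# below is invented for illustration.
import numpy as np
import GPy

X = np.random.randn(5, 1)
X2 = np.random.randn(3, 1)

white = GPy.kern.White(1, variance=0.5)
print(white.K(X))        # 0.5 on the diagonal, zeros elsewhere
print(white.K(X, X2))    # all zeros: white noise has no cross-covariance

bias = GPy.kern.Bias(1, variance=2.0)
print(bias.K(X, X2))     # a constant 2.0 everywhere
print(bias.Kdiag(X))     # [2. 2. 2. 2. 2.]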
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="slacksdk", version="0.0.1a", author="Thanakrit Juthamongkhon", author_email="thanakrit.ju.work@gmail.com", description="A minimal slack sdk", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/thanakritju/python-slack-events-sdk", packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
# -*- coding: utf-8 -*- # # Copyright (C) 2020 Grzegorz Jacenków. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """Training and evaluation pipeline for the networks.""" import csv import os import tensorflow as tf from tensorflow.keras.metrics import Mean from inside import config from inside.callbacks import setup_callbacks from inside.constructor import setup_comet_ml, setup_model from inside.loaders import CLEVR from inside.metrics import DiceScore def _write_results(logs): """Write final logs to a CSV file.""" w = csv.writer(open(os.path.join( config.EXPERIMENT_FOLDER, "results.csv"), "w")) for key, val in logs.items(): w.writerow([key, val]) class Pipeline: def __init__(self): # Model. self.model = setup_model() # Comet.ml experiment. self.comet_ml = setup_comet_ml() # Testing metrics. self.test_dice = DiceScore(name="testing_dice") self.test_loss = Mean(name="testing_loss") # Training metrics. self.training_dice = DiceScore(name="training_dice") self.training_loss = Mean(name="training_loss") # Callbacks. self.cl, self.es, self.mc, self.pp = setup_callbacks() self.cl.model, self.es.model, self.mc.model = \ self.model, self.model, self.model self.pp.model = self.model self.pp.comet_ml = self.comet_ml def fit(self): """Train the model.""" # Toy dataset. loader = CLEVR() train_ds, valid_ds, test_ds = loader.load() with self.comet_ml.train(): self.cl.on_train_begin() self.es.on_train_begin() self.mc.on_train_begin() self.pp.on_train_begin() for epoch in range(config.EXPERIMENT_EPOCHS): self.comet_ml.set_epoch(epoch) for images, labels in train_ds: self.train_step(images, labels) for batch, (images, labels) in enumerate(valid_ds): self.test_step(images, labels) if not batch: # Log only first mini-batch from an epoch. self.pp.on_epoch_end(epoch, images, labels) # Get results. logs = { "dice": self.training_dice.result().numpy(), "loss": self.training_loss.result().numpy(), "validation_dice": self.test_dice.result().numpy(), "validation_loss": self.test_loss.result().numpy(), } template = ("Epoch {}. Training Loss: {}. Training Dice: {}. " "Validation Loss: {}. Validation Dice: {}.") print(template.format(epoch + 1, logs['loss'], logs['dice'], logs['validation_loss'], logs['validation_dice'])) # Log metrics. self.comet_ml.log_metrics(logs, epoch=epoch) self.cl.on_epoch_end(epoch, logs) self.es.on_epoch_end(epoch, logs) self.mc.on_epoch_end(epoch, logs) # Reset the metrics for the next epoch. self.training_dice.reset_states() self.training_loss.reset_states() self.test_dice.reset_states() self.test_loss.reset_states() # Early stopping criterion. if self.es.model.stop_training: self.cl.on_train_end() self.es.on_train_end() self.mc.on_train_end() break with self.comet_ml.test(): for batch, (images, labels) in enumerate(test_ds): self.test_step(images, labels) if not batch: self.pp.on_test_end(images, labels) # Get results. logs = { "dice": self.test_dice.result().numpy(), "loss": self.test_loss.result().numpy(), } print("Test Loss: {}. Test Dice: {}.".format( logs['loss'], logs['dice'])) # Log metrics. 
self.comet_ml.log_metrics(logs) _write_results(logs) @tf.function def train_step(self, images, labels): with tf.GradientTape() as tape: predictions = self.model.inference(images) loss = self.model.loss(labels, predictions) gradients = tape.gradient(loss, self.model.trainable_variables) self.model.optimiser.apply_gradients( zip(gradients, self.model.trainable_variables)) self.training_loss(loss) self.training_dice(labels, predictions) @tf.function def test_step(self, images, labels): predictions = self.model.inference(images) t_loss = self.model.loss(labels, predictions) self.test_loss(t_loss) self.test_dice(labels, predictions)
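# A stripped-down, self-contained version of the tf.GradientTape training-step
# pattern used by Pipeline.train_step above; the toy model, optimiser, loss and
# data below are invented stand-ins, not the project's components.
import tensorflow as tf

toy_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
toy_optimiser = tf.keras.optimizers.Adam()
toy_loss = tf.keras.losses.MeanSquaredError()

@tf.function
def toy_train_step(x, y):
    # Forward pass under the tape, then backprop through the trainable variables.
    with tf.GradientTape() as tape:
        predictions = toy_model(x, training=True)
        loss = toy_loss(y, predictions)
    gradients = tape.gradient(loss, toy_model.trainable_variables)
    toy_optimiser.apply_gradients(zip(gradients, toy_model.trainable_variables))
    return loss

x = tf.random.normal((8, 4))
y = tf.random.normal((8, 1))
print(float(toy_train_step(x, y)))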
from openmmtools import testsystems from simtk.openmm.app import * import simtk.unit as unit import logging import numpy as np from openmmtools.constants import kB from openmmtools import respa, utils logger = logging.getLogger(__name__) # Energy unit used by OpenMM unit system from openmmtools import states, integrators import time import numpy as np import sys import os def get_rotation_matrix(): """ Randomly rotate the point clouds to augument the dataset rotation is per shape based along up direction Input: Nx3 array, original point clouds Return: Nx3 array, rotated point clouds """ angles = np.random.uniform(-1.0, 1.0, size=(3,)) * np.pi print(f'Using angle: {angles}') Rx = np.array([[1., 0, 0], [0, np.cos(angles[0]), -np.sin(angles[0])], [0, np.sin(angles[0]), np.cos(angles[0])]], dtype=np.float32) Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])], [0, 1, 0], [-np.sin(angles[1]), 0, np.cos(angles[1])]], dtype=np.float32) Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0], [np.sin(angles[2]), np.cos(angles[2]), 0], [0, 0, 1]], dtype=np.float32) rotation_matrix = np.matmul(Rz, np.matmul(Ry, Rx)) return rotation_matrix def center_positions(pos): offset = np.mean(pos, axis=0) return pos - offset, offset BOX_SCALE = 2 DT = 2 for seed in range(10): print(f'Running seed: {seed}') waterbox = testsystems.WaterBox( box_edge=2 * unit.nanometers, model='tip4pew') [topology, system, positions] = [waterbox.topology, waterbox.system, waterbox.positions] R = get_rotation_matrix() positions = positions.value_in_unit(unit.angstrom) positions, off = center_positions(positions) positions = np.matmul(positions, R) positions += off positions += np.random.randn(positions.shape[0], positions.shape[1]) * 0.005 positions *= unit.angstrom p_num = positions.shape[0] // 3 timestep = DT * unit.femtoseconds temperature = 300 * unit.kelvin chain_length = 10 friction = 1. / unit.picosecond num_mts = 5 num_yoshidasuzuki = 5 integrator = integrators.NoseHooverChainVelocityVerletIntegrator(system, temperature, friction, timestep, chain_length, num_mts, num_yoshidasuzuki) simulation = Simulation(topology, system, integrator) simulation.context.setPositions(positions) simulation.context.setVelocitiesToTemperature(temperature) simulation.minimizeEnergy(tolerance=1*unit.kilojoule/unit.mole) simulation.step(1) os.makedirs(f'./water_data_tip4p/', exist_ok=True) dataReporter_gt = StateDataReporter(f'./log_nvt_tip4p_{seed}.txt', 50, totalSteps=50000, step=True, time=True, speed=True, progress=True, elapsedTime=True, remainingTime=True, potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True, separator='\t') simulation.reporters.append(dataReporter_gt) for t in range(1000): if (t+1)%100 == 0: print(f'Finished {(t+1)*50} steps') state = simulation.context.getState(getPositions=True, getVelocities=True, getForces=True, enforcePeriodicBox=True) pos = state.getPositions(asNumpy=True).value_in_unit(unit.angstrom) vel = state.getVelocities(asNumpy=True).value_in_unit(unit.meter / unit.second) force = state.getForces(asNumpy=True).value_in_unit(unit.kilojoules_per_mole/unit.nanometer) np.savez(f'./water_data_tip4p/data_{seed}_{t}.npz', pos=pos, vel=vel, forces=force) simulation.step(50)
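# Quick sanity check for get_rotation_matrix() above (pure NumPy, independent of
# OpenMM, appended only as an illustration): the composed matrix should be
# orthogonal with determinant +1, so it preserves the lengths of the rotated points.
R_check = get_rotation_matrix()
assert np.allclose(R_check @ R_check.T, np.eye(3), atol=1e-5)
assert np.isclose(np.linalg.det(R_check), 1.0, atol=1e-5)

pts = np.random.randn(10, 3).astype(np.float32)
rotated = np.matmul(pts, R_check)
assert np.allclose(np.linalg.norm(pts, axis=1),
                   np.linalg.norm(rotated, axis=1), atol=1e-4)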
import os import hashlib def _update_sha256(filename, sha256): """ Updates a SHA-256 algorithm with the filename and the contents of a file. """ block_size = 64 * 1024 # 64 KB with open(filename, 'rb') as input_file: while True: data = input_file.read(block_size) if not data: break sha256.update(data) sha256.update(filename.encode("utf-8")) return sha256 def hash_tree(root): """ Returns a cryptographically secure hash for a whole directory tree taking into account the names and the content of the files. """ file_list = [] for root_directory, directories, files in os.walk(root): for file in files: file_list.append(os.path.join(root_directory, file)) sorted_file_list = sorted(file_list) sha256 = hashlib.sha256() for file in sorted_file_list: _update_sha256(file, sha256) return sha256.hexdigest()
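if __name__ == "__main__":
    # Usage sketch for hash_tree (paths and contents invented): the digest is
    # deterministic for an unchanged tree and changes when any file's content
    # changes. Note that the absolute file paths are hashed too, so relocating
    # the tree also changes the digest.
    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as root:
        target = Path(root) / "x.txt"
        target.write_text("hello")
        first = hash_tree(root)
        assert hash_tree(root) == first      # deterministic
        target.write_text("hello, world")
        assert hash_tree(root) != first      # content change detected
        print(first)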
def destructure(obj, *params): import operator return operator.itemgetter(*params)(obj) def greet(**kwargs): year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle') print('Advent of Code') print(f'-> {year}-{day}-{puzzle}') print('--------------') def load_data(filename): with filename.open('r') as handle: return handle.read() def start(fn): import pathlib base_path = pathlib.Path(__file__).parent.parent / 'data' def wrapped(*args, **kwargs): greet(**kwargs) data = load_data(base_path / f'{kwargs["year"]}.{kwargs["day"]}.txt') return fn(data, *args, **kwargs) return wrapped def flatten_json(nested_json): out = {} def flatten(x, name=''): if type(x) is dict: for a in x: flatten(x[a], name + a + '_') elif type(x) is list: for i, a in enumerate(x): flatten(a, name + str(i) + '_') else: out[name[:-1]] = x flatten(nested_json) return out def sparse_matrix(): from collections import defaultdict return defaultdict(lambda: 0)
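if __name__ == "__main__":
    # Quick illustration of the stand-alone helpers above; the sample data is invented.
    nested = {"a": 1, "b": {"c": 2, "d": [3, 4]}}
    print(flatten_json(nested))
    # -> {'a': 1, 'b_c': 2, 'b_d_0': 3, 'b_d_1': 4}

    grid = sparse_matrix()
    grid[(0, 0)] += 1        # missing cells default to 0, so += works directly
    grid[(10, -3)] += 5
    print(grid[(0, 0)], grid[(10, -3)], grid[(99, 99)])   # 1 5 0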
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. # external from mixbox import fields import cybox.common from cybox.common.tools import ToolInformationList # internal import stix import stix.bindings.stix_common as stix_common_binding # relative from .vocabs import VocabField from .references import References from .identity import Identity, IdentityFactory from .structured_text import StructuredTextList class InformationSource(stix.Entity): _binding = stix_common_binding _binding_class = stix_common_binding.InformationSourceType _namespace = 'http://stix.mitre.org/common-1' identity = fields.TypedField("Identity", type_=Identity, factory=IdentityFactory) descriptions = fields.TypedField("Description", StructuredTextList) contributing_sources = fields.TypedField("Contributing_Sources", type_="stix.common.information_source.ContributingSources") time = fields.TypedField("Time", cybox.common.Time) roles = VocabField("Role", multiple=True, key_name="roles") tools = fields.TypedField("Tools", ToolInformationList) references = fields.TypedField("References", References) def __init__(self, description=None, identity=None, time=None, tools=None, contributing_sources=None, references=None): super(InformationSource, self).__init__() self.identity = identity self.descriptions = StructuredTextList(description) self.contributing_sources = contributing_sources self.time = time self.tools = tools self.references = references #self.roles = None def add_contributing_source(self, value): self.contributing_sources.append(value) def add_reference(self, value): if not value: return # TODO: Check if it's a valid URI? self.references.append(value) @property def description(self): """A single description about the contents or purpose of this object. Default Value: ``None`` Note: If this object has more than one description set, this will return the description with the lowest ordinality value. Returns: An instance of :class:`.StructuredText` """ return next(iter(self.descriptions), None) @description.setter def description(self, value): from stix.common.structured_text import StructuredTextList self.descriptions = StructuredTextList(value) def add_description(self, description): """Adds a description to the ``descriptions`` collection. This is the same as calling "foo.descriptions.add(bar)". """ self.descriptions.add(description) def add_role(self, value): self.roles.append(value) class ContributingSources(stix.EntityList): _namespace = "http://stix.mitre.org/common-1" _binding = stix_common_binding _binding_class = stix_common_binding.ContributingSourcesType source = fields.TypedField("Source", InformationSource, multiple=True, key_name="sources") @classmethod def _dict_as_list(cls): return False
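# A hedged usage sketch of InformationSource from client code; the import path
# assumes python-stix's usual public layout, and every value below is invented.
from stix.common import InformationSource, Identity

source = InformationSource(
    description="Report produced by the example analysis team.",  # invented
    identity=Identity(name="Example Org"),                        # invented
)
print(source.description)   # lowest-ordinality description, per the property above
print(source.to_dict())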
import copy from types import GeneratorType class MergeDict(object): """ A simple class for creating new "virtual" dictionaries that actually look up values in more than one dictionary, passed in the constructor. If a key appears in more than one of the given dictionaries, only the first occurrence will be used. """ def __init__(self, *dicts): self.dicts = dicts def __getitem__(self, key): for dict_ in self.dicts: try: return dict_[key] except KeyError: pass raise KeyError def __copy__(self): return self.__class__(*self.dicts) def get(self, key, default=None): try: return self[key] except KeyError: return default def getlist(self, key): for dict_ in self.dicts: if key in dict_.keys(): return dict_.getlist(key) return [] def iteritems(self): seen = set() for dict_ in self.dicts: for item in dict_.iteritems(): k, v = item if k in seen: continue seen.add(k) yield item def iterkeys(self): for k, v in self.iteritems(): yield k def itervalues(self): for k, v in self.iteritems(): yield v def items(self): return list(self.iteritems()) def keys(self): return list(self.iterkeys()) def values(self): return list(self.itervalues()) def has_key(self, key): for dict_ in self.dicts: if key in dict_: return True return False __contains__ = has_key __iter__ = iterkeys def copy(self): """Returns a copy of this object.""" return self.__copy__() def __str__(self): ''' Returns something like "{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}" instead of the generic "<object meta-data>" inherited from object. ''' return str(dict(self.items())) def __repr__(self): ''' Returns something like MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'}) instead of generic "<object meta-data>" inherited from object. ''' dictreprs = ', '.join(repr(d) for d in self.dicts) return '%s(%s)' % (self.__class__.__name__, dictreprs) class SortedDict(dict): """ A dictionary that keeps its keys in the order in which they're inserted. """ def __new__(cls, *args, **kwargs): instance = super(SortedDict, cls).__new__(cls, *args, **kwargs) instance.keyOrder = [] return instance def __init__(self, data=None): if data is None: data = {} elif isinstance(data, GeneratorType): # Unfortunately we need to be able to read a generator twice. Once # to get the data into self with our super().__init__ call and a # second time to setup keyOrder correctly data = list(data) super(SortedDict, self).__init__(data) if isinstance(data, dict): self.keyOrder = data.keys() else: self.keyOrder = [] seen = set() for key, value in data: if key not in seen: self.keyOrder.append(key) seen.add(key) def __deepcopy__(self, memo): return self.__class__([(key, copy.deepcopy(value, memo)) for key, value in self.iteritems()]) def __copy__(self): # The Python's default copy implementation will alter the state # of self. The reason for this seems complex but is likely related to # subclassing dict. return self.copy() def __setitem__(self, key, value): if key not in self: self.keyOrder.append(key) super(SortedDict, self).__setitem__(key, value) def __delitem__(self, key): super(SortedDict, self).__delitem__(key) self.keyOrder.remove(key) def __iter__(self): return iter(self.keyOrder) def pop(self, k, *args): result = super(SortedDict, self).pop(k, *args) try: self.keyOrder.remove(k) except ValueError: # Key wasn't in the dictionary in the first place. No problem. 
pass return result def popitem(self): result = super(SortedDict, self).popitem() self.keyOrder.remove(result[0]) return result def items(self): return zip(self.keyOrder, self.values()) def iteritems(self): for key in self.keyOrder: yield key, self[key] def keys(self): return self.keyOrder[:] def iterkeys(self): return iter(self.keyOrder) def values(self): return map(self.__getitem__, self.keyOrder) def itervalues(self): for key in self.keyOrder: yield self[key] def update(self, dict_): for k, v in dict_.iteritems(): self[k] = v def setdefault(self, key, default): if key not in self: self.keyOrder.append(key) return super(SortedDict, self).setdefault(key, default) def value_for_index(self, index): """Returns the value of the item at the given zero-based index.""" return self[self.keyOrder[index]] def insert(self, index, key, value): """Inserts the key, value pair before the item with the given index.""" if key in self.keyOrder: n = self.keyOrder.index(key) del self.keyOrder[n] if n < index: index -= 1 self.keyOrder.insert(index, key) super(SortedDict, self).__setitem__(key, value) def copy(self): """Returns a copy of this object.""" # This way of initializing the copy means it works for subclasses, too. return self.__class__(self) def __repr__(self): """ Replaces the normal dict.__repr__ with a version that returns the keys in their sorted order. """ return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()]) def clear(self): super(SortedDict, self).clear() self.keyOrder = [] class MultiValueDictKeyError(KeyError): pass class MultiValueDict(dict): """ A subclass of dictionary customized to handle multiple values for the same key. >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']}) >>> d['name'] 'Simon' >>> d.getlist('name') ['Adrian', 'Simon'] >>> d.getlist('doesnotexist') [] >>> d.getlist('doesnotexist', ['Adrian', 'Simon']) ['Adrian', 'Simon'] >>> d.get('lastname', 'nonexistent') 'nonexistent' >>> d.setlist('lastname', ['Holovaty', 'Willison']) This class exists to solve the irritating problem raised by cgi.parse_qs, which returns a list for every key, even though most Web forms submit single name-value pairs. """ def __init__(self, key_to_list_mapping=()): super(MultiValueDict, self).__init__(key_to_list_mapping) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, super(MultiValueDict, self).__repr__()) def __getitem__(self, key): """ Returns the last data value for this key, or [] if it's an empty list; raises KeyError if not found. """ try: list_ = super(MultiValueDict, self).__getitem__(key) except KeyError: raise MultiValueDictKeyError("Key %r not found in %r" % (key, self)) try: return list_[-1] except IndexError: return [] def __setitem__(self, key, value): super(MultiValueDict, self).__setitem__(key, [value]) def __copy__(self): return self.__class__([ (k, v[:]) for k, v in self.lists() ]) def __deepcopy__(self, memo=None): if memo is None: memo = {} result = self.__class__() memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def __getstate__(self): obj_dict = self.__dict__.copy() obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self]) return obj_dict def __setstate__(self, obj_dict): data = obj_dict.pop('_data', {}) for k, v in data.items(): self.setlist(k, v) self.__dict__.update(obj_dict) def get(self, key, default=None): """ Returns the last data value for the passed key. 
If key doesn't exist or value is an empty list, then default is returned. """ try: val = self[key] except KeyError: return default if val == []: return default return val def getlist(self, key, default=None): """ Returns the list of values for the passed key. If key doesn't exist, then a default value is returned. """ try: return super(MultiValueDict, self).__getitem__(key) except KeyError: if default is None: return [] return default def setlist(self, key, list_): super(MultiValueDict, self).__setitem__(key, list_) def setdefault(self, key, default=None): if key not in self: self[key] = default return default return self[key] def setlistdefault(self, key, default_list=None): if key not in self: if default_list is None: default_list = [] self.setlist(key, default_list) return default_list return self.getlist(key) def appendlist(self, key, value): """Appends an item to the internal list associated with key.""" self.setlistdefault(key).append(value) def items(self): """ Returns a list of (key, value) pairs, where value is the last item in the list associated with the key. """ return [(key, self[key]) for key in self.keys()] def iteritems(self): """ Yields (key, value) pairs, where value is the last item in the list associated with the key. """ for key in self.keys(): yield (key, self[key]) def lists(self): """Returns a list of (key, list) pairs.""" return super(MultiValueDict, self).items() def iterlists(self): """Yields (key, list) pairs.""" return super(MultiValueDict, self).iteritems() def values(self): """Returns a list of the last value on every key list.""" return [self[key] for key in self.keys()] def itervalues(self): """Yield the last value on every key list.""" for key in self.iterkeys(): yield self[key] def copy(self): """Returns a shallow copy of this object.""" return copy.copy(self) def update(self, *args, **kwargs): """ update() extends rather than replaces existing key lists. Also accepts keyword args. """ if len(args) > 1: raise TypeError("update expected at most 1 arguments, got %d" % len(args)) if args: other_dict = args[0] if isinstance(other_dict, MultiValueDict): for key, value_list in other_dict.lists(): self.setlistdefault(key).extend(value_list) else: try: for key, value in other_dict.items(): self.setlistdefault(key).append(value) except TypeError: raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary") for key, value in kwargs.iteritems(): self.setlistdefault(key).append(value) def dict(self): """ Returns current object as a dict with singular values. """ return dict((key, self[key]) for key in self) class DotExpandedDict(dict): """ A special dictionary constructor that takes a dictionary in which the keys may contain dots to specify inner dictionaries. It's confusing, but this example should make sense. 
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \ 'person.1.lastname': ['Willison'], \ 'person.2.firstname': ['Adrian'], \ 'person.2.lastname': ['Holovaty']}) >>> d {'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}} >>> d['person'] {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}} >>> d['person']['1'] {'lastname': ['Willison'], 'firstname': ['Simon']} # Gotcha: Results are unpredictable if the dots are "uneven": >>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1}) {'c': 1} """ def __init__(self, key_to_list_mapping): for k, v in key_to_list_mapping.items(): current = self bits = k.split('.') for bit in bits[:-1]: current = current.setdefault(bit, {}) # Now assign value to current position try: current[bits[-1]] = v except TypeError: # Special-case if current isn't a dict. current = {bits[-1]: v} class ImmutableList(tuple): """ A tuple-like object that raises useful errors when it is asked to mutate. Example:: >>> a = ImmutableList(range(5), warning="You cannot mutate this.") >>> a[3] = '4' Traceback (most recent call last): ... AttributeError: You cannot mutate this. """ def __new__(cls, *args, **kwargs): if 'warning' in kwargs: warning = kwargs['warning'] del kwargs['warning'] else: warning = 'ImmutableList object is immutable.' self = tuple.__new__(cls, *args, **kwargs) self.warning = warning return self def complain(self, *wargs, **kwargs): if isinstance(self.warning, Exception): raise self.warning else: raise AttributeError(self.warning) # All list mutation functions complain. __delitem__ = complain __delslice__ = complain __iadd__ = complain __imul__ = complain __setitem__ = complain __setslice__ = complain append = complain extend = complain insert = complain pop = complain remove = complain sort = complain reverse = complain class DictWrapper(dict): """ Wraps accesses to a dictionary so that certain values (those starting with the specified prefix) are passed through a function before being returned. The prefix is removed before looking up the real value. Used by the SQL construction code to ensure that values are correctly quoted before being used. """ def __init__(self, data, func, prefix): super(DictWrapper, self).__init__(data) self.func = func self.prefix = prefix def __getitem__(self, key): """ Retrieves the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value. """ if key.startswith(self.prefix): use_func = True key = key[len(self.prefix):] else: use_func = False value = super(DictWrapper, self).__getitem__(key) if use_func: return self.func(value) return value
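if __name__ == "__main__":
    # Small illustration of MultiValueDict and DictWrapper defined above
    # (sample data invented; only lookups are shown, since the module targets
    # the Python 2 dict protocol).
    d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    print(d['name'])             # 'Simon' -- plain lookup returns the last value
    print(d.getlist('name'))     # ['Adrian', 'Simon']
    d.appendlist('name', 'Jacob')
    print(d.getlist('name'))     # ['Adrian', 'Simon', 'Jacob']

    quoted = DictWrapper({'table': 'auth_user'}, lambda v: '"%s"' % v, 'quoted_')
    print(quoted['table'])           # auth_user -- no prefix, raw value returned
    print(quoted['quoted_table'])    # "auth_user" -- prefix stripped, func applied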
import os import sys import dlib import glob import csv import pickle as pp from sklearn.neighbors import KNeighborsClassifier import pandas as pd from sklearn import preprocessing # from sklearn.model_selection import train_test_split import webbrowser from timeit import Timer from keras.preprocessing.image import img_to_array from keras.models import load_model import numpy as np from time import time import time import multiprocessing from flask import Flask, render_template, request from PIL import Image from elasticsearch import Elasticsearch from tensorflow.python.keras._impl.keras.preprocessing.image import img_to_array from twilio.rest import Client from flask import Flask, render_template, request, url_for app = Flask(__name__, template_folder='templates') App_root=os.path.dirname("maintype") @app.route("/knn") def classify(try_vector): #CLASIFIER OPTION -A using KNN start_time = time.time() print("in classifier======================================================") p_1=pp.load(open('model.p','rb')) p_2=pp.load(open('model_1.p','rb')) pred = p_1.predict([try_vector]) v = p_2.inverse_transform(pred) print(p_2.inverse_transform(pred)) print("My program took", time.time() - start_time, "to run") return v def vector(destination,option): ###CONVERTING IMAGE INTO 128 vectors --DLIB predictor_path = "shape_predictor_5_face_landmarks.dat" face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat" faces_folder_path ="/home/sethiamayank14/PycharmProjects/project2/src/"+destination detector = dlib.get_frontal_face_detector() sp = dlib.shape_predictor(predictor_path) facerec = dlib.face_recognition_model_v1(face_rec_model_path) img = dlib.load_rgb_image(faces_folder_path) dets = detector(img, 1) for k, d in enumerate(dets): shape = sp(img, d) face_descriptor = facerec.compute_face_descriptor(img, shape) try_vector=face_descriptor #print("======================================",try_vector) if option == "KNN": d = classify(try_vector) #knn print(d) # if(d=="Akash Bhaiya"): # # account_sid = 'AC48a2b57630cde3ad7acc662ea91cf5fd' # auth_token = '101da4d773c821ed0c60d7f7dd17cb98' # client = Client(account_sid, auth_token) # # message = client.messages \ # .create( # body="Employee Akash entered", # from_='+15052786996', # to='+918826748151' # ) # # print(message.sid) # else: # account_sid = 'AC48a2b57630cde3ad7acc662ea91cf5fd' # auth_token = '101da4d773c821ed0c60d7f7dd17cb98' # client = Client(account_sid, auth_token) # # message = client.messages \ # .create( # body="intruder detected", # from_='+15052786996', # to='+918826748151' # ) # # print(message.sid) return d @app.route("/") # this runs first def index(): print("index working==================================") return render_template("upload1.html") @app.route("/upload", methods = ['POST']) def upload(): # print("heyy========================") target = os.path.join(App_root, "images/") # print("hello") if not os.path.isdir(target): print("In here") os.mkdir(target) print("-----------------------",request.files.getlist("file")) for file in request.files.getlist("file"): filename = file.filename destination ="".join([target, filename]) print(destination) file.save(destination) option = request.form['classifier'] print(option) if( option == "KNN"): name1 = vector(destination,option) name1 = str(name1[0]) print(name1, type(name1)) f = open('helloworld.html', 'w') # name = "Akash Bhaiya" name = name1 + '.jpg' print(name) name2 = "/home/sethiamayank14/PycharmProjects/project2/src/images/"+ name print(name2) message = """<html> 
<head></head> <body> <p>Your input image: </p> <br> <img src = "/home/sethiamayank14/PycharmProjects/project2/src/""" + destination + """"/> <br> <p>Standard Image:</p> <br> <img src = "/home/sethiamayank14/PycharmProjects/project2/src/images/""" + name + """"/> <p> """ + name1 + """</p> </body> </html>""" print(message) f.write(message) f.close() # Change path to reflect file location filename = 'helloworld.html' webbrowser.open_new_tab(filename) return name # return name if __name__== "__main__": app.run(debug=True,port=5001,host='127.0.0.1')
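# The classify() route above unpickles two artefacts it does not create:
# 'model.p' (a classifier over dlib's 128-d face descriptors, used as p_1) and
# 'model_1.p' (the label encoder used as p_2). A plausible, purely hypothetical
# sketch of how they could be produced -- the descriptors and names below are
# random placeholders, not project data:
import pickle
import numpy as np
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier

descriptors = np.random.randn(20, 128)            # stand-in for dlib face vectors
names = ['alice'] * 10 + ['bob'] * 10             # stand-in labels

encoder = preprocessing.LabelEncoder()
labels = encoder.fit_transform(names)

knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(descriptors, labels)

with open('model.p', 'wb') as handle:
    pickle.dump(knn, handle)        # loaded in classify() as p_1
with open('model_1.p', 'wb') as handle:
    pickle.dump(encoder, handle)    # loaded in classify() as p_2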
from dyn2sel.dcs_techniques import DCSTechnique import numpy as np from scipy.stats import mode class DESDDSel(DCSTechnique): def predict(self, ensemble, instances, real_labels=None): return ensemble[ensemble.get_max_accuracy()].predict(instances)
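if __name__ == "__main__":
    # Hypothetical mock harness, only to show the contract predict() relies on:
    # the ensemble must support indexing and expose get_max_accuracy(). The real
    # ensemble comes from dyn2sel; this sketch also assumes DCSTechnique's
    # constructor needs no required arguments.
    class _ConstantClassifier:
        def __init__(self, label):
            self.label = label

        def predict(self, X):
            return np.full(len(X), self.label)

    class _MockEnsemble(list):
        def get_max_accuracy(self):
            return 1    # pretend member 1 is the most accurate

    ensemble = _MockEnsemble([_ConstantClassifier(0), _ConstantClassifier(1)])
    print(DESDDSel().predict(ensemble, np.zeros((4, 2))))   # -> [1 1 1 1]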
""" This module provide the case to test the coexistance between TDX guest and non TD guest. There are two types of non-TD guest: 1. Boot with legacy BIOS, it is default loader without pass "-loader" or "-bios" option 2. Boot with OVMF UEFI BIOS, will boot with "-loader" => OVMFD.fd compiled from the latest edk2 project. """ import logging import pytest from pycloudstack.vmparam import VM_TYPE_LEGACY, VM_TYPE_EFI, VM_TYPE_TD __author__ = 'cpio' LOG = logging.getLogger(__name__) # pylint: disable=invalid-name pytestmark = [ pytest.mark.vm_image("latest-guest-image"), pytest.mark.vm_kernel("latest-guest-kernel"), ] def test_tdguest_with_legacy_base(vm_factory): """ Test the different type VM run parallel Test Steps ---------- 1. Launch a TD guest 2. Launch a legacy guest 3. Launch an OVMF guest """ LOG.info("Create a TD guest") td_inst = vm_factory.new_vm(VM_TYPE_TD, auto_start=True) LOG.info("Create a legacy guest") legacy_inst = vm_factory.new_vm(VM_TYPE_LEGACY, auto_start=True) LOG.info("Create an OVMF guest") efi_inst = vm_factory.new_vm(VM_TYPE_EFI, auto_start=True) assert td_inst.wait_for_ssh_ready(), "Could not reach TD VM" assert legacy_inst.wait_for_ssh_ready(), "Could not reach legacy VM" assert efi_inst.wait_for_ssh_ready(), "Could not reach EFI VM"
import sys import pytest from pre_commit_hooks.loaderon_hooks.tests.util.test_helpers import perform_test_on_file_expecting_result from pre_commit_hooks.loaderon_hooks.general_hooks.check_location import main @pytest.fixture(autouse=True) def clean_sys_argv(): sys.argv = [] # Each line is a directory that allows certain types of files. sys.argv.append('--directories') sys.argv.append(r'.*\/xml') sys.argv.append('--directories') sys.argv.append(r'.*\/javascript') # Each line specifies what types of files can be located inside the directory. sys.argv.append('--files') sys.argv.append(r'correct_xml.xml') sys.argv.append('--files') sys.argv.append(r'correct_js.js') yield def test_locations_ok_1(): perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main) def test_locations_ok_2(): perform_test_on_file_expecting_result('check_location_samples/javascript/correct_js.js', main) def test_locations_error1(): perform_test_on_file_expecting_result('check_location_samples/xml/incorrect_js.js', main, expected_result=2) def test_locations_error2(): perform_test_on_file_expecting_result('check_location_samples/not_enabled_directory/incorrect_xml.xml', main, expected_result=2) def test_locations_arguments_size_mismatch_error(): sys.argv = [] sys.argv.append('--directories') sys.argv.append(r'.*\/xml') # Lacking files for this directory sys.argv.append('--directories') sys.argv.append(r'.*\/javascript') sys.argv.append('--files') sys.argv.append(r'correct_xml.xml') perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main, expected_result=2) def test_locations_no_arguments_error(): sys.argv = [] with pytest.raises(TypeError) as error: perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main) assert "'NoneType' object is not iterable" in str(error.value)
""" 1) "a" + "bc" -> abc 2) 3 * "bc" -> bcbcbc 3) "3" * "bc" -> error as we can't use the * operator on two strings 4) abcd"[2] -> c (Just takes the character at index 2 in the string. a has index 0 and b index 1) 5) "abcd"[0:2] -> ab (Returns the substring from index 0 all the way to index n -1 in this case b) 6) "abcd"[:2] -> ab (Not giving a starting value to slice the string we start at 0) 7) "abcd"[2:] -> cd (When we don't give an end value it goes all the way to the end of the string) """
exp_name = 'basicvsr_vimeo90k_bd' # model settings model = dict( type='BasicVSR', generator=dict( type='BasicVSRNet', mid_channels=64, num_blocks=30, spynet_pretrained='pretrained_models/spynet.pth'), pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean')) # model training and testing settings train_cfg = dict(fix_iter=5000) test_cfg = dict(metrics=['PSNR'], crop_border=0, convert_to='y') # dataset settings train_dataset_type = 'SRVimeo90KMultipleGTDataset' val_dataset_type = 'SRTestMultipleGTDataset' test_dataset_type = 'SRVimeo90KDataset' train_pipeline = [ dict( type='LoadImageFromFileList', io_backend='disk', key='lq', channel_order='rgb'), dict( type='LoadImageFromFileList', io_backend='disk', key='gt', channel_order='rgb'), dict(type='RescaleToZeroOne', keys=['lq', 'gt']), dict(type='PairedRandomCrop', gt_patch_size=256), dict( type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='horizontal'), dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'), dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5), dict(type='MirrorSequence', keys=['lq', 'gt']), dict(type='FramesToTensor', keys=['lq', 'gt']), dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']) ] val_pipeline = [ dict(type='GenerateSegmentIndices', interval_list=[1]), dict( type='LoadImageFromFileList', io_backend='disk', key='lq', channel_order='rgb'), dict( type='LoadImageFromFileList', io_backend='disk', key='gt', channel_order='rgb'), dict(type='RescaleToZeroOne', keys=['lq', 'gt']), dict(type='FramesToTensor', keys=['lq', 'gt']), dict( type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path', 'key']) ] test_pipeline = [ dict( type='LoadImageFromFileList', io_backend='disk', key='lq', channel_order='rgb'), dict( type='LoadImageFromFileList', io_backend='disk', key='gt', channel_order='rgb'), dict(type='RescaleToZeroOne', keys=['lq', 'gt']), dict(type='MirrorSequence', keys=['lq']), dict(type='FramesToTensor', keys=['lq', 'gt']), dict( type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path', 'key']) ] data = dict( workers_per_gpu=6, train_dataloader=dict(samples_per_gpu=4, drop_last=True), # 2 gpus val_dataloader=dict(samples_per_gpu=1), test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1), # train train=dict( type='RepeatDataset', times=1000, dataset=dict( type=train_dataset_type, lq_folder='data/vimeo90k/BDx4', gt_folder='data/vimeo90k/GT', ann_file='data/vimeo90k/meta_info_Vimeo90K_train_GT.txt', pipeline=train_pipeline, scale=4, test_mode=False)), # val val=dict( type=val_dataset_type, lq_folder='data/Vid4/BDx4', gt_folder='data/Vid4/GT', pipeline=val_pipeline, scale=4, test_mode=True), # test test=dict( type=test_dataset_type, lq_folder='data/vimeo90k/BDx4', gt_folder='data/vimeo90k/GT', ann_file='data/vimeo90k/meta_info_Vimeo90K_test_GT.txt', pipeline=test_pipeline, scale=4, num_input_frames=7, test_mode=True), ) # optimizer optimizers = dict( generator=dict( type='Adam', lr=2e-4, betas=(0.9, 0.99), paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.125)}))) # learning policy total_iters = 300000 lr_config = dict( policy='CosineRestart', by_epoch=False, periods=[300000], restart_weights=[1], min_lr=1e-7) checkpoint_config = dict(interval=5, save_optimizer=True, by_epoch=False) # remove gpu_collect=True in non distributed training evaluation = dict(interval=5000, save_image=False, gpu_collect=True) log_config = dict( interval=100, hooks=[ dict(type='TextLoggerHook', by_epoch=False), # 
dict(type='TensorboardLoggerHook'), ]) visual_config = None # runtime settings dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = f'./work_dirs/{exp_name}' load_from = None resume_from = None workflow = [('train', 1)]
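# Such a config is normally consumed through MMCV's Config loader rather than
# imported directly; a minimal sketch, assuming an MMCV/MMEditing environment
# (the file path below is illustrative).
from mmcv import Config

cfg = Config.fromfile('configs/basicvsr_vimeo90k_bd.py')
print(cfg.exp_name)                    # 'basicvsr_vimeo90k_bd'
print(cfg.model.generator.type)        # 'BasicVSRNet'
print(cfg.data.train_dataloader)       # {'samples_per_gpu': 4, 'drop_last': True}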
#!/usr/bin/env python # -*- encoding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from glob import glob from os.path import basename from os.path import splitext from setuptools import find_packages from setuptools import setup with open("README.md", "r") as fh: long_description = fh.read() setup( name='evgflip', version='0.1.0', license='Apache License, Version 2.0', description='', long_description=long_description, long_description_content_type='text/markdown', author='David Bradford', author_email='david.bradford@mongodb.com', url='https://github.com/dbradf/evgflip', packages=find_packages('src'), package_dir={'': 'src'}, py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')], include_package_data=True, zip_safe=False, classifiers=[ 'Intended Audience :: Developers', 'Operating System :: Unix', 'Operating System :: POSIX', 'Operating System :: Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', ], install_requires=[ 'boltons==19.1.0', 'Click==7.0', 'evergreen.py==0.5.0', 'PyYAML==5.4', 'structlog==19.1.0', ], entry_points=''' [console_scripts] evg-flip=evgflip.cli:main ''', )