from numpy.polynomial import *
|
#! /usr/bin/env python3
import click
import serial
from enum import Enum, auto
from intelhex import IntelHex, IntelHexError
import constants
@click.command()
@click.argument(
'file',
required=True,
type=click.Path(dir_okay=False, writable=True)
)
@click.option(
'--append', '-a',
help='append data to hex file',
is_flag=True
)
@click.option(
'--port', '-p',
required=True,
help='serial port where the ELF is connected'
)
@click.option(
'--baud', '-b',
help='serial baud rate',
type=int,
default=9600
)
def main(file, append, port, baud):
"""
Receive a code file from an attached ELF running a MAX binary sender.
The program reads a MAX-format binary file from the specified
serial port and stores it in the given file.
"""
class State(Enum):
DATA = auto()
ESCAPE = auto()
ADDR_HI = auto()
ADDR_LO = auto()
DONE = auto()
state = State.DATA
address = 0
intel_hex = IntelHex()
if append:
intel_hex.loadhex(file)
with serial.serial_for_url(port) as ser:
ser.baudrate = baud
ser.write(constants.START_RECV)
while state != State.DONE:
data = ser.read(ser.in_waiting or 1)  # block for at least one byte so the loop doesn't spin
for byte in data:
if state == State.DATA:
if byte == constants.ESCAPE:
state = State.ESCAPE
elif byte == constants.END_OF_FILE:
state = State.DONE
elif byte == constants.NEW_ADDRESS:
state = State.ADDR_HI
else:
intel_hex[address] = byte
address += 1
elif state == State.ESCAPE:
intel_hex[address] = byte ^ 0x20
address += 1
state = State.DATA
elif state == State.ADDR_HI:
address = byte << 8
state = State.ADDR_LO
elif state == State.ADDR_LO:
address |= byte
state = State.DATA
intel_hex.write_hex_file(file)
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
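# The `constants` module imported above is not included in this listing. A
# minimal sketch of what it needs to provide, based only on the names used by
# the state machine above, is given here as comments; the byte values are
# assumptions and would have to match whatever the MAX binary sender on the
# ELF actually emits.
#
#   # constants.py (hypothetical)
#   START_RECV = b'R'      # byte written to the ELF to start the transfer
#   ESCAPE = 0x7D          # next data byte arrives XOR'd with 0x20
#   END_OF_FILE = 0x04     # marks the end of the transfer
#   NEW_ADDRESS = 0x7E     # followed by the high and low bytes of a new address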
|
from flask import Flask, render_template, redirect, abort, send_file
from flaskext.markdown import Markdown
import os.path
from config import config
app = Flask(__name__)
Markdown(app)
site_title = config['site_title']
site_all_notification = config['site_all_notification']
footer = '<small class="m-0 text-center text-white">' + config['footer_text'] + '</small>'
root_directory = config['root_directory']
analytics = config['analytics']
seo_author = config['seo_author']
seo_description = config['seo_description']
@app.errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
@app.errorhandler(404)
def page_not_found(e):
return redirect('/pages/errors/404'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/')
def redirect_index():
return redirect('/pages/home')
@app.route('/pages/<path:request_path>')
def render_markdown(request_path):
path = root_directory + '/markdown/' + request_path + '.md'
if not os.path.isfile(path):
abort(404)
with open(path, 'r') as markdown_file:
md = markdown_file.read()
return render_template('main_bootstrap_frame.html', md=md, site_all_notification=site_all_notification, site_title=site_title, footer=footer, seo_author=seo_author, seo_description=seo_description)
@app.route('/downloads/<path:file_path>')
def send_a_file(file_path):
file_path = root_directory + '/documents/' + file_path
return send_file(file_path)
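# The `config` module imported at the top is not shown here. A minimal sketch
# of the dictionary this app expects, with the keys taken from the lookups
# above; every value below is a placeholder assumption.
#
#   # config.py (hypothetical)
#   config = {
#       'site_title': 'My Notes',
#       'site_all_notification': '',
#       'footer_text': 'Footer text goes here',
#       'root_directory': '/srv/notes',
#       'analytics': '',
#       'seo_author': 'Site Author',
#       'seo_description': 'A markdown-backed notes site',
#   }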
|
# -*- coding: utf-8 -*-
#
# tgs documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tgs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tgsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'tgs.tex',
u'tgs Documentation',
u"hayata-yamamotoo", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tgs', u'tgs Documentation',
[u"hayata-yamamotoo"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tgs', u'tgs Documentation',
u"hayata-yamamotoo", 'tgs',
'tgs', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
import os
import io
import itertools
import gzip
import tarfile
import zipfile
import contextlib
import functools
from tqdm import tqdm
from pytools import memoize_method
import pandas as pd
import ir_datasets
import onir
from onir import util, datasets, indices
from onir.interfaces import trec, plaintext
def sanitize_path(s):
return s.replace('/', '--')
@datasets.register('irds')
class IrdsDataset(datasets.IndexBackedDataset):
@staticmethod
def default_config():
result = datasets.IndexBackedDataset.default_config()
result.update({
'ds': '', # used as shortcut
'doc_fields': '', # used as shortcut
'query_fields': '', # used as shortcut
'docs_ds': '',
'docs_index_fields': '',
'docs_rerank_fields': '',
'queries_ds': '',
'queries_index_fields': '',
'queries_rerank_fields': '',
'rankfn': onir.config.Ranker(),
'ranktopk': 100,
})
return result
def __init__(self, config, logger, vocab):
super().__init__(config, logger, vocab)
if config['ds']:
ds = ir_datasets.load(config['ds'])
if not config['docs_ds']:
# HACK: find "parent" dataset that contains same docs handler so we don't re-build the index for the same collection
segments = config['ds'].split('/')
docs_handler = ds.docs_handler()
parent_docs_ds = config['ds']
while len(segments) > 1:
segments = segments[:-1]
parent_ds = ir_datasets.load('/'.join(segments))
if parent_ds.has_docs() and parent_ds.docs_handler() == docs_handler:
parent_docs_ds = '/'.join(segments)
config['docs_ds'] = parent_docs_ds
if not config['queries_ds']:
config['queries_ds'] = config['ds']
if config['doc_fields']:
if not config['docs_index_fields']:
config['docs_index_fields'] = config['doc_fields']
if not config['docs_rerank_fields']:
config['docs_rerank_fields'] = config['doc_fields']
if config['query_fields']:
if not config['queries_index_fields']:
config['queries_index_fields'] = config['query_fields']
if not config['queries_rerank_fields']:
config['queries_rerank_fields'] = config['query_fields']
self.docs_ds = ir_datasets.load(config['docs_ds'])
self.queries_ds = ir_datasets.load(config['queries_ds'])
assert self.docs_ds.has_docs()
assert self.queries_ds.has_queries()
if not config['docs_index_fields']:
config['docs_index_fields'] = ','.join(self.docs_ds.docs_cls()._fields[1:])
self.logger.info('auto-filled docs_index_fields as {docs_index_fields}'.format(**config))
if not config['docs_rerank_fields']:
config['docs_rerank_fields'] = ','.join(self.docs_ds.docs_cls()._fields[1:])
self.logger.info('auto-filled docs_rerank_fields as {docs_rerank_fields}'.format(**config))
if not config['queries_index_fields']:
config['queries_index_fields'] = ','.join(self.queries_ds.queries_cls()._fields[1:])
self.logger.info('auto-filled queries_index_fields as {queries_index_fields}'.format(**config))
if not config['queries_rerank_fields']:
config['queries_rerank_fields'] = ','.join(self.queries_ds.queries_cls()._fields[1:])
self.logger.info('auto-filled queries_rerank_fields as {queries_rerank_fields}'.format(**config))
base_path = os.path.join(util.path_dataset(self), sanitize_path(self.config['docs_ds']))
os.makedirs(base_path, exist_ok=True)
real_anserini_path = os.path.join(base_path, 'anserini.porter.{docs_index_fields}'.format(**self.config))
os.makedirs(real_anserini_path, exist_ok=True)
virtual_anserini_path = '{}.{}'.format(real_anserini_path, sanitize_path(config['queries_ds']))
if not os.path.exists(virtual_anserini_path):
os.symlink(real_anserini_path, virtual_anserini_path, target_is_directory=True)
self.index = indices.AnseriniIndex(virtual_anserini_path, stemmer='porter')
self.doc_store = indices.IrdsDocstore(self.docs_ds.docs_store(), config['docs_rerank_fields'])
def _get_docstore(self):
return self.doc_store
def _get_index(self, record):
return self.index
def _get_index_for_batchsearch(self):
return self.index
@memoize_method
def qrels(self, fmt='dict'):
if fmt == 'dict':
return self.queries_ds.qrels_dict()
if fmt == 'df':
df = pd.DataFrame(self.queries_ds.qrels_iter())
df = df.rename(columns={'query_id': 'qid', 'doc_id': 'did', 'relevance': 'score'})
return df
raise RuntimeError(f'unsupported fmt={fmt}')
@memoize_method
def load_queries(self) -> dict:
queries_cls = self.queries_ds.queries_cls()
fields = self.config['queries_rerank_fields'].split(',')
assert all(f in queries_cls._fields for f in fields)
field_idxs = [queries_cls._fields.index(f) for f in fields]
return {q.query_id: '\n'.join(q[i] for i in field_idxs) for q in self.queries_ds.queries_iter()}
@memoize_method
def _load_queries_base(self, subset):
# HACK: this subtly only gets called for runs in this impl. Use queries_index_fields instead here.
queries_cls = self.queries_ds.queries_cls()
fields = self.config['queries_index_fields'].split(',')
assert all(f in queries_cls._fields for f in fields)
field_idxs = [queries_cls._fields.index(f) for f in fields]
return {q.query_id: ' '.join(q[i] for i in field_idxs).replace('\n', ' ') for q in self.queries_ds.queries_iter()}
def path_segment(self):
return '__'.join([
super().path_segment(),
sanitize_path(self.config["docs_ds"]),
self.config['docs_index_fields'],
self.config['docs_rerank_fields'],
sanitize_path(self.config["queries_ds"]),
self.config['queries_index_fields'],
self.config['queries_rerank_fields']])
def init(self, force=False):
if not self.index.built() or force:
doc_it = self._init_iter_collection()
doc_it = self.logger.pbar(doc_it, 'docs')
self.index.build(doc_it)
# Attempt to grab everything (without wasting too many resources).
# This isn't really a guarantee we have everything, but it should work in most cases.
next(self.docs_ds.docs_iter())
next(self.queries_ds.queries_iter())
next(self.queries_ds.qrels_iter())
def _init_iter_collection(self):
docs_cls = self.docs_ds.docs_cls()
fields = self.config['docs_index_fields'].split(',')
assert all(f in docs_cls._fields for f in fields)
field_idxs = [docs_cls._fields.index(f) for f in fields]
for doc in self.docs_ds.docs_iter():
yield indices.RawDoc(doc.doc_id, '\n'.join(str(doc[i]) for i in field_idxs))
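# Example of how the 'ds' / 'doc_fields' / 'query_fields' shortcuts in
# default_config() expand (hypothetical values; the id must be one registered
# in ir_datasets):
#
#   {'ds': 'msmarco-passage/dev', 'doc_fields': 'text', 'query_fields': 'text'}
#
# docs_ds resolves to the nearest parent dataset sharing the same docs handler
# (here 'msmarco-passage'), queries_ds falls back to the full id, doc_fields
# fills docs_index_fields and docs_rerank_fields, and query_fields fills
# queries_index_fields and queries_rerank_fields, unless those are set
# explicitly.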
|
from django.apps import AppConfig
class UnidadeConfig(AppConfig):
name = 'unidade'
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
class TestDistMnist2x2Lars(TestDistBase):
def _setup_config(self):
self._sync_mode = True
self._use_reduce = False
def test_dist_mnist_lars(self):
self.check_with_place("dist_mnist_lars.py", delta=1e-5)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
'''
A module for shelling out.
Keep in mind that this module is insecure, in that it can give whoever has
access to the master root execution access to all salt minions.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import functools
import glob
import logging
import os
import shutil
import subprocess
import sys
import time
import traceback
import fnmatch
import base64
import re
import tempfile
# Import salt libs
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.json
import salt.utils.path
import salt.utils.platform
import salt.utils.powershell
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.timed_subprocess
import salt.utils.user
import salt.utils.versions
import salt.utils.vt
import salt.utils.win_dacl
import salt.utils.win_reg
import salt.grains.extra
import salt.pillar  # used by _gather_pillar() below
from salt.ext import six
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \
SaltInvocationError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range, zip, map
# Only available on POSIX systems, nonfatal on windows
try:
import pwd
import grp
except ImportError:
pass
if salt.utils.platform.is_windows():
from salt.utils.win_runas import runas as win_runas
from salt.utils.win_functions import escape_argument as _cmd_quote
HAS_WIN_RUNAS = True
else:
from salt.ext.six.moves import shlex_quote as _cmd_quote
HAS_WIN_RUNAS = False
__proxyenabled__ = ['*']
# Define the module's virtual name
__virtualname__ = 'cmd'
# Set up logging
log = logging.getLogger(__name__)
DEFAULT_SHELL = salt.grains.extra.shell()['shell']
# Overwriting the cmd python module makes debugging modules with pdb a bit
# harder, so let's do it this way instead.
def __virtual__():
return __virtualname__
def _check_cb(cb_):
'''
If the callback is None or is not callable, return a lambda that returns
the value passed.
'''
if cb_ is not None:
if hasattr(cb_, '__call__'):
return cb_
else:
log.error('log_callback is not callable, ignoring')
return lambda x: x
def _python_shell_default(python_shell, __pub_jid):
'''
Set python_shell default based on remote execution and __opts__['cmd_safe']
'''
try:
# Default to python_shell=True when run directly from remote execution
# system. Cross-module calls won't have a jid.
if __pub_jid and python_shell is None:
return True
elif __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
return True
except NameError:
pass
return python_shell
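# Example: a direct CLI invocation such as `salt '*' cmd.run 'echo $HOME'`
# carries a __pub_jid, so python_shell defaults to True there; a cross-module
# call (no jid) keeps python_shell=False unless the caller sets it explicitly
# or the minion is configured with `cmd_safe: False`.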
def _chroot_pids(chroot):
pids = []
for root in glob.glob('/proc/[0-9]*/root'):
try:
link = os.path.realpath(root)
if link.startswith(chroot):
pids.append(int(os.path.basename(
os.path.dirname(root)
)))
except OSError:
pass
return pids
def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_override=None):
'''
If template is a valid template engine, process the cmd and cwd through
that engine.
'''
if not template:
return (cmd, cwd)
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
'Attempted to render file paths with unavailable engine '
'{0}'.format(template)
)
kwargs = {}
kwargs['salt'] = __salt__
if pillarenv is not None or pillar_override is not None:
pillarenv = pillarenv or __opts__['pillarenv']
kwargs['pillar'] = _gather_pillar(pillarenv, pillar_override)
else:
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['saltenv'] = saltenv
def _render(contents):
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(contents))
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn,
to_str=True,
**kwargs
)
salt.utils.files.safe_rm(tmp_path_fn)
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to execute cmd with error: {0}'.format(
data['data']
)
)
else:
return data['data']
cmd = _render(cmd)
cwd = _render(cwd)
return (cmd, cwd)
def _check_loglevel(level='info'):
'''
Retrieve the level code for use in logging.Logger.log().
'''
try:
level = level.lower()
if level == 'quiet':
return None
else:
return LOG_LEVELS[level]
except (AttributeError, KeyError):
log.error(
'Invalid output_loglevel \'%s\'. Valid levels are: %s. Falling '
'back to \'info\'.',
level, ', '.join(sorted(LOG_LEVELS, reverse=True))
)
return LOG_LEVELS['info']
def _parse_env(env):
if not env:
env = {}
if isinstance(env, list):
env = salt.utils.data.repack_dictlist(env)
if not isinstance(env, dict):
env = {}
return env
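# Example: an env passed as a list of single-key dicts (the form YAML CLI input
# often produces), e.g. [{'FOO': 'bar'}, {'BAZ': 'qux'}], is repacked by
# salt.utils.data.repack_dictlist into {'FOO': 'bar', 'BAZ': 'qux'}; any other
# non-dict input falls back to an empty dict.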
def _gather_pillar(pillarenv, pillar_override):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__['id'],
__opts__['saltenv'],
pillar_override=pillar_override,
pillarenv=pillarenv
)
ret = pillar.compile_pillar()
if pillar_override and isinstance(pillar_override, dict):
ret.update(pillar_override)
return ret
def _check_avail(cmd):
'''
Check to see if the given command can be run
'''
if isinstance(cmd, list):
cmd = ' '.join([six.text_type(x) if not isinstance(x, six.string_types) else x
for x in cmd])
bret = True
wret = False
if __salt__['config.get']('cmd_blacklist_glob'):
blist = __salt__['config.get']('cmd_blacklist_glob', [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# BAD! you are blacklisted
bret = False
if __salt__['config.get']('cmd_whitelist_glob', []):
blist = __salt__['config.get']('cmd_whitelist_glob', [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# GOOD! You are whitelisted
wret = True
break
else:
# If no whitelist is set then all is good
wret = True
return bret and wret
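# Example (hypothetical minion config values) of how the globs above apply:
#   cmd_blacklist_glob: ['rm *']           -> 'rm -rf /' matches and is rejected
#   cmd_whitelist_glob: ['ls *', 'cat *']  -> only commands matching one of
#                                             these patterns are accepted
# If no whitelist is configured, any command that is not blacklisted passes.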
def _run(cmd,
cwd=None,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
prepend_path=None,
rstrip=True,
template=None,
umask=None,
timeout=None,
with_communicate=True,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
pillarenv=None,
pillar_override=None,
use_vt=False,
password=None,
bg=False,
encoded_cmd=False,
success_retcodes=None,
**kwargs):
'''
Do the DRY thing and only call subprocess.Popen() once
'''
if 'pillar' in kwargs and not pillar_override:
pillar_override = kwargs['pillar']
if _is_valid_shell(shell) is False:
log.warning(
'Attempt to run a shell command with what may be an invalid shell! '
'Check to ensure that the shell <%s> is valid for this user.',
shell
)
output_loglevel = _check_loglevel(output_loglevel)
log_callback = _check_cb(log_callback)
use_sudo = False
if runas is None and '__context__' in globals():
runas = __context__.get('runas')
if password is None and '__context__' in globals():
password = __context__.get('runas_password')
# Set the default working directory to the home directory of the user
# salt-minion is running as. Defaults to home directory of user under which
# the minion is running.
if not cwd:
cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
# make sure we can access the cwd
# when run from sudo or another environment where the euid is
# changed ~ will expand to the home of the original uid and
# the euid might not have access to it. See issue #1844
if not os.access(cwd, os.R_OK):
cwd = '/'
if salt.utils.platform.is_windows():
cwd = os.path.abspath(os.sep)
else:
# Handle edge cases where numeric/other input is entered, and would be
# yaml-ified into non-string types
cwd = six.text_type(cwd)
if bg:
ignore_retcode = True
use_vt = False
if not salt.utils.platform.is_windows():
if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
msg = 'The shell {0} is not available'.format(shell)
raise CommandExecutionError(msg)
if salt.utils.platform.is_windows() and use_vt:
raise CommandExecutionError('VT not available on windows')
if shell.lower().strip() == 'powershell':
# Strip whitespace
if isinstance(cmd, six.string_types):
cmd = cmd.strip()
# If we were called by script(), then fakeout the Windows
# shell to run a Powershell script.
# Else just run a Powershell command.
stack = traceback.extract_stack(limit=2)
# extract_stack() returns a list of tuples.
# The last item in the list [-1] is the current method; the second-to-last
# item [-2] is therefore the method that called us.
# The third item [2] in each tuple is the name of that method.
if stack[-2][2] == 'script':
cmd = 'Powershell -NonInteractive -NoProfile -ExecutionPolicy Bypass -File ' + cmd
elif encoded_cmd:
cmd = 'Powershell -NonInteractive -EncodedCommand {0}'.format(cmd)
else:
cmd = 'Powershell -NonInteractive -NoProfile "{0}"'.format(cmd.replace('"', '\\"'))
# munge the cmd and cwd through the template
(cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)
ret = {}
# If the pub jid is here then this is a remote execution or salt-call command and needs to be
# checked if blacklisted
if '__pub_jid' in kwargs:
if not _check_avail(cmd):
raise CommandExecutionError(
'The shell command "{0}" is not permitted'.format(cmd)
)
env = _parse_env(env)
for bad_env_key in (x for x, y in six.iteritems(env) if y is None):
log.error('Environment variable \'%s\' passed without a value. '
'Setting value to an empty string', bad_env_key)
env[bad_env_key] = ''
def _get_stripped(cmd):
# Return stripped command string copies to improve logging.
if isinstance(cmd, list):
return [x.strip() if isinstance(x, six.string_types) else x for x in cmd]
elif isinstance(cmd, six.string_types):
return cmd.strip()
else:
return cmd
if output_loglevel is not None:
# Always log the shell commands at INFO unless quiet logging is
# requested. The command output is what will be controlled by the
# 'loglevel' parameter.
msg = (
'Executing command {0}{1}{0} {2}{3}in directory \'{4}\'{5}'.format(
'\'' if not isinstance(cmd, list) else '',
_get_stripped(cmd),
'as user \'{0}\' '.format(runas) if runas else '',
'in group \'{0}\' '.format(group) if group else '',
cwd,
'. Executing command in the background, no output will be '
'logged.' if bg else ''
)
)
log.info(log_callback(msg))
if runas and salt.utils.platform.is_windows():
if not HAS_WIN_RUNAS:
msg = 'missing salt/utils/win_runas.py'
raise CommandExecutionError(msg)
if isinstance(cmd, (list, tuple)):
cmd = ' '.join(cmd)
return win_runas(cmd, runas, password, cwd)
if runas and salt.utils.platform.is_darwin():
# we need to insert the user simulation into the command itself and not
# just run it from the environment on macOS as that
# method doesn't work properly when run as root for certain commands.
if isinstance(cmd, (list, tuple)):
cmd = ' '.join(map(_cmd_quote, cmd))
cmd = 'su -l {0} -c "{1}"'.format(runas, cmd)
# set runas to None, because if you try to run `su -l` as well as
# simulate the environment macOS will prompt for the password of the
# user and will cause salt to hang.
runas = None
if runas:
# Verify that the runas user exists before munging the command
try:
pwd.getpwnam(runas)
except KeyError:
raise CommandExecutionError(
'User \'{0}\' is not available'.format(runas)
)
if group:
if salt.utils.platform.is_windows():
msg = 'group is not currently available on Windows'
raise SaltInvocationError(msg)
if not salt.utils.path.which_bin(['sudo']):
msg = 'group argument requires sudo, but sudo was not found'
raise CommandExecutionError(msg)
try:
grp.getgrnam(group)
except KeyError:
raise CommandExecutionError(
'Group \'{0}\' is not available'.format(group)
)
else:
use_sudo = True
if runas or group:
try:
# Getting the environment for the runas user
# Use markers to thwart any stdout noise
# There must be a better way to do this.
import uuid
marker = '<<<' + str(uuid.uuid4()) + '>>>'
marker_b = marker.encode(__salt_system_encoding__)
py_code = (
'import sys, os, itertools; '
'sys.stdout.write(\"' + marker + '\"); '
'sys.stdout.write(\"\\0\".join(itertools.chain(*os.environ.items()))); '
'sys.stdout.write(\"' + marker + '\");'
)
if use_sudo or __grains__['os'] in ['MacOS', 'Darwin']:
env_cmd = ['sudo']
# runas is optional if use_sudo is set.
if runas:
env_cmd.extend(['-u', runas])
if group:
env_cmd.extend(['-g', group])
if shell != DEFAULT_SHELL:
env_cmd.extend(['-s', '--', shell, '-c'])
else:
env_cmd.extend(['-i', '--'])
env_cmd.extend([sys.executable])
elif __grains__['os'] in ['FreeBSD']:
env_cmd = ('su', '-', runas, '-c',
"{0} -c {1}".format(shell, sys.executable))
elif __grains__['os_family'] in ['Solaris']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
elif __grains__['os_family'] in ['AIX']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
else:
env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
msg = 'env command: {0}'.format(env_cmd)
log.debug(log_callback(msg))
env_bytes, env_encoded_err = subprocess.Popen(
env_cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE
).communicate(salt.utils.stringutils.to_bytes(py_code))
marker_count = env_bytes.count(marker_b)
if marker_count == 0:
# Possibly PAM prevented the login
log.error(
'Environment could not be retrieved for user \'%s\': '
'stderr=%r stdout=%r',
runas, env_encoded_err, env_bytes
)
# Ensure that we get an empty env_runas dict below since we
# were not able to get the environment.
env_bytes = b''
elif marker_count != 2:
raise CommandExecutionError(
'Environment could not be retrieved for user \'{0}\''.format(runas),
info={'stderr': repr(env_encoded_err),
'stdout': repr(env_bytes)}
)
else:
# Strip the marker
env_bytes = env_bytes.split(marker_b)[1]
if six.PY2:
import itertools
env_runas = dict(itertools.izip(*[iter(env_bytes.split(b'\0'))]*2))
elif six.PY3:
env_runas = dict(list(zip(*[iter(env_bytes.split(b'\0'))]*2)))
env_runas = dict(
(salt.utils.stringutils.to_str(k),
salt.utils.stringutils.to_str(v))
for k, v in six.iteritems(env_runas)
)
env_runas.update(env)
# Fix platforms like Solaris that don't set a USER env var in the
# user's default environment as obtained above.
if env_runas.get('USER') != runas:
env_runas['USER'] = runas
# Fix some corner cases where shelling out to get the user's
# environment returns the wrong home directory.
runas_home = os.path.expanduser('~{0}'.format(runas))
if env_runas.get('HOME') != runas_home:
env_runas['HOME'] = runas_home
env = env_runas
except ValueError as exc:
log.exception('Error raised retrieving environment for user %s', runas)
raise CommandExecutionError(
'Environment could not be retrieved for user \'{0}\': {1}'.format(
runas, exc
)
)
if reset_system_locale is True:
if not salt.utils.platform.is_windows():
# Default to C!
# Salt only knows how to parse English words
# Don't override if the user has passed LC_ALL
env.setdefault('LC_CTYPE', 'C')
env.setdefault('LC_NUMERIC', 'C')
env.setdefault('LC_TIME', 'C')
env.setdefault('LC_COLLATE', 'C')
env.setdefault('LC_MONETARY', 'C')
env.setdefault('LC_MESSAGES', 'C')
env.setdefault('LC_PAPER', 'C')
env.setdefault('LC_NAME', 'C')
env.setdefault('LC_ADDRESS', 'C')
env.setdefault('LC_TELEPHONE', 'C')
env.setdefault('LC_MEASUREMENT', 'C')
env.setdefault('LC_IDENTIFICATION', 'C')
env.setdefault('LANGUAGE', 'C')
else:
# On Windows set the codepage to US English.
if python_shell:
cmd = 'chcp 437 > nul & ' + cmd
if clean_env:
run_env = env
else:
run_env = os.environ.copy()
run_env.update(env)
if prepend_path:
run_env['PATH'] = ':'.join((prepend_path, run_env['PATH']))
if python_shell is None:
python_shell = False
new_kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env if six.PY3 else salt.utils.data.encode(run_env),
'stdin': six.text_type(stdin) if stdin is not None else stdin,
'stdout': stdout,
'stderr': stderr,
'with_communicate': with_communicate,
'timeout': timeout,
'bg': bg,
}
if 'stdin_raw_newlines' in kwargs:
new_kwargs['stdin_raw_newlines'] = kwargs['stdin_raw_newlines']
if umask is not None:
_umask = six.text_type(umask).lstrip('0')
if _umask == '':
msg = 'Zero umask is not allowed.'
raise CommandExecutionError(msg)
try:
_umask = int(_umask, 8)
except ValueError:
raise CommandExecutionError("Invalid umask: '{0}'".format(umask))
else:
_umask = None
if runas or group or umask:
new_kwargs['preexec_fn'] = functools.partial(
salt.utils.user.chugid_and_umask,
runas,
_umask,
group)
if not salt.utils.platform.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
if new_kwargs['shell'] is True:
new_kwargs['executable'] = shell
new_kwargs['close_fds'] = True
if not os.path.isabs(cwd) or not os.path.isdir(cwd):
raise CommandExecutionError(
'Specified cwd \'{0}\' is either not absolute or does not exist'
.format(cwd)
)
if python_shell is not True \
and not salt.utils.platform.is_windows() \
and not isinstance(cmd, list):
cmd = salt.utils.args.shlex_split(cmd)
if success_retcodes is None:
success_retcodes = [0]
else:
try:
success_retcodes = [int(i) for i in
salt.utils.args.split_input(
success_retcodes
)]
except ValueError:
raise SaltInvocationError(
'success_retcodes must be a list of integers'
)
if not use_vt:
# This is where the magic happens
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **new_kwargs)
except (OSError, IOError) as exc:
msg = (
'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: '.format(
cmd if output_loglevel is not None else 'REDACTED',
new_kwargs
)
)
try:
if exc.filename is None:
msg += 'command not found'
else:
msg += '{0}: {1}'.format(exc, exc.filename)
except AttributeError:
# Both IOError and OSError have the filename attribute, so this
# is a precaution in case the exception classes in the previous
# try/except are changed.
msg += 'unknown'
raise CommandExecutionError(msg)
try:
proc.run()
except TimedProcTimeoutError as exc:
ret['stdout'] = six.text_type(exc)
ret['stderr'] = ''
ret['retcode'] = None
ret['pid'] = proc.process.pid
# ok return code for timeouts?
ret['retcode'] = 1
return ret
if output_loglevel is not None and output_encoding is not None:
log.debug('Decoding output from command %s using %s encoding',
cmd, output_encoding)
try:
out = salt.utils.stringutils.to_unicode(
proc.stdout,
encoding=output_encoding)
except TypeError:
# stdout is None
out = ''
except UnicodeDecodeError:
out = salt.utils.stringutils.to_unicode(
proc.stdout,
encoding=output_encoding,
errors='replace')
if output_loglevel is not None:
log.error(
'Failed to decode stdout from command %s, non-decodable '
'characters have been replaced', cmd
)
try:
err = salt.utils.stringutils.to_unicode(
proc.stderr,
encoding=output_encoding)
except TypeError:
# stderr is None
err = ''
except UnicodeDecodeError:
err = salt.utils.stringutils.to_unicode(
proc.stderr,
encoding=output_encoding,
errors='replace')
if output_loglevel is not None:
log.error(
'Failed to decode stderr from command %s, non-decodable '
'characters have been replaced', cmd
)
if rstrip:
if out is not None:
out = out.rstrip()
if err is not None:
err = err.rstrip()
ret['pid'] = proc.process.pid
ret['retcode'] = proc.process.returncode
if ret['retcode'] in success_retcodes:
ret['retcode'] = 0
ret['stdout'] = out
ret['stderr'] = err
else:
formatted_timeout = ''
if timeout:
formatted_timeout = ' (timeout: {0}s)'.format(timeout)
if output_loglevel is not None:
msg = 'Running {0} in VT{1}'.format(cmd, formatted_timeout)
log.debug(log_callback(msg))
stdout, stderr = '', ''
now = time.time()
if timeout:
will_timeout = now + timeout
else:
will_timeout = -1
try:
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=new_kwargs.get('preexec_fn', None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True
)
ret['pid'] = proc.pid
while proc.has_unread_data:
try:
try:
time.sleep(0.5)
try:
cstdout, cstderr = proc.recv()
except IOError:
cstdout, cstderr = '', ''
if cstdout:
stdout += cstdout
else:
cstdout = ''
if cstderr:
stderr += cstderr
else:
cstderr = ''
if timeout and (time.time() > will_timeout):
ret['stderr'] = (
'SALT: Timeout after {0}s\n{1}').format(
timeout, stderr)
ret['retcode'] = None
break
except KeyboardInterrupt:
ret['stderr'] = 'SALT: User break\n{0}'.format(stderr)
ret['retcode'] = 1
break
except salt.utils.vt.TerminalException as exc:
log.error('VT: %s', exc,
exc_info_on_loglevel=logging.DEBUG)
ret = {'retcode': 1, 'pid': '2'}
break
# only set stdout on success as we already mangled in other
# cases
ret['stdout'] = stdout
if not proc.isalive():
# Process terminated, i.e., not canceled by the user or by
# the timeout
ret['stderr'] = stderr
ret['retcode'] = proc.exitstatus
if ret['retcode'] in success_retcodes:
ret['retcode'] = 0
ret['pid'] = proc.pid
finally:
proc.close(terminate=True, kill=True)
try:
if ignore_retcode:
__context__['retcode'] = 0
else:
__context__['retcode'] = ret['retcode']
except NameError:
# Ignore the context error during grain generation
pass
# Log the output
if output_loglevel is not None:
if not ignore_retcode and ret['retcode'] != 0:
if output_loglevel < LOG_LEVELS['error']:
output_loglevel = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
if ret['stdout']:
log.log(output_loglevel, 'stdout: {0}'.format(log_callback(ret['stdout'])))
if ret['stderr']:
log.log(output_loglevel, 'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(output_loglevel, 'retcode: {0}'.format(ret['retcode']))
return ret
def _run_quiet(cmd,
cwd=None,
stdin=None,
output_encoding=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
template=None,
umask=None,
timeout=None,
reset_system_locale=True,
saltenv='base',
pillarenv=None,
pillar_override=None,
success_retcodes=None):
'''
Helper for running commands quietly for minion startup
'''
return _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
output_encoding=output_encoding,
output_loglevel='quiet',
log_callback=None,
shell=shell,
python_shell=python_shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_override=pillar_override,
success_retcodes=success_retcodes)['stdout']
def _run_all_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
template=None,
umask=None,
timeout=None,
reset_system_locale=True,
saltenv='base',
pillarenv=None,
pillar_override=None,
output_encoding=None,
success_retcodes=None):
'''
Helper for running commands quietly for minion startup.
Returns a dict of return data.
Any output_loglevel argument from the caller is ignored. This is here for when we alias
cmd.run_all directly to _run_all_quiet in certain chicken-and-egg
situations where modules need to work both before and after
the __salt__ dictionary is populated (cf dracr.py)
'''
return _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
output_encoding=output_encoding,
output_loglevel='quiet',
log_callback=None,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_override=pillar_override,
success_retcodes=success_retcodes)
def run(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
bg=False,
password=None,
encoded_cmd=False,
raise_err=False,
prepend_path=None,
success_retcodes=None,
**kwargs):
r'''
Execute the passed command and return the output as a string
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running.
:param str group: Group to run command as. Not currently supported
on Windows.
:param str password: Windows only. Only required when the minion process
is running under a non-privileged account. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If ``False``, let python handle the positional
arguments. Set to ``True`` to use shell features, such as pipes or
redirection.
:param bool bg: If ``True``, run command in background and do not await or
deliver its results
.. versionadded:: 2016.3.0
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool encoded_cmd: Specify if the supplied command is encoded.
Only applies to shell 'powershell'.
:param bool raise_err: If ``True`` and the command has a nonzero exit code,
a CommandExecutionError exception will be raised.
.. warning::
This function does not process commands through a shell
unless the python_shell flag is set to True. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or
have the python_shell=True flag set here.
The use of python_shell=True means that the shell will accept _any_ input
including potentially malicious commands such as 'good_command;rm -rf /'.
Be absolutely certain that you have sanitized your input prior to using
python_shell=True
:param list success_retcodes: This parameter allows a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.run cmd='sed -e s/=/:/g'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
bg=bg,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
**kwargs)
log_callback = _check_cb(log_callback)
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret['retcode'] != 0:
if lvl < LOG_LEVELS['error']:
lvl = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
if raise_err:
raise CommandExecutionError(
log_callback(ret['stdout'] if not hide_output else '')
)
log.log(lvl, 'output: %s', log_callback(ret['stdout']))
return ret['stdout'] if not hide_output else ''
def shell(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
bg=False,
password=None,
prepend_path=None,
success_retcodes=None,
**kwargs):
'''
Execute the passed command and return the output as a string.
.. versionadded:: 2015.5.0
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str group: Group to run command as. Not currently supported
on Windows.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool bg: If True, run command in background and do not await or
deliver its results
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.shell 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
.. warning::
This passes the cmd argument directly to the shell without any further
processing! Be absolutely sure that you have properly sanitized the
command passed to this function and do not use untrusted inputs.
:param list success_retcodes: This parameter allows a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.shell "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.shell template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.shell "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.shell "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.shell cmd='sed -e s/=/:/g'
'''
if 'python_shell' in kwargs:
python_shell = kwargs.pop('python_shell')
else:
python_shell = True
return run(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
group=group,
shell=shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
hide_output=hide_output,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
bg=bg,
password=password,
success_retcodes=success_retcodes,
**kwargs)
def run_stdout(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
password=None,
prepend_path=None,
success_retcodes=None,
**kwargs):
'''
Execute a command, and only return the standard out
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_stdout 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter accepts a list of
non-zero return codes that should be considered successful. If the
return code from the run matches any in the provided list, the
return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: If ``True``, Salt will not automatically
convert the characters ``\\n`` present in the ``stdin`` value to newlines.
Defaults to ``False``.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
**kwargs)
return ret['stdout'] if not hide_output else ''
def run_stderr(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
password=None,
prepend_path=None,
success_retcodes=None,
**kwargs):
'''
Execute a command and only return the standard error
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_stderr 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter accepts a list of
non-zero return codes that should be considered successful. If the
return code from the run matches any in the provided list, the
return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: If ``True``, Salt will not automatically
convert the characters ``\\n`` present in the ``stdin`` value to newlines.
Defaults to ``False``.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
use_vt=use_vt,
saltenv=saltenv,
password=password,
success_retcodes=success_retcodes,
**kwargs)
return ret['stderr'] if not hide_output else ''
def run_all(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
redirect_stderr=False,
password=None,
encoded_cmd=False,
prepend_path=None,
success_retcodes=None,
**kwargs):
'''
Execute the passed command and return a dict of return data
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_all 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool encoded_cmd: Specify if the supplied command is encoded.
Only applies to shell 'powershell'.
.. versionadded:: 2018.3.0
:param bool redirect_stderr: If set to ``True``, then stderr will be
redirected to stdout. This is helpful for cases where obtaining both
the retcode and output is desired, but it is not desired to have the
output separated into both stdout and stderr.
.. versionadded:: 2015.8.2
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param bool bg: If ``True``, run command in background and do not await or
deliver its results
.. versionadded:: 2016.3.6
:param list success_retcodes: This parameter accepts a list of
non-zero return codes that should be considered successful. If the
return code from the run matches any in the provided list, the
return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: If ``True``, Salt will not automatically
convert the characters ``\\n`` present in the ``stdin`` value to newlines.
Defaults to ``False``.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
stderr = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
ret = _run(cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
stderr=stderr,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
**kwargs)
if hide_output:
ret['stdout'] = ret['stderr'] = ''
return ret
def retcode(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
password=None,
success_retcodes=None,
**kwargs):
'''
Execute a shell command and return the command's return code.
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.retcode 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:rtype: int or None
:returns: Return Code as an int, or None if there was an exception.
:param list success_retcodes: This parameter accepts a list of
non-zero return codes that should be considered successful. If the
return code from the run matches any in the provided list, the
return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: If ``True``, Salt will not automatically
convert the characters ``\\n`` present in the ``stdin`` value to newlines.
Defaults to ``False``.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.retcode "file /bin/bash"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
**kwargs)
return ret['retcode']
def _retcode_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
template=None,
umask=None,
output_encoding=None,
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
password=None,
success_retcodes=None,
**kwargs):
'''
Helper for running commands quietly for minion startup. Returns the same
result as the retcode() function.
'''
return retcode(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
output_encoding=output_encoding,
output_loglevel='quiet',
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
**kwargs)
def script(source,
args=None,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
template=None,
umask=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
saltenv='base',
use_vt=False,
bg=False,
password=None,
success_retcodes=None,
**kwargs):
'''
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
:param str source: The location of the script to download. If the file is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
:param str args: String of command line args to pass to the script. Only
used if no args are specified as part of the `name` argument. To pass a
string containing spaces in YAML, you will need to doubly-quote it:
.. code-block:: bash
salt myminion cmd.script salt://foo.sh "arg1 'arg two' arg3"
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run script as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param bool bg: If ``True``, run the script in the background and do not
await or deliver its results
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.script 'some command' env='{"FOO": "bar"}'
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: If the command has not terminated after timeout
seconds, send the subprocess sigterm, and if sigterm is ignored, follow
up with sigkill
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter accepts a list of
non-zero return codes that should be considered successful. If the
return code from the run matches any in the provided list, the
return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: If ``True``, Salt will not automatically
convert the characters ``\\n`` present in the ``stdin`` value to newlines.
Defaults to ``False``.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh
salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
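The script can also be executed as a different user and from a specific
working directory (illustrative; the user must exist on the minion):
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh runas=nobody cwd=/tmp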
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
def _cleanup_tempfile(path):
try:
__salt__['file.remove'](path)
except (SaltInvocationError, CommandExecutionError) as exc:
log.error(
'cmd.script: Unable to clean tempfile \'%s\': %s',
path, exc, exc_info_on_loglevel=logging.DEBUG
)
if '__env__' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('__env__')
win_cwd = False
if salt.utils.platform.is_windows() and runas and cwd is None:
# Create a temp working directory
cwd = tempfile.mkdtemp(dir=__opts__['cachedir'])
win_cwd = True
salt.utils.win_dacl.set_permissions(obj_name=cwd,
principal=runas,
permissions='full_control')
path = salt.utils.files.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])
if template:
if 'pillarenv' in kwargs or 'pillar' in kwargs:
pillarenv = kwargs.get('pillarenv', __opts__.get('pillarenv'))
kwargs['pillar'] = _gather_pillar(pillarenv, kwargs.get('pillar'))
fn_ = __salt__['cp.get_template'](source,
path,
template,
saltenv,
**kwargs)
if not fn_:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
else:
fn_ = __salt__['cp.cache_file'](source, saltenv)
if not fn_:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
shutil.copyfile(fn_, path)
if not salt.utils.platform.is_windows():
os.chmod(path, 320)  # 320 == 0o500: read/execute for the owner only
os.chown(path, __salt__['file.user_to_uid'](runas), -1)
path = _cmd_quote(path)
ret = _run(path + ' ' + six.text_type(args) if args else path,
cwd=cwd,
stdin=stdin,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
env=env,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
use_vt=use_vt,
bg=bg,
password=password,
success_retcodes=success_retcodes,
**kwargs)
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
if hide_output:
ret['stdout'] = ret['stderr'] = ''
return ret
def script_retcode(source,
args=None,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
saltenv='base',
output_encoding=None,
output_loglevel='debug',
log_callback=None,
use_vt=False,
password=None,
success_retcodes=None,
**kwargs):
'''
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
The script can also be formatted as a template; the default templating
engine is jinja.
Only evaluate the script return code and do not block for terminal output
:param str source: The location of the script to download. If the file is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
:param str args: String of command line args to pass to the script. Only
used if no args are specified as part of the `name` argument. To pass a
string containing spaces in YAML, you will need to doubly-quote it:
"arg1 'arg two' arg3"
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str group: Group to run script as. Not currently supported
on Windows.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.script_retcode 'some command' env='{"FOO": "bar"}'
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param int timeout: If the command has not terminated after timeout
seconds, send the subprocess sigterm, and if sigterm is ignored, follow
up with sigkill
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter accepts a list of
non-zero return codes that should be considered successful. If the
return code from the run matches any in the provided list, the
return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: If ``True``, Salt will not automatically
convert the characters ``\\n`` present in the ``stdin`` value to newlines.
Defaults to ``False``.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh
salt '*' cmd.script_retcode salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script_retcode salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
if '__env__' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('__env__')
return script(source=source,
args=args,
cwd=cwd,
stdin=stdin,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
use_vt=use_vt,
password=password,
success_retcodes=success_retcodes,
**kwargs)['retcode']
def which(cmd):
'''
Returns the path of an executable available on the minion, or None if the
executable is not found
CLI Example:
.. code-block:: bash
salt '*' cmd.which cat
'''
return salt.utils.path.which(cmd)
def which_bin(cmds):
'''
Returns the first command found in a list of commands
CLI Example:
.. code-block:: bash
salt '*' cmd.which_bin '[pip2, pip, pip-python]'
'''
return salt.utils.path.which_bin(cmds)
def has_exec(cmd):
'''
Returns true if the executable is available on the minion, false otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.has_exec cat
'''
return which(cmd) is not None
def exec_code(lang, code, cwd=None, args=None, **kwargs):
'''
Pass in two strings: the first names the executable language (e.g.
python2, python3, ruby, perl, lua), and the second contains the code you
wish to execute. The stdout of the command will be returned.
All parameters from :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` except python_shell can be used.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code ruby 'puts "cheese"'
salt '*' cmd.exec_code ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}'
'''
return exec_code_all(lang, code, cwd, args, **kwargs)['stdout']
def exec_code_all(lang, code, cwd=None, args=None, **kwargs):
'''
Pass in two strings: the first names the executable language (e.g.
python2, python3, ruby, perl, lua), and the second contains the code you
wish to execute. All cmd artifacts (stdout, stderr, retcode, pid) will be
returned.
All parameters from :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` except python_shell can be used.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code_all ruby 'puts "cheese"'
salt '*' cmd.exec_code_all ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}'
'''
powershell = lang.lower().startswith("powershell")
if powershell:
codefile = salt.utils.files.mkstemp(suffix=".ps1")
else:
codefile = salt.utils.files.mkstemp()
with salt.utils.files.fopen(codefile, 'w+t', binary=False) as fp_:
fp_.write(salt.utils.stringutils.to_str(code))
if powershell:
cmd = [lang, "-File", codefile]
else:
cmd = [lang, codefile]
if isinstance(args, six.string_types):
cmd.append(args)
elif isinstance(args, list):
cmd += args
ret = run_all(cmd, cwd=cwd, python_shell=False, **kwargs)
os.remove(codefile)
return ret
def tty(device, echo=''):
'''
Echo a string to a specific tty
CLI Example:
.. code-block:: bash
salt '*' cmd.tty tty0 'This is a test'
salt '*' cmd.tty pts3 'This is a test'
'''
if device.startswith('tty'):
teletype = '/dev/{0}'.format(device)
elif device.startswith('pts'):
teletype = '/dev/{0}'.format(device.replace('pts', 'pts/'))
else:
return {'Error': 'The specified device is not a valid TTY'}
try:
with salt.utils.files.fopen(teletype, 'wb') as tty_device:
tty_device.write(salt.utils.stringutils.to_bytes(echo))
return {
'Success': 'Message was successfully echoed to {0}'.format(teletype)
}
except IOError:
return {
'Error': 'Echoing to {0} returned error'.format(teletype)
}
def run_chroot(root,
cmd,
cwd=None,
stdin=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=True,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='quiet',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
bg=False,
success_retcodes=None,
**kwargs):
'''
.. versionadded:: 2014.7.0
This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped
within a chroot, with dev and proc mounted in the chroot
:param str root: Path to the root of the jail to use.
:param str group: Group to run the command as. Not currently supported on
Windows.
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_chroot 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip:
Strip all whitespace off the end of output before it is returned.
:param str umask:
The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout:
A timeout in seconds for the executed process to return.
:param bool use_vt:
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs. This is experimental.
:param list success_retcodes: This parameter accepts a list of non-zero
return codes that should be considered successful. If the return code
from the run matches any in the provided list, the return code will be
overridden with zero.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.run_chroot /var/lib/lxc/container_name/rootfs 'sh /tmp/bootstrap.sh'
'''
__salt__['mount.mount'](
os.path.join(root, 'dev'),
'udev',
fstype='devtmpfs')
__salt__['mount.mount'](
os.path.join(root, 'proc'),
'proc',
fstype='proc')
# Execute chroot routine
sh_ = '/bin/sh'
if os.path.isfile(os.path.join(root, 'bin/bash')):
sh_ = '/bin/bash'
if isinstance(cmd, (list, tuple)):
cmd = ' '.join([six.text_type(i) for i in cmd])
cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))
run_func = __context__.pop('cmd.run_chroot.func', run_all)
ret = run_func(cmd,
runas=runas,
group=group,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar=kwargs.get('pillar'),
use_vt=use_vt,
success_retcodes=success_retcodes,
bg=bg)
# Kill processes running in the chroot
for i in range(6):
pids = _chroot_pids(root)
if not pids:
break
for pid in pids:
# use sig 15 (TERM) for first 3 attempts, then 9 (KILL)
sig = 15 if i < 3 else 9
os.kill(pid, sig)
if _chroot_pids(root):
log.error('Processes running in chroot could not be killed, '
'filesystem will remain mounted')
__salt__['mount.umount'](os.path.join(root, 'proc'))
__salt__['mount.umount'](os.path.join(root, 'dev'))
if hide_output:
ret['stdout'] = ret['stderr'] = ''
return ret
def _is_valid_shell(shell):
'''
Attempts to search for valid shells on a system and
see if a given shell is in the list
'''
if salt.utils.platform.is_windows():
return True # Don't even try this for Windows
shells = '/etc/shells'
available_shells = []
if os.path.exists(shells):
try:
with salt.utils.files.fopen(shells, 'r') as shell_fp:
lines = [salt.utils.stringutils.to_unicode(x)
for x in shell_fp.read().splitlines()]
for line in lines:
if line.startswith('#'):
continue
else:
available_shells.append(line)
except OSError:
return True
else:
# No known method of determining available shells
return None
if shell in available_shells:
return True
else:
return False
def shells():
'''
Lists the valid shells on this system via the /etc/shells file
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' cmd.shells
'''
shells_fn = '/etc/shells'
ret = []
if os.path.exists(shells_fn):
try:
with salt.utils.files.fopen(shells_fn, 'r') as shell_fp:
lines = [salt.utils.stringutils.to_unicode(x)
for x in shell_fp.read().splitlines()]
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
elif not line:
continue
else:
ret.append(line)
except OSError:
log.error("File '%s' was not found", shells_fn)
return ret
def shell_info(shell, list_modules=False):
'''
.. versionadded:: 2016.11.0
Provides information about a shell or scripting language which often uses
``#!``. The values returned depend on the shell or scripting language, but
all return ``installed``, ``path``, ``version``, and ``version_raw``.
Args:
shell (str): Name of the shell. Supported shells/scripting languages
include bash, cmd, perl, php, powershell, python, ruby, and zsh.
list_modules (bool): True to list modules available to the shell.
Currently only lists powershell modules.
Returns:
dict: A dictionary of information about the shell
.. code-block:: python
{'version': '<2 or 3 numeric components dot-separated>',
'version_raw': '<full version string>',
'path': '<full path to binary>',
'installed': <True, False or None>,
'<attribute>': '<attribute value>'}
.. note::
- ``installed`` is always returned; if it is ``None`` or ``False``, an
``error`` key is also returned and ``stdout`` may be included for diagnostics.
- ``version`` is intended for determining whether a shell/scripting
language has a particular feature set, not for package management.
- The shell must be within the executable search path.
CLI Example:
.. code-block:: bash
salt '*' cmd.shell_info bash
salt '*' cmd.shell_info powershell
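Module enumeration is currently only implemented for powershell, via the
``list_modules`` argument (illustrative invocation):
.. code-block:: bash
salt '*' cmd.shell_info powershell list_modules=True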
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
regex_shells = {
'bash': [r'version (\d\S*)', 'bash', '--version'],
'bash-test-error': [r'versioZ ([-\w.]+)', 'bash', '--version'], # used to test an error result
'bash-test-env': [r'(HOME=.*)', 'bash', '-c', 'declare'], # used to test an error result
'zsh': [r'^zsh (\d\S*)', 'zsh', '--version'],
'tcsh': [r'^tcsh (\d\S*)', 'tcsh', '--version'],
'cmd': [r'Version ([\d.]+)', 'cmd.exe', '/C', 'ver'],
'powershell': [r'PSVersion\s+(\d\S*)', 'powershell', '-NonInteractive', '$PSVersionTable'],
'perl': [r'^(\d\S*)', 'perl', '-e', 'printf "%vd\n", $^V;'],
'python': [r'^Python (\d\S*)', 'python', '-V'],
'ruby': [r'^ruby (\d\S*)', 'ruby', '-v'],
'php': [r'^PHP (\d\S*)', 'php', '-v']
}
# Ensure ret['installed'] always has a value of True, False, or None
# (None meaning undetermined)
ret = {'installed': False}
if salt.utils.platform.is_windows() and shell == 'powershell':
pw_keys = salt.utils.win_reg.list_keys(
hive='HKEY_LOCAL_MACHINE',
key='Software\\Microsoft\\PowerShell')
pw_keys.sort(key=int)
if len(pw_keys) == 0:
return {
'error': 'Unable to locate \'powershell\' Reason: Cannot be '
'found in registry.',
'installed': False,
}
for reg_ver in pw_keys:
install_data = salt.utils.win_reg.read_value(
hive='HKEY_LOCAL_MACHINE',
key='Software\\Microsoft\\PowerShell\\{0}'.format(reg_ver),
vname='Install')
if install_data.get('vtype') == 'REG_DWORD' and \
install_data.get('vdata') == 1:
details = salt.utils.win_reg.list_values(
hive='HKEY_LOCAL_MACHINE',
key='Software\\Microsoft\\PowerShell\\{0}\\'
'PowerShellEngine'.format(reg_ver))
# Reset data; we only want the newest version's details, as PowerShell
# is backwards compatible
ret = {}
# if all goes well this will become True
ret['installed'] = None
ret['path'] = which('powershell.exe')
for attribute in details:
if attribute['vname'].lower() == '(default)':
continue
elif attribute['vname'].lower() == 'powershellversion':
ret['psversion'] = attribute['vdata']
ret['version_raw'] = attribute['vdata']
elif attribute['vname'].lower() == 'runtimeversion':
ret['crlversion'] = attribute['vdata']
if ret['crlversion'][0].lower() == 'v':
ret['crlversion'] = ret['crlversion'][1::]
elif attribute['vname'].lower() == 'pscompatibleversion':
# The registry attribute does not end in 's', but the PowerShell
# attribute does
ret['pscompatibleversions'] = \
attribute['vdata'].replace(' ', '').split(',')
else:
# Keys are lowercased because Python is case-sensitive while the
# registry is not
ret[attribute['vname'].lower()] = attribute['vdata']
else:
if shell not in regex_shells:
return {
'error': 'Salt does not know how to get the version number for '
'{0}'.format(shell),
'installed': None
}
shell_data = regex_shells[shell]
pattern = shell_data.pop(0)
# We need to make sure HOME is set so shells work correctly.
# salt-call will generally have HOME set; the salt-minion service may not.
# We assume that Windows ports of unix shells look after setting HOME
# themselves, as they do it in many different ways.
newenv = os.environ
if ('HOME' not in newenv) and (not salt.utils.platform.is_windows()):
newenv['HOME'] = os.path.expanduser('~')
log.debug('HOME environment set to %s', newenv['HOME'])
try:
proc = salt.utils.timed_subprocess.TimedProc(
shell_data,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=10,
env=newenv
)
except (OSError, IOError) as exc:
return {
'error': 'Unable to run command \'{0}\' Reason: {1}'.format(' '.join(shell_data), exc),
'installed': False,
}
try:
proc.run()
except TimedProcTimeoutError as exc:
return {
'error': 'Unable to run command \'{0}\' Reason: Timed out.'.format(' '.join(shell_data)),
'installed': False,
}
ret['path'] = which(shell_data[0])
pattern_result = re.search(pattern, proc.stdout, flags=re.IGNORECASE)
# only set version if we find it, so code later on can deal with it
if pattern_result:
ret['version_raw'] = pattern_result.group(1)
if 'version_raw' in ret:
version_results = re.match(r'(\d[\d.]*)', ret['version_raw'])
if version_results:
ret['installed'] = True
ver_list = version_results.group(1).split('.')[:3]
if len(ver_list) == 1:
ver_list.append('0')
ret['version'] = '.'.join(ver_list[:3])
else:
ret['installed'] = None # Have an unexpected result
# Get a list of the PowerShell modules which are potentially available
# to be imported
if shell == 'powershell' and ret['installed'] and list_modules:
ret['modules'] = salt.utils.powershell.get_modules()
if 'version' not in ret:
ret['error'] = 'The version regex pattern for shell {0} could not ' \
'find the version string'.format(shell)
ret['stdout'] = proc.stdout # include stdout so they can see the issue
log.error(ret['error'])
return ret
def powershell(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
password=None,
depth=None,
encode_cmd=False,
success_retcodes=None,
**kwargs):
'''
Execute the passed PowerShell command and return the output as a dictionary.
Other ``cmd.*`` functions (besides ``cmd.powershell_all``)
return the raw text output of the command. This
function appends ``| ConvertTo-JSON`` to the command and then parses the
JSON into a Python dictionary. If you want the raw textual result of your
PowerShell command you should use ``cmd.run`` with the ``shell=powershell``
option.
For example:
.. code-block:: bash
salt '*' cmd.run '$PSVersionTable.CLRVersion' shell=powershell
salt '*' cmd.run 'Get-NetTCPConnection' shell=powershell
.. versionadded:: 2016.3.0
.. warning::
This passes the cmd argument directly to PowerShell
without any further processing! Be absolutely sure that you
have properly sanitized the command passed to this function
and do not use untrusted inputs.
In addition to the normal ``cmd.run`` parameters, this command offers the
``depth`` parameter to change the Windows default depth for the
``ConvertTo-JSON`` powershell command. The Windows default is 2. If you need
more depth, set that here.
.. note::
For some commands, setting the depth to a value greater than 4 greatly
increases the time it takes for the command to return and in many cases
returns useless data.
:param str cmd: The powershell command to run.
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.powershell 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool reset_system_locale: Resets the system locale
:param str saltenv: The salt environment to use. Default is 'base'
:param int depth: The number of levels of contained objects to be included.
Default is 2. Values greater than 4 seem to greatly increase the time
it takes for the command to complete for some commands. eg: ``dir``
.. versionadded:: 2016.3.4
:param bool encode_cmd: Encode the command before executing. Use in cases
where characters may be dropped or incorrectly converted when executed.
Default is False.
:param list success_retcodes: This parameter allows a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: Fluorine
:returns:
:dict: A dictionary of data returned by the powershell command.
CLI Example:
.. code-block:: powershell
salt '*' cmd.powershell "$PSVersionTable.CLRVersion"
'''
if 'python_shell' in kwargs:
python_shell = kwargs.pop('python_shell')
else:
python_shell = True
# Append PowerShell Object formatting
# ConvertTo-JSON is only available on PowerShell 3.0 and later
psversion = shell_info('powershell')['psversion']
if salt.utils.versions.version_cmp(psversion, '2.0') == 1:
cmd += ' | ConvertTo-JSON'
if depth is not None:
cmd += ' -Depth {0}'.format(depth)
if encode_cmd:
# Convert the cmd to UTF-16LE without a BOM and base64 encode.
# Just base64 encoding UTF-8 or including a BOM is not valid.
log.debug('Encoding PowerShell command \'%s\'', cmd)
cmd_utf16 = cmd.decode('utf-8').encode('utf-16le')
cmd = base64.standard_b64encode(cmd_utf16)
encoded_cmd = True
else:
encoded_cmd = False
# Put the whole command inside a try / catch block
# Some errors in PowerShell are not "Terminating Errors" and will not be
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
cmd = 'try {' + cmd + '} catch { "{}" }'
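# e.g. 'Get-Service Foo | ConvertTo-JSON' becomes
# 'try {Get-Service Foo | ConvertTo-JSON} catch { "{}" }'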
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell='powershell',
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
hide_output=hide_output,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
**kwargs)
try:
return salt.utils.json.loads(response)
except Exception:
log.error("Error converting PowerShell JSON return", exc_info=True)
return {}
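# Illustrative, self-contained sketch (not part of Salt's public API) of the
# encoding performed above when ``encode_cmd=True``: PowerShell's
# -EncodedCommand flag expects the command base64-encoded over UTF-16LE with
# no BOM; base64 over UTF-8, or including a BOM, is not accepted. Assumes a
# Python 3 ``str`` input.
def _example_encode_ps_command(example_cmd):
    import base64
    cmd_utf16 = example_cmd.encode('utf-16le')  # UTF-16LE, no BOM
    return base64.standard_b64encode(cmd_utf16)
# _example_encode_ps_command('Get-Date') == b'RwBlAHQALQBEAGEAdABlAA=='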
def powershell_all(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_encoding=None,
output_loglevel='debug',
quiet=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
password=None,
depth=None,
encode_cmd=False,
force_list=False,
success_retcodes=None,
**kwargs):
'''
Execute the passed PowerShell command and return a dictionary with a result
field representing the output of the command, as well as other fields
showing us what the PowerShell invocation wrote to ``stderr``, the process
id, and the exit code of the invocation.
This function appends ``| ConvertTo-JSON`` to the command before actually
invoking powershell.
An unquoted empty string is not valid JSON, but it's very normal for the
Powershell output to be exactly that. Therefore, we do not attempt to parse
empty Powershell output (which would result in an exception). Instead we
treat this as a special case and one of two things will happen:
- If the value of the ``force_list`` parameter is ``True``, then the
``result`` field of the return dictionary will be an empty list.
- If the value of the ``force_list`` parameter is ``False``, then the
return dictionary **will not have a result key added to it**. We aren't
setting ``result`` to ``None`` in this case, because ``None`` is the
Python representation of "null" in JSON. (We likewise can't use ``False``
for the equivalent reason.)
If Powershell's output is not an empty string and Python cannot parse its
content, then a ``CommandExecutionError`` exception will be raised.
If Powershell's output is not an empty string, Python is able to parse its
content, and the type of the resulting Python object is other than ``list``
then one of two things will happen:
- If the value of the ``force_list`` parameter is ``True``, then the
``result`` field will be a singleton list with the Python object as its
sole member.
- If the value of the ``force_list`` parameter is ``False``, then the value
of ``result`` will be the unmodified Python object.
If Powershell's output is not an empty string, Python is able to parse its
content, and the type of the resulting Python object is ``list``, then the
value of ``result`` will be the unmodified Python object. The
``force_list`` parameter has no effect in this case.
.. note::
An example of why the ``force_list`` parameter is useful is as
follows: The Powershell command ``dir x | ConvertTo-Json`` results in
- no output when x is an empty directory.
- a dictionary object when x contains just one item.
- a list of dictionary objects when x contains multiple items.
By setting ``force_list`` to ``True`` we will always end up with a
list of dictionary items, representing files, no matter how many files
x contains. Conversely, if ``force_list`` is ``False``, we will end
up with no ``result`` key in our return dictionary when x is an empty
directory, and a dictionary object when x contains just one file.
If you want a similar function but with a raw textual result instead of a
Python dictionary, you should use ``cmd.run_all`` in combination with
``shell=powershell``.
The remaining fields in the return dictionary are described in more detail
in the ``Returns`` section.
Example:
.. code-block:: bash
salt '*' cmd.run_all '$PSVersionTable.CLRVersion' shell=powershell
salt '*' cmd.run_all 'Get-NetTCPConnection' shell=powershell
.. versionadded:: 2018.3.0
.. warning::
This passes the cmd argument directly to PowerShell without any further
processing! Be absolutely sure that you have properly sanitized the
command passed to this function and do not use untrusted inputs.
In addition to the normal ``cmd.run`` parameters, this command offers the
``depth`` parameter to change the Windows default depth for the
``ConvertTo-JSON`` powershell command. The Windows default is 2. If you need
more depth, set that here.
.. note::
For some commands, setting the depth to a value greater than 4 greatly
increases the time it takes for the command to return and in many cases
returns useless data.
:param str cmd: The powershell command to run.
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in
cases where sensitive information must be read from standard input.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.powershell_all 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param int timeout: A timeout in seconds for the executed process to
return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool reset_system_locale: Resets the system locale
:param str saltenv: The salt environment to use. Default is 'base'
:param int depth: The number of levels of contained objects to be included.
Default is 2. Values greater than 4 seem to greatly increase the time
it takes for the command to complete for some commands. eg: ``dir``
:param bool encode_cmd: Encode the command before executing. Use in cases
where characters may be dropped or incorrectly converted when executed.
Default is False.
:param bool force_list: The purpose of this parameter is described in the
preamble of this function's documentation. Default value is False.
:param list success_retcodes: This parameter allows a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: Fluorine
:return: A dictionary with the following entries:
result
For a complete description of this field, please refer to this
function's preamble. **This key will not be added to the dictionary
when force_list is False and Powershell's output is the empty
string.**
stderr
What the PowerShell invocation wrote to ``stderr``.
pid
The process id of the PowerShell invocation
retcode
This is the exit code of the invocation of PowerShell.
If the final execution status (in PowerShell) of our command
(with ``| ConvertTo-JSON`` appended) is ``False`` this should be non-0.
Likewise if PowerShell exited with ``$LASTEXITCODE`` set to some
non-0 value, then ``retcode`` will end up with this value.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' cmd.powershell_all "$PSVersionTable.CLRVersion"
CLI Example:
.. code-block:: bash
salt '*' cmd.powershell_all "dir mydirectory" force_list=True
'''
if 'python_shell' in kwargs:
python_shell = kwargs.pop('python_shell')
else:
python_shell = True
# Append PowerShell Object formatting
cmd += ' | ConvertTo-JSON'
if depth is not None:
cmd += ' -Depth {0}'.format(depth)
if encode_cmd:
# Convert the cmd to UTF-16LE without a BOM and base64 encode.
# Just base64 encoding UTF-8 or including a BOM is not valid.
log.debug('Encoding PowerShell command \'%s\'', cmd)
cmd_utf16 = cmd.decode('utf-8').encode('utf-16le')
cmd = base64.standard_b64encode(cmd_utf16)
encoded_cmd = True
else:
encoded_cmd = False
# Retrieve the response, while overriding shell with 'powershell'
response = run_all(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell='powershell',
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
quiet=quiet,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
password=password,
encoded_cmd=encoded_cmd,
success_retcodes=success_retcodes,
**kwargs)
stdoutput = response['stdout']
# if stdoutput is the empty string and force_list is True we return an empty list
# Otherwise we return response with no result key
if not stdoutput:
response.pop('stdout')
if force_list:
response['result'] = []
return response
# If we fail to parse stdoutput we will raise an exception
try:
result = salt.utils.json.loads(stdoutput)
except Exception:
err_msg = "cmd.powershell_all " + \
"cannot parse the Powershell output."
response["cmd"] = cmd
raise CommandExecutionError(
message=err_msg,
info=response
)
response.pop("stdout")
if type(result) is not list:
if force_list:
response['result'] = [result]
else:
response['result'] = result
else:
# result type is list so the force_list param has no effect
response['result'] = result
return response
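# Minimal standalone sketch (hypothetical helper, not Salt API) of how the
# block above folds the parsed JSON into the ``result`` key depending on
# ``force_list``; shown with the stdlib ``json`` module for brevity.
def _example_normalize_result(stdoutput, force_list=False):
    import json
    response = {}
    if not stdoutput:
        # Empty output: an empty list when force_list, otherwise no 'result'.
        if force_list:
            response['result'] = []
        return response
    parsed = json.loads(stdoutput)
    if force_list and not isinstance(parsed, list):
        response['result'] = [parsed]  # wrap a lone object in a list
    else:
        response['result'] = parsed
    return response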
def run_bg(cmd,
cwd=None,
runas=None,
group=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
umask=None,
timeout=None,
output_encoding=None,
output_loglevel='debug',
log_callback=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
password=None,
prepend_path=None,
success_retcodes=None,
**kwargs):
r'''
.. versionadded:: 2016.3.0
Execute the passed command in the background and return its PID
.. note::
If the init system is systemd and the backgrounded task should run even
if the salt-minion process is restarted, prepend ``systemd-run
--scope`` to the command. This will reparent the process in its own
scope separate from salt-minion, and will not be affected by restarting
the minion service.
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The directory from which to execute the command. Defaults
to the home directory of the user specified by ``runas`` (or the user
under which Salt is running if ``runas`` is not specified).
:param str group: Group to run command as. Not currently supported
on Windows.
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param str output_encoding: Control the encoding used to decode the
command's output.
.. note::
This should not need to be used in most cases. By default, Salt
will try to use the encoding detected from the system locale, and
will fall back to UTF-8 if this fails. This should only need to be
used in cases where the output of the command is encoded in
something other than the system locale or UTF-8.
To see the encoding Salt has detected from the system locale, check
the `locale` line in the output of :py:func:`test.versions_report
<salt.modules.test.versions_report>`.
.. versionadded:: 2018.3.0
:param str output_loglevel: Control the loglevel at which the output from
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool ignore_retcode: If the exit code of the command is nonzero,
this is treated as an error condition, and the output from the command
will be logged to the minion log. However, there are some cases where
programs use the return code for signaling and a nonzero exit code
doesn't necessarily mean failure. Pass this argument as ``True`` to
skip logging the output if the command has a nonzero exit code.
:param str runas: Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
.. versionadded:: 2016.3.0
:param str shell: Specify an alternate shell. Defaults to the system's
default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or
redirection.
:param dict env: Environment variables to be set prior to execution.
.. note::
When passing environment variables on the CLI, they should be
passed as the string representation of a dictionary.
.. code-block:: bash
salt myminion cmd.run_bg 'some command' env='{"FOO": "bar"}'
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str prepend_path: $PATH segment to prepend (trailing ':' not
necessary) to $PATH
.. versionadded:: 2018.3.0
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja,
mako, and wempy are supported.
:param str umask: The umask (in octal) to use when running the command.
:param int timeout: A timeout in seconds for the executed process to return.
.. warning::
This function does not process commands through a shell unless the
``python_shell`` argument is set to ``True``. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or have the
python_shell=True flag set here.
The use of ``python_shell=True`` means that the shell will accept _any_
input including potentially malicious commands such as 'good_command;rm
-rf /'. Be absolutely certain that you have sanitized your input prior
to using ``python_shell=True``.
:param list success_retcodes: This parameter allows a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines: False
If ``True``, Salt will not automatically convert the characters ``\\n``
present in the ``stdin`` value to newlines.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' cmd.run_bg "fstrim-all"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_bg template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run_bg "Get-ChildItem C:\\ " shell='powershell'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.run_bg cmd='ls -lR / | sed -e s/=/:/g > /tmp/dontwait'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
res = _run(cmd,
stdin=None,
stderr=None,
stdout=None,
output_encoding=output_encoding,
output_loglevel=output_loglevel,
use_vt=None,
bg=True,
with_communicate=False,
rstrip=False,
runas=runas,
group=group,
shell=shell,
python_shell=python_shell,
cwd=cwd,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
umask=umask,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
password=password,
success_retcodes=success_retcodes,
**kwargs
)
return {
'pid': res['pid']
}
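# Hedged usage sketch: one way a caller might consume run_bg and keep only the
# PID of the backgrounded process. The command string is illustrative only.
def _example_start_background_job():
    res = run_bg('sleep 30', python_shell=True)
    return res['pid']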
|
DEFAULT_CONFIG = "Windows10SystemLog.txt"
class Windows10SystemLogger:
"""
Windows10SystemLogger writes error messages to the Windows10 System log
file for every rule in the Windows10 STIG that is violated.
"""
def __init__(self, filename=DEFAULT_CONFIG):
self.filename = filename
self.log = open(filename, 'w')
self.log.write("#########################\n\n")
self.log.write("Windows10 System Audit Findings\n\n")
def __del__(self):
print("Write out")
self.log.write("#########################\n\n")
self.log.close()
def get_filename(self):
return self.filename
def lan_manager_hash_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78287r1_rule: ")
self.log.write(
"The system must be configured to prevent the storage of the LAN Manager hash of passwords.\n\n")
def remote_assistance_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78141r1_rule: ")
self.log.write(
"Solicited Remote Assistance must not be allowed.\n\n")
def windows_installer_elevated_prviliges_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77815r1_rule: ")
self.log.write(
"The Windows Installer Always install with elevated privileges must be disabled.\n\n")
def non_volume_autoplay_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78157r1_rule: ")
self.log.write(
"Autoplay must be turned off for non-volume devices.\n\n")
def annonymous_pipe_access_restricted_errmsg(self, success):
if not success:
self.log.write("Check SV-78249r1_rule: ")
self.log.write(
"Anonymous access to Named Pipes and Shares must be restricted.\n\n")
def drive_autorun_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78163r1_rule: ")
self.log.write("Autoplay must be disabled for all drives.\n\n")
def autorun_commands_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78163r1_rule: ")
self.log.write(
"The default autorun behavior must be configured to prevent autorun commands.\n\n")
def sam_anonymous_enumeration_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78235r1_rule: ")
self.log.write(
"Anonymous enumeration of SAM accounts must not be allowed.\n\n")
def sehop_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-83445r1_rule: ")
self.log.write(
"Structured Exception Handling Overwrite Protection (SEHOP) must be turned on.\n\n")
def recovery_console_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78299r1_rule: ")
self.log.write(
"The Recovery Console option must be set to prevent automatic logon to the system.\n\n")
def lanman_auth_level_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78291r1_rule: ")
self.log.write(
"The LanMan authentication level must be set to send NTLMv2 response only, and to refuse LM and NTLM.\n\n")
def winrm_service_basic_auth_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78287r1_rule: ")
self.log.write(
"The Windows Remote Management (WinRM) service must not use Basic authentication.\n\n")
def annonymous_share_enumeration_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78287r1_rule: ")
self.log.write(
"Anonymous enumeration of shares must be restricted..\n\n")
def winrm_client_basic_auth_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77825r1_rule: ")
self.log.write(
"The Windows Remote Management (WinRM) client must not use Basic authentication.\n\n")
def emet_sehop_optout_set_errmsg(self, success):
if not success:
self.log.write("Check SV-77901r2_rule: ")
self.log.write(
"The Enhanced Mitigation Experience Toolkit (EMET) system-wide Structured Exception Handler Overwrite Protection (SEHOP) must be configured to Application Opt Out.\n\n")
def emet_deephooks_set_errmsg(self, success):
if not success:
self.log.write("Check SV-77901r2_rule: ")
self.log.write(
"The Enhanced Mitigation Experience Toolkit (EMET) Default Actions and Mitigations Settings must enable Deep Hooks.\n\n")
def unencrypted_passwd_smb_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78201r1_rule: ")
self.log.write(
"Unencrypted passwords must not be sent to third-party SMB Servers.\n\n")
def smartscreen_filter_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78203r1_rule: ")
self.log.write(
"The SmartScreen filter for Microsoft Edge must be enabled.\n\n")
def hardware_device_pfw_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78207r2_rule: ")
self.log.write(
"The use of a hardware security device with Microsoft Passport for Work must be enabled.\n\n")
def smb_packet_signing_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78209r1_rule: ")
self.log.write(
"The Windows SMB server must be configured to always perform SMB packet signing.\n\n")
def client_rpc_authentication_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78145r1_rule: ")
self.log.write(
" Client computers must be required to authenticate for RPC communication.\n\n")
def unauthenticated_rpc_elient_restricted_errmsg(self, success):
if not success:
self.log.write("Check SV-78147r1_rule: ")
self.log.write(
"Unauthenticated RPC clients must be restricted from connecting to the RPC server.\n\n")
def application_event_log_size_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78009r1_rule: ")
self.log.write(
"The Application event log size must be configured to 32768 KB or greater.\n\n")
def user_installation_option_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77811r1_rule: ")
self.log.write(
"Users must be prevented from changing installation options.\n\n")
def powershell_script_block_logging_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-83411r1_rule: ")
self.log.write(
"PowerShell script block logging must be enabled.\n\n")
def tcp_port_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78019r1_rule: ")
self.log.write(
"The system must be configured to send error reports on TCP port 1232.\n\n")
def strong_session_key_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78155r1_rule: ")
self.log.write(
"The system must be configured to require a strong session key.\n\n")
def screen_saver_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78159r1_rule: ")
self.log.write(
"The machine inactivity limit must be set to 15 minutes, locking the system with the screensaver.\n\n")
def error_reports_generated_errmsg(self, success):
if not success:
self.log.write("Check SV-77949r1_rule: ")
self.log.write(
"The system must be configured to generate error reports.\n\n")
def smb_packet_signing_errmsg(self, success):
if not success:
self.log.write("Check SV-78197r1_rule: ")
self.log.write(
"The Windows SMB client must be enabled to perform SMB packet signing when possible.\n\n")
def inprivate_browsing_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78195r1_rule: ")
self.log.write(
"InPrivate browsing in Microsoft Edge must be disabled.\n\n")
def smb_packet_signing_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78193r1_rule: ")
self.log.write(
"The Windows SMB client must be configured to always perform SMB packet signing.\n\n")
def app_override_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78191r1_rule: ")
self.log.write(
"Users must not be allowed to ignore SmartScreen filter warnings for unverified files in Microsoft Edge.\n\n")
def automatic_logon_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78041r2_rule: ")
self.log.write("Automatic logons must be disabled.\n\n")
def ipv6_routing_protection_configured_errmsg(self, success):
if not success:
self.log.write("Check SV-78045r1_rule: ")
self.log.write(
"IPv6 source routing must be configured to highest protection.\n\n")
def screen_saver_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78325r1_rule: ")
self.log.write("A screen saver must be enabled on the system.\n\n")
def ip_source_routing_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78049r1_rule: ")
self.log.write(
"The system must be configured to prevent IP source routing.\n\n")
def multiple_error_reports_set_errmsg(self, success):
if not success:
self.log.write("Check SV-77987r1_rule: ")
self.log.write(
"The system must be configured to collect multiple error reports of the same event type.\n\n")
def enhanced_antispoofing_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78167r1_rule: ")
self.log.write(
"Enhanced anti-spoofing when available must be enabled for facial recognition.\n\n")
def winrm_runas_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77865r1_rule: ")
self.log.write(
"The Windows Remote Management (WinRM) service must not store RunAs credentials.\n\n")
def zone_info_saved_errmsg(self, success):
if not success:
self.log.write("Check SV-78287r1_rule: ")
self.log.write(
"Zone information must be preserved when saving attachments.\n\n")
def num_error_reports_configured_errmsg(self, success):
if not success:
self.log.write("Check SV-78033r1_rule: ")
self.log.write(
"The maximum number of error reports to archive on a system must be configured to 100 or greater.\n\n")
def lock_screen_camera_access_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78035r1_rule: ")
self.log.write(
" Camera access from the lock screen must be disabled.\n\n")
def queue_error_reports_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78037r1_rule: ")
self.log.write(
"The system must be configured to queue error reports until a local or DOD-wide collector is available.\n\n")
def lock_screen_slide_shows_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78039r1_rule: ")
self.log.write(
"The display of slide shows on the lock screen must be disabled.\n\n")
def winrm_unencrypted_traffic_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77859r1_rule: ")
self.log.write(
"The Windows Remote Management (WinRM) service must not allow unencrypted traffic.\n\n")
def smartscreen_admin_aproval_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78175r1_rule: ")
self.log.write(
"The Windows SmartScreen must be configured to require approval from an administrator before running downloaded unknown software.\n\n")
def windows_telemetry_data_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78173r1_rule: ")
self.log.write(
"Windows Telemetry must be configured to the lowest level of data sent to Microsoft.\n\n")
def classic_security_model_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78251r1_rule: ")
self.log.write(
"The system must be configured to use the Classic security model.\n\n")
def computer_identity_negotiation_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78253r1_rule: ")
self.log.write(
"Services using Local System that use Negotiate when reverting to NTLM authentication must use the computer identity vs. authenticating anonymously.\n\n")
def ntml_null_session_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78253r1_rule: ")
self.log.write(
"NTLM must be prevented from falling back to a Null session.\n\n")
def group_policy_objects_reprocess_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78099r1_rule: ")
self.log.write(
"Group Policy objects must be reprocessed even if they have not changed.\n\n")
def pku2u_authentication_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78099r1_rule: ")
self.log.write(
"PKU2U authentication using online identities must be prevented.\n\n")
def powershell_script_block_invocation_logging_errmsg(self, success):
if not success:
self.log.write("Check SV-83413r1_rule: ")
self.log.write(
"PowerShell script block invocation logging must be enabled.\n\n")
def all_error_ports_added_to_queue_errmsg(self, success):
if not success:
self.log.write("Check SV-78047r1_rule: ")
self.log.write(
"The system must be configured to add all error reports to the queue.\n\n")
def consent_override_behavior_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78065r1_rule: ")
self.log.write(
"The system must be configured to permit the default consent levels of Windows Error Reporting to override any other consent policy setting.\n\n")
def data_transmission_consent_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78061r1_rule: ")
self.log.write(
"The system must be configured to automatically consent to send all data requested by a local or DOD-wide error collection site.\n\n")
def pin_length_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78211r1_rule: ")
self.log.write(
"The minimum pin length for Microsoft Passport for Work must be 6 characters or greater.\n\n")
def encrypted_indexing_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78241r1_rule: ")
self.log.write(
"Indexing of encrypted files must be turned off.\n\n")
def password_storage_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78243r1_rule: ")
self.log.write(
"The system must be configured to prevent the storage of passwords and credentials.\n\n")
def elevated_network_domain_privlidge_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78087r1_rule: ")
self.log.write(
"Local administrator accounts must have their privileged token filtered to prevent elevated privileges from being used over the network on domain systems.\n\n")
def http_printer_driver_dl_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78105r1_rule: ")
self.log.write(
"Downloading print driver packages over HTTP must be prevented.\n\n")
def blank_passwd_accounts_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78107r1_rule: ")
self.log.write(
"Local accounts with blank passwords must be restricted to prevent access from the network.\n\n")
def wifi_sense_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78081r1_rule: ")
self.log.write("Wi-Fi Sense must be disabled.\n\n")
def emet_antidetours_set_errmsg(self, success):
if not success:
self.log.write("Check SV-77915r2_rule: ")
self.log.write(
"The Enhanced Mitigation Experience Toolkit (EMET) Default Actions and Mitigations Settings must enable Anti Detours.\n\n")
def uac_admin_mode_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78319r1_rule: ")
self.log.write(
"User Account Control must run all administrators in Admin Approval Mode, enabling UAC.\n\n")
def sys_event_log_size_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78017r1_rule: ")
self.log.write(
"The System event log size must be configured to 32768 KB or greater.\n\n")
def uac_elevate_restricted_errmsg(self, success):
if not success:
self.log.write(
"User Account Control must only elevate UIAccess applications that are installed in secure locations.\n\n")
def uac_installer_detection_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78315r1_rule: ")
self.log.write(
"User Account Control must be configured to detect application installations and prompt for elevation.\n\n")
def kerberos_encrypt_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78315r1_rule: ")
self.log.write(
"Kerberos encryption types must be configured to prevent the use of DES and RC4 encryption suites.\n\n")
def smb_server_packet_signing_possible_errmsg(self, success):
if not success:
self.log.write("Check SV-78213r1_rule: ")
self.log.write(
"The Windows SMB server must perform SMB packet signing when possible.\n\n")
def error_report_ssl_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78015r1_rule: ")
self.log.write(
"The system must be configured to use SSL to forward error reports.\n\n")
def domain_joined_computers_unenumerated_errmsg(self, success):
if not success:
self.log.write("Check SV-78015r1_rule: ")
self.log.write(
"Connected users on domain-joined computers must not be enumerated.\n\n")
def max_error_queue_reports_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78051r1_rule: ")
self.log.write(
"The maximum number of error reports to queue on a system must be configured to 50 or greater.\n\n")
def security_event_log_size_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78051r1_rule: ")
self.log.write(
"The Security event log size must be configured to 196608 KB or greater.\n\n")
def rss_feed_attachements_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78233r1_rule: ")
self.log.write(
"Attachments must be prevented from being downloaded from RSS feeds.\n\n")
def admin_account_elevation_enumeration_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78169r1_rule: ")
self.log.write(
"Administrator accounts must not be enumerated during elevation.\n\n")
def user_errmsg_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77995r1_rule: ")
self.log.write(
"The system must be configured to prevent the display of error messages to the user.\n\n")
def ignore_edge_warnings_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78189r1_rule: ")
self.log.write(
"Users must not be allowed to ignore SmartScreen filter warnings for malicious websites in Microsoft Edge.\n\n")
def wizard_provider_dl_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78189r1_rule: ")
self.log.write(
"Web publishing and online ordering wizards must be prevented from downloading a list of providers.\n\n")
def nondomain_domain_network_blocked_errmsg(self, success):
if not success:
self.log.write("Check SV-78075r1_rule: ")
self.log.write(
"Connections to non-domain networks when connected to a domain authenticated network must be blocked.\n\n")
def nui_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78119r1_rule: ")
self.log.write(
"The network selection user interface (UI) must not be displayed on the logon screen.\n\n")
def rds_encryption_level_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78231r1_rule: ")
self.log.write(
"Remote Desktop Services must be configured with the client connection encryption set to the required level.\n\n")
def screen_saver_passwd_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78231r1_rule: ")
self.log.write("The screen saver must be password protected.\n\n")
def uac_virtalilzation_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78321r1_rule: ")
self.log.write(
"User Account Control must virtualize file and registry write failures to per-user locations.\n\n")
def daily_error_reports_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78055r1_rule: ")
self.log.write(
"The system must be configured to attempt to forward queued error reports once a day.\n\n")
def annonymous_users_excluded_errmsg(self, success):
if not success:
self.log.write("Check SV-78245r1_rule: ")
self.log.write(
"The system must be configured to prevent anonymous users from having the same rights as the Everyone group.\n\n")
def error_report_archive_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78029r1_rule: ")
self.log.write(
"The system must be configured to store all data in the error report archive.\n\n")
def uac_elevation_requests_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78311r1_rule: ")
self.log.write(
"User Account Control must automatically deny elevation requests for standard users.\n\n")
def smb_insecure_login_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78059r1_rule: ")
self.log.write(
"Insecure logons to an SMB server must be disabled.\n\n")
def error_reports_archived_errmsg(self, success):
if not success:
self.log.write("Check SV-78025r1_rule: ")
self.log.write(
"The system must be configured to archive error reports.\n\n")
def remote_desktop_host_secure_rpc_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78227r1_rule: ")
self.log.write(
"The Remote Desktop Session Host must require secure RPC communications.\n\n")
def spn_client_accept_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78225r1_rule: ")
self.log.write(
" The service principal name (SPN) target name validation level must be configured to Accept if provided by client.\n\n")
def rsd_passwd_prompt_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78223r1_rule: ")
self.log.write(
"Remote Desktop Services must always prompt a client for passwords upon connection.\n\n")
def remote_desktop_session_hosts_local_drive_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78221r1_rule: ")
self.log.write(
"Local drives must be prevented from sharing with Remote Desktop Session Hosts.\n\n")
def outgoing_traffic_secured_errmsg(self, success):
if not success:
self.log.write("Check SV-78129r1_rule: ")
self.log.write(
"Outgoing secure channel traffic must be encrypted or signed.\n\n")
def pin_signin_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78127r1_rule: ")
self.log.write("Signing in using a PIN must be turned off.\n\n")
def local_user_enumeration_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78123r1_rule: ")
self.log.write(
"Local users on domain-joined computers must not be enumerated.\n\n")
def emet_banned_functions_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77923r2_rule: ")
self.log.write(
"The Enhanced Mitigation Experience Toolkit (EMET) Default Actions and Mitigations Settings must enable Banned Functions.\n\n")
def onedrive_storage_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78215r1_rule: ")
self.log.write(
"The use of OneDrive for storage must be disabled.\n\n")
def audit_policy_subcategories_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78125r1_rule: ")
self.log.write(
"Audit policy using subcategories must be enabled.\n\n")
def ldap_client_signing_level_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78293r1_rule: ")
self.log.write(
"The system must be configured to the required LDAP client signing level.\n\n")
def ntlm_ssp_client_session_security_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78295r1_rule: ")
self.log.write(
"The system must be configured to meet the minimum session security requirement for NTLM SSP based clients.\n\n")
def ntlm_ssp_server_session_security_configuered_errmsg(self, success):
if not success:
self.log.write("Check SV-78297r1_rule: ")
self.log.write(
"The system must be configured to meet the minimum session security requirement for NTLM SSP based servers.\n\n")
def winrm_digest_authentication_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77831r1_rule: ")
self.log.write(
"The Windows Remote Management (WinRM) client must not use Digest authentication.\n\n")
def command_line_creation_event_logged_errmsg(self, success):
if not success:
self.log.write("Check SV-83409r1_rule: ")
self.log.write(
"Command line data must be included in process creation events.\n\n")
def uac_approval_mode_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78307r1_rule: ")
self.log.write(
"User Account Control approval mode for the built-in Administrator must be enabled.\n\n")
def ac_sleep_wakeup_password_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78139r1_rule: ")
self.log.write(
"The user must be prompted for a password on resume from sleep (plugged in)\n\n")
def case_insensitivity_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78303r1_rule: ")
self.log.write(
"The system must be configured to require case insensitivity for non-Windows subsystems.\n\n")
def fips_compliant_algorithims_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78301r1_rule: ")
self.log.write(
"The system must be configured to use FIPS-compliant algorithms for encryption, hashing, and signing.\n\n")
def untrusted_fonts_blocked_errmsg(self, success):
if not success:
self.log.write("Check SV-78131r1_rule: ")
self.log.write(
"The system must be configured to block untrusted fonts from loading.\n\n")
def outgoing_traffic_signed_errmsg(self, success):
if not success:
self.log.write("Check SV-78137r1_rule: ")
self.log.write(
"Outgoing secure channel traffic must be signed when possible.\n\n")
def remote_desktop_client_password_unsaved_errmsg(self, success):
if not success:
self.log.write("Check SV-78219r1_rule: ")
self.log.write(
"Passwords must not be saved in the Remote Desktop Client.\n\n")
def dc_sleep_wakeup_password_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78135r1_rule: ")
self.log.write(
"Users must be prompted for a password on resume from sleep (on battery).\n\n")
def admin_consent_prompt_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78309r1_rule: ")
self.log.write(
"User Account Control must, at minimum, prompt administrators for consent on the secure desktop.\n\n")
def machine_lockout_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78447r1_rule: ")
self.log.write(
"The machine account lockout threshold must be set to 10 on systems with BitLocker enabled.\n\n")
def http_printing_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78113r1_rule: ")
self.log.write("Printing over HTTP must be prevented.\n\n")
def restart_automatic_signin_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77823r1_rule: ")
self.log.write(
"Automatically signing in the last interactive user after a system-initiated restart must be disabled.\n\n")
def winrm_client_unencrypted_traffic_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-77829r1_rule: ")
self.log.write(
"The Windows Remote Management (WinRM) client must not allow unencrypted traffic.\n\n")
def optional_accounts_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78149r1_rule: ")
self.log.write(
"he setting to allow Microsoft accounts to be optional for modern style apps must be enabled.\n\n")
def session_suspension_time_set_errmsg(self, success):
if not success:
self.log.write("Check SV-78205r1_rule: ")
self.log.write(
"The amount of idle time required before suspending a session must be configured to 15 minutes or less.\n\n")
def password_reset_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78143r1_rule: ")
self.log.write(
"The computer account password must not be prevented from being reset.\n\n")
def password_age_configured_errmsg(self, success):
if not success:
self.log.write("Check SV-78151r1_rule: ")
self.log.write(
"The maximum age for machine account passwords must be configured to 30 days or less.\n\n")
def apci_data_collection_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78153r1_rule: ")
self.log.write(
"The Application Compatibility Program Inventory must be prevented from collecting data and sending the information to Microsoft.\n\n")
def login_cache_limited_errmsg(self, success):
if not success:
self.log.write("Check SV-78177r1_rule: ")
self.log.write("Caching of logon credentials must be limited.\n\n")
def forced_logoff_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78217r1_rule: ")
self.log.write(
"Users must be forcibly disconnected when their logon hours expire.\n\n")
def heap_termination_turnoff_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78217r1_rule: ")
self.log.write(
"Turning off File Explorer heap termination on corruption must be disabled.\n\n")
def domain_controller_authentication_not_required_errmsg(self, success):
if not success:
self.log.write("Check SV-78183r1_rule: ")
self.log.write(
"Domain Controller authentication must not be required to unlock the workstation.\n\n")
def imcp_redirect_enabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78053r1_rule: ")
self.log.write(
"The system must be configured to prevent Internet Control Message Protocol (ICMP) redirects from overriding Open Shortest Path First (OSPF) generated routes.\n\n")
def netbios_name_ignored_errmsg(self, success):
if not success:
self.log.write("Check SV-78057r1_rule: ")
self.log.write(
"The system must be configured to ignore NetBIOS name release requests except from WINS servers.\n\n")
def toast_notification_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-78329r1_rule: ")
self.log.write(
"Toast notifications to the lock screen must be turned off.\n\n")
def global_system_objets_permissions_disabled_errmsg(self, success):
if not success:
self.log.write("Check SV-80171r1_rule: ")
self.log.write(
"Windows Update must not obtain updates from other PCs on the Internet.\n\n")
|
import TestConstants
from generator.ExpressionParser import ExpressionParser
import unittest
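# Note: TestConstants is not included in this file. Judging from the assertions
# below, it presumably defines a valid cron expression whose minute field is
# "*/20", hour field "2-4", day-of-month field "6,8,9", day-of-week field "*",
# and command "/usr/bin/find", along with the matching per-field strings
# (Minutes, Hours, Days_in_month, Days_in_week, Command).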
class TestExpressionParser(unittest.TestCase):
# Test to verify the minute functionality & */multiple expression check.
def test_valid_minute_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_minutes(TestConstants.Minutes),[0,20,40])
# Test to verify the invalid minute functionality & */multiple expression check.
def test_invalid_minute_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertNotEqual(expressionParser._parse_minutes(TestConstants.Minutes),[1,20,40])
# Test to verify the hour functionality & [a-b] (hyphen) expression check.
def test_valid_hour_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_hours(TestConstants.Hours),[2,3,4])
# Test to verify the month_parsing functionality & comma-separated expression check.
def test_valid_day_in_month_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_month(TestConstants.Days_in_month),[6,8,9])
# Test to verify the week functionality & * expression check.
def test_valid_day_in_week_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_day_of_week(TestConstants.Days_in_week),[1,2,3,4,5,6,7])
# Test to verify the command functionality check.
def test_valid_command(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_command(TestConstants.Command),['/usr/bin/find'])
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.timezone import now as timezone_now
from zerver.data_import.slack import (
get_slack_api_data,
get_admin,
get_guest,
get_user_timezone,
fetch_shared_channel_users,
users_to_zerver_userprofile,
get_subscription,
channels_to_zerver_stream,
slack_workspace_to_realm,
get_message_sending_user,
channel_message_to_zerver_message,
convert_slack_workspace_messages,
do_convert_data,
process_message_files,
AddedChannelsT,
AddedMPIMsT,
DMMembersT,
ZerverFieldsT,
)
from zerver.data_import.import_util import (
build_zerver_realm,
build_subscription,
build_recipient,
build_usermessages,
build_defaultstream,
)
from zerver.data_import.sequencer import (
NEXT_ID,
)
from zerver.lib.import_realm import (
do_import_realm,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_helpers import (
get_test_image_file
)
from zerver.lib.topic import (
EXPORT_TOPIC_NAME,
)
from zerver.models import (
Realm,
get_realm,
RealmAuditLog,
Recipient,
UserProfile,
)
import ujson
import logging
import shutil
import os
import mock
from mock import ANY, call
from typing import Any, Dict, List, Set, Tuple, Iterator
def remove_folder(path: str) -> None:
if os.path.exists(path):
shutil.rmtree(path)
# This method will be used by the mock to replace requests.get
def mocked_requests_get(*args: List[str], **kwargs: List[str]) -> mock.Mock:
class MockResponse:
def __init__(self, json_data: Dict[str, Any], status_code: int) -> None:
self.json_data = json_data
self.status_code = status_code
def json(self) -> Dict[str, Any]:
return self.json_data
if args[0] == 'https://slack.com/api/users.list?token=xoxp-valid-token':
return MockResponse({"ok": True, "members": "user_data"}, 200)
elif args[0] == 'https://slack.com/api/users.list?token=xoxp-invalid-token':
return MockResponse({"ok": False, "error": "invalid_auth"}, 200)
else:
return MockResponse(None, 404)
class SlackImporter(ZulipTestCase):
logger = logging.getLogger()
# set logger to a higher level to suppress 'logger.INFO' outputs
logger.setLevel(logging.WARNING)
@mock.patch('requests.get', side_effect=mocked_requests_get)
def test_get_slack_api_data(self, mock_get: mock.Mock) -> None:
token = 'xoxp-valid-token'
slack_user_list_url = "https://slack.com/api/users.list"
self.assertEqual(get_slack_api_data(slack_user_list_url, "members", token=token),
"user_data")
token = 'xoxp-invalid-token'
with self.assertRaises(Exception) as invalid:
get_slack_api_data(slack_user_list_url, "members", token=token)
self.assertEqual(invalid.exception.args, ('Error accessing Slack API: invalid_auth',),)
token = 'xoxe-invalid-token'
with self.assertRaises(Exception) as invalid:
get_slack_api_data(slack_user_list_url, "members", token=token)
self.assertTrue(invalid.exception.args[0].startswith("Invalid Slack legacy token.\n"))
with self.assertRaises(Exception) as invalid:
get_slack_api_data(slack_user_list_url, "members")
self.assertEqual(invalid.exception.args, ('Slack token missing in kwargs',),)
token = 'xoxp-status404'
wrong_url = "https://slack.com/api/wrong"
with self.assertRaises(Exception) as invalid:
get_slack_api_data(wrong_url, "members", token=token)
self.assertEqual(invalid.exception.args, ('HTTP error accessing the Slack API.',),)
def test_build_zerver_realm(self) -> None:
realm_id = 2
realm_subdomain = "test-realm"
time = float(timezone_now().timestamp())
test_realm = build_zerver_realm(realm_id, realm_subdomain, time, 'Slack') # type: List[Dict[str, Any]]
test_zerver_realm_dict = test_realm[0]
self.assertEqual(test_zerver_realm_dict['id'], realm_id)
self.assertEqual(test_zerver_realm_dict['string_id'], realm_subdomain)
self.assertEqual(test_zerver_realm_dict['name'], realm_subdomain)
self.assertEqual(test_zerver_realm_dict['date_created'], time)
def test_get_admin(self) -> None:
user_data = [{'is_admin': True, 'is_owner': False, 'is_primary_owner': False},
{'is_admin': True, 'is_owner': True, 'is_primary_owner': False},
{'is_admin': True, 'is_owner': True, 'is_primary_owner': True},
{'is_admin': False, 'is_owner': False, 'is_primary_owner': False}]
self.assertEqual(get_admin(user_data[0]), True)
self.assertEqual(get_admin(user_data[1]), True)
self.assertEqual(get_admin(user_data[2]), True)
self.assertEqual(get_admin(user_data[3]), False)
def test_get_guest(self) -> None:
user_data = [{'is_restricted': False, 'is_ultra_restricted': False},
{'is_restricted': True, 'is_ultra_restricted': False},
{'is_restricted': False, 'is_ultra_restricted': True},
{'is_restricted': True, 'is_ultra_restricted': True}]
self.assertEqual(get_guest(user_data[0]), False)
self.assertEqual(get_guest(user_data[1]), True)
self.assertEqual(get_guest(user_data[2]), True)
self.assertEqual(get_guest(user_data[3]), True)
def test_get_timezone(self) -> None:
user_chicago_timezone = {"tz": "America/Chicago"}
user_timezone_none = {"tz": None}
user_no_timezone = {} # type: Dict[str, Any]
self.assertEqual(get_user_timezone(user_chicago_timezone), "America/Chicago")
self.assertEqual(get_user_timezone(user_timezone_none), "America/New_York")
self.assertEqual(get_user_timezone(user_no_timezone), "America/New_York")
@mock.patch("zerver.data_import.slack.get_data_file")
@mock.patch("zerver.data_import.slack.get_slack_api_data")
@mock.patch("zerver.data_import.slack.get_messages_iterator")
def test_fetch_shared_channel_users(self, messages_mock: mock.Mock, api_mock: mock.Mock,
data_file_mock: mock.Mock) -> None:
users = [{"id": "U061A1R2R"}, {"id": "U061A5N1G"}, {"id": "U064KUGRJ"}]
data_file_mock.side_effect = [
[
{"name": "general", "members": ["U061A1R2R", "U061A5N1G"]},
{"name": "sharedchannel", "members": ["U061A1R2R", "U061A3E0G"]}
],
[]
]
api_mock.side_effect = [
{"id": "U061A3E0G", "team_id": "T6LARQE2Z"},
{"domain": "foreignteam1"},
{"id": "U061A8H1G", "team_id": "T7KJRQE8Y"},
{"domain": "foreignteam2"},
]
messages_mock.return_value = [
{"user": "U061A1R2R"},
{"user": "U061A5N1G"},
{"user": "U061A8H1G"},
]
slack_data_dir = self.fixture_file_name('', type='slack_fixtures')
fetch_shared_channel_users(users, slack_data_dir, "token")
# Normal users
self.assertEqual(len(users), 5)
self.assertEqual(users[0]["id"], "U061A1R2R")
self.assertEqual(users[0]["is_mirror_dummy"], False)
self.assertFalse("team_domain" in users[0])
self.assertEqual(users[1]["id"], "U061A5N1G")
self.assertEqual(users[2]["id"], "U064KUGRJ")
# Shared channel users
self.assertEqual(users[3]["id"], "U061A3E0G")
self.assertEqual(users[3]["team_domain"], "foreignteam1")
self.assertEqual(users[3]["is_mirror_dummy"], True)
self.assertEqual(users[4]["id"], "U061A8H1G")
self.assertEqual(users[4]["team_domain"], "foreignteam2")
self.assertEqual(users[4]["is_mirror_dummy"], True)
api_calls = [
call("https://slack.com/api/users.info", "user", token="token", user="U061A3E0G"),
call("https://slack.com/api/team.info", "team", token="token", team="T6LARQE2Z"),
call("https://slack.com/api/users.info", "user", token="token", user="U061A8H1G"),
call("https://slack.com/api/team.info", "team", token="token", team="T7KJRQE8Y")
]
api_mock.assert_has_calls(api_calls, any_order=True)
@mock.patch("zerver.data_import.slack.get_data_file")
def test_users_to_zerver_userprofile(self, mock_get_data_file: mock.Mock) -> None:
custom_profile_field_user1 = {"Xf06054BBB": {"value": "random1"},
"Xf023DSCdd": {"value": "employee"}}
custom_profile_field_user2 = {"Xf06054BBB": {"value": "random2"},
"Xf023DSCdd": {"value": "employer"}}
user_data = [{"id": "U08RGD1RD",
"team_id": "T5YFFM2QY",
"name": "john",
"deleted": False,
"is_mirror_dummy": False,
"real_name": "John Doe",
"profile": {"image_32": "", "email": "jon@gmail.com", "avatar_hash": "hash",
"phone": "+1-123-456-77-868",
"fields": custom_profile_field_user1}},
{"id": "U0CBK5KAT",
"team_id": "T5YFFM2QY",
"is_admin": True,
"is_bot": False,
"is_owner": True,
"is_primary_owner": True,
'name': 'Jane',
"real_name": "Jane Doe",
"deleted": False,
"is_mirror_dummy": False,
"profile": {"image_32": "https://secure.gravatar.com/avatar/random.png",
"fields": custom_profile_field_user2,
"email": "jane@foo.com", "avatar_hash": "hash"}},
{"id": "U09TYF5Sk",
"team_id": "T5YFFM2QY",
"name": "Bot",
"real_name": "Bot",
"is_bot": True,
"deleted": False,
"is_mirror_dummy": False,
"profile": {"image_32": "https://secure.gravatar.com/avatar/random1.png",
"skype": "test_skype_name",
"email": "bot1@zulipchat.com", "avatar_hash": "hash"}},
{"id": "UHSG7OPQN",
"team_id": "T6LARQE2Z",
'name': 'matt.perry',
"color": '9d8eee',
"is_bot": False,
"is_app_user": False,
"is_mirror_dummy": True,
"team_domain": "foreignteam",
"profile": {"image_32": "https://secure.gravatar.com/avatar/random6.png",
"avatar_hash": "hash", "first_name": "Matt", "last_name": "Perry",
"real_name": "Matt Perry", "display_name": "matt.perry", "team": "T6LARQE2Z"}},
{"id": "U8VAHEVUY",
"team_id": "T5YFFM2QY",
"name": "steviejacob34",
"real_name": "Steve Jacob",
"is_admin": False,
"is_owner": False,
"is_primary_owner": False,
"is_restricted": True,
"is_ultra_restricted": False,
"is_bot": False,
"is_mirror_dummy": False,
"profile": {"email": "steviejacob34@yahoo.com", "avatar_hash": "hash",
"image_32": "https://secure.gravatar.com/avatar/random6.png"}},
{"id": "U8X25EBAB",
"team_id": "T5YFFM2QY",
"name": "pratikweb_0",
"real_name": "Pratik",
"is_admin": False,
"is_owner": False,
"is_primary_owner": False,
"is_restricted": True,
"is_ultra_restricted": True,
"is_bot": False,
"is_mirror_dummy": False,
"profile": {"email": "pratik@mit.edu", "avatar_hash": "hash",
"image_32": "https://secure.gravatar.com/avatar/random.png"}}]
mock_get_data_file.return_value = user_data
        # As the user with slack_id 'U0CBK5KAT' is the primary owner, that user is imported first
        # and hence gets zulip_id = 0
test_slack_user_id_to_zulip_user_id = {'U08RGD1RD': 1, 'U0CBK5KAT': 0, 'U09TYF5Sk': 2, 'UHSG7OPQN': 3, 'U8VAHEVUY': 4, 'U8X25EBAB': 5}
slack_data_dir = './random_path'
timestamp = int(timezone_now().timestamp())
mock_get_data_file.return_value = user_data
zerver_userprofile, avatar_list, slack_user_id_to_zulip_user_id, customprofilefield, \
customprofilefield_value = users_to_zerver_userprofile(slack_data_dir, user_data, 1,
timestamp, 'test_domain')
# Test custom profile fields
self.assertEqual(customprofilefield[0]['field_type'], 1)
self.assertEqual(customprofilefield[3]['name'], 'skype')
cpf_name = {cpf['name'] for cpf in customprofilefield}
self.assertIn('phone', cpf_name)
self.assertIn('skype', cpf_name)
cpf_name.remove('phone')
cpf_name.remove('skype')
for name in cpf_name:
self.assertTrue(name.startswith('slack custom field '))
self.assertEqual(len(customprofilefield_value), 6)
self.assertEqual(customprofilefield_value[0]['field'], 0)
self.assertEqual(customprofilefield_value[0]['user_profile'], 1)
self.assertEqual(customprofilefield_value[3]['user_profile'], 0)
self.assertEqual(customprofilefield_value[5]['value'], 'test_skype_name')
# test that the primary owner should always be imported first
self.assertDictEqual(slack_user_id_to_zulip_user_id, test_slack_user_id_to_zulip_user_id)
self.assertEqual(len(avatar_list), 6)
self.assertEqual(len(zerver_userprofile), 6)
self.assertEqual(zerver_userprofile[0]['is_staff'], False)
self.assertEqual(zerver_userprofile[0]['is_bot'], False)
self.assertEqual(zerver_userprofile[0]['is_active'], True)
self.assertEqual(zerver_userprofile[0]['is_mirror_dummy'], False)
self.assertEqual(zerver_userprofile[0]['role'], UserProfile.ROLE_MEMBER)
self.assertEqual(zerver_userprofile[0]['enable_desktop_notifications'], True)
self.assertEqual(zerver_userprofile[0]['email'], 'jon@gmail.com')
self.assertEqual(zerver_userprofile[0]['full_name'], 'John Doe')
self.assertEqual(zerver_userprofile[1]['id'], test_slack_user_id_to_zulip_user_id['U0CBK5KAT'])
self.assertEqual(zerver_userprofile[1]['role'], UserProfile.ROLE_REALM_ADMINISTRATOR)
self.assertEqual(zerver_userprofile[1]['is_staff'], False)
self.assertEqual(zerver_userprofile[1]['is_active'], True)
        self.assertEqual(zerver_userprofile[1]['is_mirror_dummy'], False)
self.assertEqual(zerver_userprofile[2]['id'], test_slack_user_id_to_zulip_user_id['U09TYF5Sk'])
self.assertEqual(zerver_userprofile[2]['is_bot'], True)
self.assertEqual(zerver_userprofile[2]['is_active'], True)
self.assertEqual(zerver_userprofile[2]['is_mirror_dummy'], False)
self.assertEqual(zerver_userprofile[2]['email'], 'bot1@zulipchat.com')
self.assertEqual(zerver_userprofile[2]['bot_type'], 1)
self.assertEqual(zerver_userprofile[2]['avatar_source'], 'U')
self.assertEqual(zerver_userprofile[3]['id'], test_slack_user_id_to_zulip_user_id['UHSG7OPQN'])
self.assertEqual(zerver_userprofile[3]['role'], UserProfile.ROLE_MEMBER)
self.assertEqual(zerver_userprofile[3]['is_staff'], False)
self.assertEqual(zerver_userprofile[3]['is_active'], False)
self.assertEqual(zerver_userprofile[3]['email'], 'matt.perry@foreignteam.slack.com')
self.assertEqual(zerver_userprofile[3]['realm'], 1)
self.assertEqual(zerver_userprofile[3]['full_name'], 'Matt Perry')
self.assertEqual(zerver_userprofile[3]['short_name'], 'matt.perry')
self.assertEqual(zerver_userprofile[3]['is_mirror_dummy'], True)
self.assertEqual(zerver_userprofile[3]['is_api_super_user'], False)
self.assertEqual(zerver_userprofile[4]['id'], test_slack_user_id_to_zulip_user_id['U8VAHEVUY'])
self.assertEqual(zerver_userprofile[4]['role'], UserProfile.ROLE_GUEST)
self.assertEqual(zerver_userprofile[4]['is_staff'], False)
self.assertEqual(zerver_userprofile[4]['is_active'], True)
self.assertEqual(zerver_userprofile[4]['is_mirror_dummy'], False)
self.assertEqual(zerver_userprofile[5]['id'], test_slack_user_id_to_zulip_user_id['U8X25EBAB'])
self.assertEqual(zerver_userprofile[5]['role'], UserProfile.ROLE_GUEST)
self.assertEqual(zerver_userprofile[5]['is_staff'], False)
self.assertEqual(zerver_userprofile[5]['is_active'], True)
self.assertEqual(zerver_userprofile[5]['is_mirror_dummy'], False)
def test_build_defaultstream(self) -> None:
realm_id = 1
stream_id = 1
default_channel_general = build_defaultstream(realm_id, stream_id, 1)
test_default_channel = {'stream': 1, 'realm': 1, 'id': 1}
self.assertDictEqual(test_default_channel, default_channel_general)
def test_build_pm_recipient_sub_from_user(self) -> None:
zulip_user_id = 3
recipient_id = 5
subscription_id = 7
sub = build_subscription(recipient_id, zulip_user_id, subscription_id)
recipient = build_recipient(zulip_user_id, recipient_id, Recipient.PERSONAL)
self.assertEqual(recipient['id'], sub['recipient'])
self.assertEqual(recipient['type_id'], sub['user_profile'])
self.assertEqual(recipient['type'], Recipient.PERSONAL)
self.assertEqual(recipient['type_id'], 3)
self.assertEqual(sub['recipient'], 5)
self.assertEqual(sub['id'], 7)
self.assertEqual(sub['active'], True)
def test_build_subscription(self) -> None:
channel_members = ["U061A1R2R", "U061A3E0G", "U061A5N1G", "U064KUGRJ"]
slack_user_id_to_zulip_user_id = {"U061A1R2R": 1, "U061A3E0G": 8, "U061A5N1G": 7, "U064KUGRJ": 5}
subscription_id_count = 0
recipient_id = 12
zerver_subscription = [] # type: List[Dict[str, Any]]
final_subscription_id = get_subscription(channel_members, zerver_subscription,
recipient_id, slack_user_id_to_zulip_user_id,
subscription_id_count)
# sanity checks
self.assertEqual(final_subscription_id, 4)
self.assertEqual(zerver_subscription[0]['recipient'], 12)
self.assertEqual(zerver_subscription[0]['id'], 0)
self.assertEqual(zerver_subscription[0]['user_profile'],
slack_user_id_to_zulip_user_id[channel_members[0]])
self.assertEqual(zerver_subscription[2]['user_profile'],
slack_user_id_to_zulip_user_id[channel_members[2]])
self.assertEqual(zerver_subscription[3]['id'], 3)
self.assertEqual(zerver_subscription[1]['recipient'],
zerver_subscription[3]['recipient'])
self.assertEqual(zerver_subscription[1]['pin_to_top'], False)
def test_channels_to_zerver_stream(self) -> None:
slack_user_id_to_zulip_user_id = {"U061A1R2R": 1, "U061A3E0G": 8, "U061A5N1G": 7, "U064KUGRJ": 5}
zerver_userprofile = [{'id': 1}, {'id': 8}, {'id': 7}, {'id': 5}]
realm_id = 3
realm, added_channels, added_mpims, dm_members, slack_recipient_name_to_zulip_recipient_id = \
channels_to_zerver_stream(self.fixture_file_name("", "slack_fixtures"), realm_id,
{"zerver_userpresence": []}, slack_user_id_to_zulip_user_id,
zerver_userprofile)
test_added_channels = {'sharedchannel': ("C061A0HJG", 3), 'general': ("C061A0YJG", 1),
'general1': ("C061A0YJP", 2), 'random': ("C061A0WJG", 0)}
test_added_mpims = {'mpdm-user9--user2--user10-1': ('G9HBG2A5D', 0),
'mpdm-user6--user7--user4-1': ('G6H1Z0ZPS', 1),
'mpdm-user4--user1--user5-1': ('G6N944JPL', 2)}
test_dm_members = {'DJ47BL849': ('U061A1R2R', 'U061A5N1G'), 'DHX1UP7EG': ('U061A5N1G', 'U064KUGRJ'),
'DK8HSJDHS': ('U061A1R2R', 'U064KUGRJ'), 'DRS3PSLDK': ('U064KUGRJ', 'U064KUGRJ')}
slack_recipient_names = set(slack_user_id_to_zulip_user_id.keys()) | set(test_added_channels.keys()) \
| set(test_added_mpims.keys())
self.assertDictEqual(test_added_channels, added_channels)
# zerver defaultstream already tested in helper functions
self.assertEqual(realm["zerver_defaultstream"],
[{'id': 0, 'realm': 3, 'stream': 0}, {'id': 1, 'realm': 3, 'stream': 1}])
self.assertDictEqual(test_added_mpims, added_mpims)
self.assertDictEqual(test_dm_members, dm_members)
        # We can't use assertDictEqual here since, during the construction of Personal
        # recipients, slack_user_id_to_zulip_user_id is iterated in a different order in Python 3.5 and 3.6.
self.assertEqual(set(slack_recipient_name_to_zulip_recipient_id.keys()), slack_recipient_names)
self.assertEqual(set(slack_recipient_name_to_zulip_recipient_id.values()), set(i for i in range(11)))
        # The functioning of zerver subscriptions is already tested in the helper functions.
# This is to check the concatenation of the output lists from the helper functions
# subscriptions for stream
zerver_subscription = realm["zerver_subscription"]
zerver_recipient = realm["zerver_recipient"]
zerver_stream = realm["zerver_stream"]
self.assertEqual(self.get_set(zerver_subscription, "recipient"), {i for i in range(11)})
self.assertEqual(self.get_set(zerver_subscription, "user_profile"), {1, 5, 7, 8})
self.assertEqual(self.get_set(zerver_recipient, "id"), self.get_set(zerver_subscription, "recipient"))
self.assertEqual(self.get_set(zerver_recipient, "type_id"), {0, 1, 2, 3, 5, 7, 8})
self.assertEqual(self.get_set(zerver_recipient, "type"), {1, 2, 3})
# stream mapping
self.assertEqual(zerver_stream[0]['name'], "random")
self.assertEqual(zerver_stream[0]['deactivated'], True)
self.assertEqual(zerver_stream[0]['description'], 'no purpose')
self.assertEqual(zerver_stream[0]['invite_only'], False)
self.assertEqual(zerver_stream[0]['realm'], realm_id)
self.assertEqual(zerver_stream[2]['id'],
test_added_channels[zerver_stream[2]['name']][1])
self.assertEqual(self.get_set(realm["zerver_huddle"], "id"), {0, 1, 2})
self.assertEqual(realm["zerver_userpresence"], [])
@mock.patch("zerver.data_import.slack.users_to_zerver_userprofile",
return_value=[[], [], {}, [], []])
@mock.patch("zerver.data_import.slack.channels_to_zerver_stream",
return_value=[{"zerver_stream": []}, {}, {}, {}, {}])
def test_slack_workspace_to_realm(self, mock_channels_to_zerver_stream: mock.Mock,
mock_users_to_zerver_userprofile: mock.Mock) -> None:
realm_id = 1
user_list = [] # type: List[Dict[str, Any]]
realm, slack_user_id_to_zulip_user_id, slack_recipient_name_to_zulip_recipient_id, \
added_channels, added_mpims, dm_members, \
avatar_list, em = slack_workspace_to_realm('testdomain', realm_id, user_list, 'test-realm',
'./random_path', {})
test_zerver_realmdomain = [{'realm': realm_id, 'allow_subdomains': False,
'domain': 'testdomain', 'id': realm_id}]
        # Functioning already tested in helper functions
self.assertEqual(slack_user_id_to_zulip_user_id, {})
self.assertEqual(added_channels, {})
self.assertEqual(added_mpims, {})
self.assertEqual(slack_recipient_name_to_zulip_recipient_id, {})
self.assertEqual(avatar_list, [])
mock_channels_to_zerver_stream.assert_called_once_with("./random_path", 1, ANY, {}, [])
passed_realm = mock_channels_to_zerver_stream.call_args_list[0][0][2]
zerver_realmdomain = passed_realm['zerver_realmdomain']
self.assertListEqual(zerver_realmdomain, test_zerver_realmdomain)
self.assertEqual(passed_realm['zerver_realm'][0]['description'], 'Organization imported from Slack!')
self.assertEqual(passed_realm['zerver_userpresence'], [])
self.assertEqual(len(passed_realm.keys()), 14)
self.assertEqual(realm['zerver_stream'], [])
self.assertEqual(realm['zerver_userprofile'], [])
self.assertEqual(realm['zerver_realmemoji'], [])
self.assertEqual(realm['zerver_customprofilefield'], [])
self.assertEqual(realm['zerver_customprofilefieldvalue'], [])
self.assertEqual(len(realm.keys()), 5)
def test_get_message_sending_user(self) -> None:
message_with_file = {'subtype': 'file', 'type': 'message',
'file': {'user': 'U064KUGRJ'}}
        message_without_file = {'subtype': 'file', 'type': 'message', 'user': 'U064KUGRJ'}
user_file = get_message_sending_user(message_with_file)
self.assertEqual(user_file, 'U064KUGRJ')
user_without_file = get_message_sending_user(message_without_file)
self.assertEqual(user_without_file, 'U064KUGRJ')
def test_build_zerver_message(self) -> None:
zerver_usermessage = [] # type: List[Dict[str, Any]]
# recipient_id -> set of user_ids
subscriber_map = {
2: {3, 7, 15, 16}, # these we care about
4: {12},
6: {19, 21},
}
recipient_id = 2
mentioned_user_ids = [7]
message_id = 9
um_id = NEXT_ID('user_message')
build_usermessages(
zerver_usermessage=zerver_usermessage,
subscriber_map=subscriber_map,
recipient_id=recipient_id,
mentioned_user_ids=mentioned_user_ids,
message_id=message_id,
is_private=False,
)
self.assertEqual(zerver_usermessage[0]['id'], um_id + 1)
self.assertEqual(zerver_usermessage[0]['message'], message_id)
self.assertEqual(zerver_usermessage[0]['flags_mask'], 1)
self.assertEqual(zerver_usermessage[1]['id'], um_id + 2)
self.assertEqual(zerver_usermessage[1]['message'], message_id)
self.assertEqual(zerver_usermessage[1]['user_profile'], 7)
self.assertEqual(zerver_usermessage[1]['flags_mask'], 9) # mentioned
self.assertEqual(zerver_usermessage[2]['id'], um_id + 3)
self.assertEqual(zerver_usermessage[2]['message'], message_id)
self.assertEqual(zerver_usermessage[3]['id'], um_id + 4)
self.assertEqual(zerver_usermessage[3]['message'], message_id)
@mock.patch("zerver.data_import.slack.build_usermessages", return_value = (2, 4))
def test_channel_message_to_zerver_message(self, mock_build_usermessage: mock.Mock) -> None:
user_data = [{"id": "U066MTL5U", "name": "john doe", "deleted": False, "real_name": "John"},
{"id": "U061A5N1G", "name": "jane doe", "deleted": False, "real_name": "Jane"},
{"id": "U061A1R2R", "name": "jon", "deleted": False, "real_name": "Jon"}]
slack_user_id_to_zulip_user_id = {"U066MTL5U": 5, "U061A5N1G": 24, "U061A1R2R": 43}
reactions = [{"name": "grinning", "users": ["U061A5N1G"], "count": 1}]
all_messages = [{"text": "<@U066MTL5U> has joined the channel", "subtype": "channel_join",
"user": "U066MTL5U", "ts": "1434139102.000002", "channel_name": "random"},
{"text": "<@U061A5N1G>: hey!", "user": "U061A1R2R",
"ts": "1437868294.000006", "has_image": True, "channel_name": "random"},
{"text": "random", "user": "U061A5N1G", "reactions": reactions,
"ts": "1439868294.000006", "channel_name": "random"},
{"text": "without a user", "user": None, # this message will be ignored as it has no user
"ts": "1239868294.000006", "channel_name": "general"},
{"text": "<http://journals.plos.org/plosone/article>", "user": "U061A1R2R",
"ts": "1463868370.000008", "channel_name": "general"},
{"text": "added bot", "user": "U061A5N1G", "subtype": "bot_add",
"ts": "1433868549.000010", "channel_name": "general"},
# This message will be ignored since it has no user and file is None.
# See #9217 for the situation; likely file uploads on archived channels
{'upload': False, 'file': None, 'text': 'A file was shared',
'channel_name': 'general', 'type': 'message', 'ts': '1433868549.000011',
'subtype': 'file_share'},
{"text": "random test", "user": "U061A1R2R",
"ts": "1433868669.000012", "channel_name": "general"},
{"text": "Hello everyone", "user": "U061A1R2R", "type": "message",
"ts": "1433868669.000015", "mpim_name": "mpdm-user9--user2--user10-1"},
{"text": "Who is watching the World Cup", "user": "U061A5N1G", "type": "message",
"ts": "1433868949.000015", "mpim_name": "mpdm-user6--user7--user4-1"},
{'client_msg_id': '998d9229-35aa-424f-8d87-99e00df27dc9', 'type': 'message',
'text': 'Who is coming for camping this weekend?', 'user': 'U061A1R2R',
'ts': '1553607595.000700', 'pm_name': 'DHX1UP7EG'},
{"client_msg_id": "998d9229-35aa-424f-8d87-99e00df27dc9", "type": "message",
"text": "<@U061A5N1G>: Are you in Kochi?", "user": "U066MTL5U",
"ts": "1553607595.000700", "pm_name": "DJ47BL849"}] # type: List[Dict[str, Any]]
slack_recipient_name_to_zulip_recipient_id = {'random': 2, 'general': 1, 'mpdm-user9--user2--user10-1': 5,
'mpdm-user6--user7--user4-1': 6, 'U066MTL5U': 7, 'U061A5N1G': 8,
'U061A1R2R': 8}
dm_members = {'DJ47BL849': ('U066MTL5U', 'U061A5N1G'), 'DHX1UP7EG': ('U061A5N1G', 'U061A1R2R')}
zerver_usermessage = [] # type: List[Dict[str, Any]]
subscriber_map = dict() # type: Dict[int, Set[int]]
added_channels = {'random': ('c5', 1), 'general': ('c6', 2)} # type: Dict[str, Tuple[str, int]]
zerver_message, zerver_usermessage, attachment, uploads, reaction = \
channel_message_to_zerver_message(
1, user_data, slack_user_id_to_zulip_user_id, slack_recipient_name_to_zulip_recipient_id,
all_messages, [], subscriber_map, added_channels, dm_members, 'domain', set())
# functioning already tested in helper function
self.assertEqual(zerver_usermessage, [])
# subtype: channel_join is filtered
self.assertEqual(len(zerver_message), 9)
self.assertEqual(uploads, [])
self.assertEqual(attachment, [])
# Test reactions
self.assertEqual(reaction[0]['user_profile'], 24)
self.assertEqual(reaction[0]['emoji_name'], reactions[0]['name'])
# Message conversion already tested in tests.test_slack_message_conversion
self.assertEqual(zerver_message[0]['content'], '@**Jane**: hey!')
self.assertEqual(zerver_message[0]['has_link'], False)
self.assertEqual(zerver_message[2]['content'], 'http://journals.plos.org/plosone/article')
self.assertEqual(zerver_message[2]['has_link'], True)
self.assertEqual(zerver_message[5]['has_link'], False)
self.assertEqual(zerver_message[7]['has_link'], False)
self.assertEqual(zerver_message[3][EXPORT_TOPIC_NAME], 'imported from slack')
self.assertEqual(zerver_message[3]['content'], '/me added bot')
self.assertEqual(zerver_message[4]['recipient'], slack_recipient_name_to_zulip_recipient_id['general'])
self.assertEqual(zerver_message[2][EXPORT_TOPIC_NAME], 'imported from slack')
self.assertEqual(zerver_message[1]['recipient'], slack_recipient_name_to_zulip_recipient_id['random'])
self.assertEqual(zerver_message[5]['recipient'], slack_recipient_name_to_zulip_recipient_id['mpdm-user9--user2--user10-1'])
self.assertEqual(zerver_message[6]['recipient'], slack_recipient_name_to_zulip_recipient_id['mpdm-user6--user7--user4-1'])
self.assertEqual(zerver_message[7]['recipient'], slack_recipient_name_to_zulip_recipient_id['U061A5N1G'])
self.assertEqual(zerver_message[3]['id'], zerver_message[0]['id'] + 3)
self.assertEqual(zerver_message[4]['id'], zerver_message[0]['id'] + 4)
self.assertEqual(zerver_message[5]['id'], zerver_message[0]['id'] + 5)
self.assertEqual(zerver_message[7]['id'], zerver_message[0]['id'] + 7)
self.assertIsNone(zerver_message[3]['rendered_content'])
self.assertEqual(zerver_message[0]['has_image'], False)
self.assertEqual(zerver_message[0]['date_sent'], float(all_messages[1]['ts']))
self.assertEqual(zerver_message[2]['rendered_content_version'], 1)
self.assertEqual(zerver_message[0]['sender'], 43)
self.assertEqual(zerver_message[3]['sender'], 24)
self.assertEqual(zerver_message[5]['sender'], 43)
self.assertEqual(zerver_message[6]['sender'], 24)
self.assertEqual(zerver_message[7]['sender'], 43)
self.assertEqual(zerver_message[8]['sender'], 5)
@mock.patch("zerver.data_import.slack.channel_message_to_zerver_message")
@mock.patch("zerver.data_import.slack.get_messages_iterator")
def test_convert_slack_workspace_messages(self, mock_get_messages_iterator: mock.Mock,
mock_message: mock.Mock) -> None:
output_dir = os.path.join(settings.TEST_WORKER_DIR, 'test-slack-import')
os.makedirs(output_dir, exist_ok=True)
added_channels = {'random': ('c5', 1), 'general': ('c6', 2)} # type: Dict[str, Tuple[str, int]]
time = float(timezone_now().timestamp())
zerver_message = [{'id': 1, 'ts': time}, {'id': 5, 'ts': time}]
def fake_get_messages_iter(slack_data_dir: str, added_channels: AddedChannelsT,
added_mpims: AddedMPIMsT, dm_members: DMMembersT) -> Iterator[ZerverFieldsT]:
import copy
return iter(copy.deepcopy(zerver_message))
realm = {'zerver_subscription': []} # type: Dict[str, Any]
user_list = [] # type: List[Dict[str, Any]]
reactions = [{"name": "grinning", "users": ["U061A5N1G"], "count": 1}]
attachments = uploads = [] # type: List[Dict[str, Any]]
zerver_usermessage = [{'id': 3}, {'id': 5}, {'id': 6}, {'id': 9}]
mock_get_messages_iterator.side_effect = fake_get_messages_iter
mock_message.side_effect = [[zerver_message[:1], zerver_usermessage[:2],
attachments, uploads, reactions[:1]],
[zerver_message[1:2], zerver_usermessage[2:5],
attachments, uploads, reactions[1:1]]]
# Hacky: We should include a zerver_userprofile, not the empty []
test_reactions, uploads, zerver_attachment = convert_slack_workspace_messages(
'./random_path', user_list, 2, {}, {}, added_channels, {}, {},
realm, [], [], 'domain', output_dir=output_dir, chunk_size=1)
messages_file_1 = os.path.join(output_dir, 'messages-000001.json')
self.assertTrue(os.path.exists(messages_file_1))
messages_file_2 = os.path.join(output_dir, 'messages-000002.json')
self.assertTrue(os.path.exists(messages_file_2))
with open(messages_file_1) as f:
message_json = ujson.load(f)
self.assertEqual(message_json['zerver_message'], zerver_message[:1])
self.assertEqual(message_json['zerver_usermessage'], zerver_usermessage[:2])
with open(messages_file_2) as f:
message_json = ujson.load(f)
self.assertEqual(message_json['zerver_message'], zerver_message[1:2])
self.assertEqual(message_json['zerver_usermessage'], zerver_usermessage[2:5])
self.assertEqual(test_reactions, reactions)
@mock.patch("zerver.data_import.slack.requests.get")
@mock.patch("zerver.data_import.slack.process_uploads", return_value = [])
@mock.patch("zerver.data_import.slack.build_attachment",
return_value = [])
@mock.patch("zerver.data_import.slack.build_avatar_url")
@mock.patch("zerver.data_import.slack.build_avatar")
@mock.patch("zerver.data_import.slack.get_slack_api_data")
def test_slack_import_to_existing_database(self, mock_get_slack_api_data: mock.Mock,
mock_build_avatar_url: mock.Mock,
mock_build_avatar: mock.Mock,
mock_process_uploads: mock.Mock,
mock_attachment: mock.Mock,
mock_requests_get: mock.Mock) -> None:
test_slack_dir = os.path.join(settings.DEPLOY_ROOT, "zerver", "tests", "fixtures",
"slack_fixtures")
test_slack_zip_file = os.path.join(test_slack_dir, "test_slack_importer.zip")
test_slack_unzipped_file = os.path.join(test_slack_dir, "test_slack_importer")
test_realm_subdomain = 'test-slack-import'
output_dir = os.path.join(settings.DEPLOY_ROOT, "var", "test-slack-importer-data")
token = 'valid-token'
        # If a previous test run failed, 'output_dir' may not have been deleted, which
        # would cause an error the next time the tests run, since 'do_convert_data'
        # expects an empty 'output_dir'. Hence we remove it before calling 'do_convert_data'.
self.rm_tree(output_dir)
        # Likewise, the unzipped data directory should be removed in case a previous run failed inside 'do_convert_data'.
self.rm_tree(test_slack_unzipped_file)
user_data_fixture = ujson.loads(self.fixture_data('user_data.json', type='slack_fixtures'))
team_info_fixture = ujson.loads(self.fixture_data('team_info.json', type='slack_fixtures'))
mock_get_slack_api_data.side_effect = [user_data_fixture['members'], {}, team_info_fixture["team"]]
mock_requests_get.return_value.raw = get_test_image_file("img.png")
do_convert_data(test_slack_zip_file, output_dir, token)
self.assertTrue(os.path.exists(output_dir))
self.assertTrue(os.path.exists(output_dir + '/realm.json'))
realm_icons_path = os.path.join(output_dir, 'realm_icons')
realm_icon_records_path = os.path.join(realm_icons_path, 'records.json')
self.assertTrue(os.path.exists(realm_icon_records_path))
with open(realm_icon_records_path) as f:
records = ujson.load(f)
self.assertEqual(len(records), 2)
self.assertEqual(records[0]["path"], "0/icon.original")
self.assertTrue(os.path.exists(os.path.join(realm_icons_path, records[0]["path"])))
self.assertEqual(records[1]["path"], "0/icon.png")
self.assertTrue(os.path.exists(os.path.join(realm_icons_path, records[1]["path"])))
# test import of the converted slack data into an existing database
with self.settings(BILLING_ENABLED=False):
do_import_realm(output_dir, test_realm_subdomain)
realm = get_realm(test_realm_subdomain)
self.assertTrue(realm.name, test_realm_subdomain)
self.assertEqual(realm.icon_source, Realm.ICON_UPLOADED)
# test RealmAuditLog
realmauditlog = RealmAuditLog.objects.filter(realm=realm)
realmauditlog_event_type = {log.event_type for log in realmauditlog}
self.assertEqual(realmauditlog_event_type, {RealmAuditLog.SUBSCRIPTION_CREATED,
RealmAuditLog.REALM_PLAN_TYPE_CHANGED})
Realm.objects.filter(name=test_realm_subdomain).delete()
remove_folder(output_dir)
# remove tar file created in 'do_convert_data' function
os.remove(output_dir + '.tar.gz')
self.assertFalse(os.path.exists(output_dir))
def test_message_files(self) -> None:
alice_id = 7
alice = dict(
id=alice_id,
profile=dict(
email='alice@example.com',
),
)
files = [
dict(
url_private='files.slack.com/apple.png',
title='Apple',
name='apple.png',
mimetype='image/png',
timestamp=9999,
created=8888,
size=3000000,
),
dict(
url_private='example.com/banana.zip',
title='banana',
),
]
message = dict(
user=alice_id,
files=files,
)
domain_name = 'example.com'
realm_id = 5
message_id = 99
slack_user_id = 'alice'
users = [alice]
slack_user_id_to_zulip_user_id = {
'alice': alice_id,
}
zerver_attachment = [] # type: List[Dict[str, Any]]
uploads_list = [] # type: List[Dict[str, Any]]
info = process_message_files(
message=message,
domain_name=domain_name,
realm_id=realm_id,
message_id=message_id,
slack_user_id=slack_user_id,
users=users,
slack_user_id_to_zulip_user_id=slack_user_id_to_zulip_user_id,
zerver_attachment=zerver_attachment,
uploads_list=uploads_list,
)
self.assertEqual(len(zerver_attachment), 1)
self.assertEqual(len(uploads_list), 1)
image_path = zerver_attachment[0]['path_id']
self.assertIn('/SlackImportAttachment/', image_path)
expected_content = '[Apple](/user_uploads/{image_path})\n[banana](example.com/banana.zip)'.format(image_path=image_path)
self.assertEqual(info['content'], expected_content)
self.assertTrue(info['has_link'])
self.assertTrue(info['has_image'])
self.assertEqual(uploads_list[0]['s3_path'], image_path)
self.assertEqual(uploads_list[0]['realm_id'], realm_id)
self.assertEqual(uploads_list[0]['user_profile_email'], 'alice@example.com')
|
import pytest
import mfr
import mfr_rst
def test_detect(fakefile):
# set filename to have .rst extension
fakefile.name = 'mydoc.rst'
handler = mfr_rst.Handler()
assert handler.detect(fakefile) is True
@pytest.mark.parametrize('filename', [
'other.rs',
'otherrst',
'other',
'other.',
])
def test_does_not_detect_other_extensions(fakefile, filename):
fakefile.name = filename
handler = mfr_rst.Handler()
assert handler.detect(fakefile) is False
def test_render_rst_returns_render_result():
with open('mfr_rst/tests/test.rst') as fp:
result = mfr_rst.render.render_rst(fp)
assert type(result) == mfr.core.RenderResult
|
GET_INVERTER_REALTIME_DATA_SCOPE_DEVICE = {
"timestamp": {"value": "2020-09-18T14:14:24-07:00"},
"status": {"Code": 0, "Reason": "", "UserMessage": ""},
"energy_day": {"value": 6000, "unit": "Wh"},
"energy_total": {"value": 35611000, "unit": "Wh"},
"energy_year": {"value": 3310000, "unit": "Wh"},
"frequency_ac": {"value": 60, "unit": "Hz"},
"current_ac": {"value": 7.31, "unit": "A"},
"current_dc": {"value": 6.54, "unit": "A"},
"power_ac": {"value": 1762, "unit": "W"},
"voltage_ac": {"value": 241, "unit": "V"},
"voltage_dc": {"value": 286, "unit": "V"},
}
GET_INVERTER_REALTIME_DATA_SYSTEM = {
"timestamp": {"value": "2020-09-18T14:13:49-07:00"},
"status": {"Code": 0, "Reason": "", "UserMessage": ""},
"energy_day": {"value": 6000, "unit": "Wh"},
"energy_total": {"value": 35611000, "unit": "Wh"},
"energy_year": {"value": 3310000, "unit": "Wh"},
"power_ac": {"value": 1764, "unit": "W"},
"inverters": {
"1": {
"energy_day": {"value": 6000, "unit": "Wh"},
"energy_total": {"value": 35611000, "unit": "Wh"},
"energy_year": {"value": 3310000, "unit": "Wh"},
"power_ac": {"value": 1764, "unit": "W"},
}
},
}
|
from __future__ import division
from bisect import bisect
from collections import namedtuple
from math import sqrt, hypot
# a planner computes a motion profile for a list of (x, y) points
class Planner(object):
def __init__(self, acceleration, max_velocity, corner_factor):
self.acceleration = acceleration
self.max_velocity = max_velocity
self.corner_factor = corner_factor
def plan(self, points):
return constant_acceleration_plan(
points, self.acceleration, self.max_velocity, self.corner_factor
)
def plan_all(self, paths):
return [self.plan(path) for path in paths]
# a plan is a motion profile generated by the planner
class Plan(object):
def __init__(self, blocks):
self.blocks = blocks
self.ts = [] # start time of each block
self.ss = [] # start distance of each block
t = 0
s = 0
for b in blocks:
self.ts.append(t)
self.ss.append(s)
t += b.t
s += b.s
self.t = t # total time
        self.s = s # total distance
def instant(self, t):
t = max(0, min(self.t, t)) # clamp t
i = bisect(self.ts, t) - 1 # find block for t
return self.blocks[i].instant(t - self.ts[i], self.ts[i], self.ss[i])
# a block is a constant acceleration for a duration of time
class Block(object):
def __init__(self, a, t, vi, p1, p2):
self.a = a
self.t = t
self.vi = vi
self.p1 = p1
self.p2 = p2
self.s = p1.distance(p2)
def instant(self, t, dt=0, ds=0):
t = max(0, min(self.t, t)) # clamp t
a = self.a
v = self.vi + self.a * t
s = self.vi * t + self.a * t * t / 2
s = max(0, min(self.s, s)) # clamp s
p = self.p1.lerps(self.p2, s)
return Instant(t + dt, p, s + ds, v, a)
# an instant gives position, velocity, etc. at a single point in time
Instant = namedtuple("Instant", ["t", "p", "s", "v", "a"])
# a = acceleration
# v = velocity
# s = distance
# t = time
# i = initial
# f = final
# vf = vi + a * t
# s = (vf + vi) / 2 * t
# s = vi * t + a * t * t / 2
# vf * vf = vi * vi + 2 * a * s
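# worked example (illustrative numbers only): starting from rest (vi = 0)
# with a = 2 and t = 3, the relations above give vf = 0 + 2 * 3 = 6 and
# s = 0 * 3 + 2 * 3 * 3 / 2 = 9; as a check, vf * vf = 36 = vi * vi + 2 * a * s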
EPS = 1e-9
_Point = namedtuple("Point", ["x", "y"])
class Point(_Point):
def length(self):
return hypot(self.x, self.y)
def normalize(self):
d = self.length()
if d == 0:
return Point(0, 0)
return Point(self.x / d, self.y / d)
def distance(self, other):
return hypot(self.x - other.x, self.y - other.y)
def distance_squared(self, other):
return (self.x - other.x) ** 2 + (self.y - other.y) ** 2
def add(self, other):
return Point(self.x + other.x, self.y + other.y)
def sub(self, other):
return Point(self.x - other.x, self.y - other.y)
def mul(self, factor):
return Point(self.x * factor, self.y * factor)
def dot(self, other):
return self.x * other.x + self.y * other.y
def lerps(self, other, s):
v = other.sub(self).normalize()
return self.add(v.mul(s))
def segment_distance(self, v, w):
p = self
l2 = v.distance_squared(w)
if l2 == 0:
return p.distance(v)
t = ((p.x - v.x) * (w.x - v.x) + (p.y - v.y) * (w.y - v.y)) / l2
t = max(0, min(1, t))
x = v.x + t * (w.x - v.x)
y = v.y + t * (w.y - v.y)
q = Point(x, y)
return p.distance(q)
Triangle = namedtuple("Triangle", ["s1", "s2", "t1", "t2", "vmax", "p1", "p2", "p3"])
def triangle(s, vi, vf, a, p1, p3):
# compute a triangular profile: accelerating, decelerating
s1 = (2 * a * s + vf * vf - vi * vi) / (4 * a)
s2 = s - s1
vmax = (vi * vi + 2 * a * s1) ** 0.5
t1 = (vmax - vi) / a
t2 = (vf - vmax) / -a
p2 = p1.lerps(p3, s1)
return Triangle(s1, s2, t1, t2, vmax, p1, p2, p3)
Trapezoid = namedtuple(
"Trapezoid", ["s1", "s2", "s3", "t1", "t2", "t3", "p1", "p2", "p3", "p4"]
)
def trapezoid(s, vi, vmax, vf, a, p1, p4):
# compute a trapezoidal profile: accelerating, cruising, decelerating
t1 = (vmax - vi) / a
s1 = (vmax + vi) / 2 * t1
t3 = (vf - vmax) / -a
s3 = (vf + vmax) / 2 * t3
s2 = s - s1 - s3
t2 = s2 / vmax
p2 = p1.lerps(p4, s1)
p3 = p1.lerps(p4, s - s3)
return Trapezoid(s1, s2, s3, t1, t2, t3, p1, p2, p3, p4)
def corner_velocity(s1, s2, vmax, a, delta):
# compute a maximum velocity at the corner of two segments
# https://onehossshay.wordpress.com/2011/09/24/improving_grbl_cornering_algorithm/
cosine = -s1.vector.dot(s2.vector)
    if abs(cosine - 1) < EPS:
        # the two segments reverse direction, so come to a full stop at the corner
        return 0
    sine = sqrt((1 - cosine) / 2)
    if abs(sine - 1) < EPS:
        # the two segments are collinear, so no slowdown is needed
        return vmax
v = sqrt((a * delta * sine) / (1 - sine))
return min(v, vmax)
class Segment(object):
# a segment is a line segment between two points, which will be broken
# up into blocks by the planner
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.length = p1.distance(p2)
self.vector = p2.sub(p1).normalize()
self.max_entry_velocity = 0
self.entry_velocity = 0
self.blocks = []
class Throttler(object):
    # limits the velocity at each point so that, within a single timeslice (dt),
    # the straight chord the device actually traces stays within `threshold` of
    # the original path
def __init__(self, points, vmax, dt, threshold):
self.points = points
self.vmax = vmax
self.dt = dt
self.threshold = threshold
self.distances = []
prev = points[0]
d = 0
for point in points:
d += prev.distance(point)
self.distances.append(d)
prev = point
def lookup(self, d):
return bisect(self.distances, d) - 1
def is_feasible(self, i0, v):
d = v * self.dt
x0 = self.distances[i0]
x1 = x0 + d
i1 = self.lookup(x1)
if i0 == i1:
return True
p0 = self.points[i0]
p10 = self.points[i1]
try:
p11 = self.points[i1 + 1]
except IndexError:
p11 = p10
s = x1 - self.distances[i1]
p1 = p10.lerps(p11, s)
i = i0 + 1
while i <= i1:
p = self.points[i]
if p.segment_distance(p0, p1) > self.threshold:
return False
i += 1
return True
def compute_max_velocity(self, index):
if self.is_feasible(index, self.vmax):
return self.vmax
        # bisection search for the largest feasible velocity below vmax
        lo = 0
        hi = self.vmax
for _ in range(16):
v = (lo + hi) / 2
if self.is_feasible(index, v):
lo = v
else:
hi = v
v = lo
return v
def compute_max_velocities(self):
return [self.compute_max_velocity(i) for i in range(len(self.points))]
def constant_acceleration_plan(points, a, vmax, cf):
# make sure points are Point objects
points = [Point(x, y) for x, y in points]
# the throttler reduces speeds based on the discrete timeslicing nature of
# the device
# TODO: expose parameters
throttler = Throttler(points, vmax, 0.02, 0.001)
max_velocities = throttler.compute_max_velocities()
# create segments for each consecutive pair of points
segments = [Segment(p1, p2) for p1, p2 in zip(points, points[1:])]
# compute a max_entry_velocity for each segment
# based on the angle formed by the two segments at the vertex
for v, s1, s2 in zip(max_velocities, segments, segments[1:]):
s1.max_entry_velocity = min(s1.max_entry_velocity, v)
s2.max_entry_velocity = corner_velocity(s1, s2, vmax, a, cf)
# add a dummy segment at the end to force a final velocity of zero
segments.append(Segment(points[-1], points[-1]))
# loop over segments
i = 0
while i < len(segments) - 1:
# pull out some variables
segment = segments[i]
next_segment = segments[i + 1]
s = segment.length
vi = segment.entry_velocity
vexit = next_segment.max_entry_velocity
p1 = segment.p1
p2 = segment.p2
# determine which profile to use for this segment
m = triangle(s, vi, vexit, a, p1, p2)
if m.s1 < -EPS:
# too fast! update max_entry_velocity and backtrack
segment.max_entry_velocity = sqrt(vexit * vexit + 2 * a * s)
i -= 1
elif m.s2 < 0:
# accelerate
vf = sqrt(vi * vi + 2 * a * s)
t = (vf - vi) / a
segment.blocks = [
Block(a, t, vi, p1, p2),
]
next_segment.entry_velocity = vf
i += 1
elif m.vmax > vmax:
# accelerate, cruise, decelerate
z = trapezoid(s, vi, vmax, vexit, a, p1, p2)
segment.blocks = [
Block(a, z.t1, vi, z.p1, z.p2),
Block(0, z.t2, vmax, z.p2, z.p3),
Block(-a, z.t3, vmax, z.p3, z.p4),
]
next_segment.entry_velocity = vexit
i += 1
else:
# accelerate, decelerate
segment.blocks = [
Block(a, m.t1, vi, m.p1, m.p2),
Block(-a, m.t2, m.vmax, m.p2, m.p3),
]
next_segment.entry_velocity = vexit
i += 1
# concatenate all of the blocks
blocks = []
for segment in segments:
blocks.extend(segment.blocks)
# filter out zero-duration blocks and return
blocks = [b for b in blocks if b.t > EPS]
return Plan(blocks)
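# A minimal usage sketch of the planner above. Illustrative only: the
# acceleration, max_velocity and corner_factor values here are arbitrary
# assumptions, not recommended defaults.
if __name__ == "__main__":
    demo_planner = Planner(acceleration=8, max_velocity=4, corner_factor=0.001)
    demo_plan = demo_planner.plan([(0, 0), (1, 0), (1, 1)])
    # total time and total distance of the resulting motion profile
    print(demo_plan.t, demo_plan.s)
    # position, velocity and acceleration halfway through the motion
    print(demo_plan.instant(demo_plan.t / 2))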
|
"""Logic Blocks devices."""
from typing import Any, List
from mpf.core.delays import DelayManager
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.machine import MachineController
from mpf.core.mode import Mode
from mpf.core.mode_device import ModeDevice
from mpf.core.player import Player
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
class LogicBlockState:
"""Represents the state of a logic_block."""
__slots__ = ["enabled", "completed", "value"]
def __init__(self, start_value):
"""Initialise state."""
self.enabled = False
self.completed = False
self.value = start_value
@DeviceMonitor("value", "enabled", "completed")
class LogicBlock(SystemWideDevice, ModeDevice):
"""Parent class for each of the logic block classes."""
__slots__ = ["_state", "_start_enabled", "player_state_variable"]
def __init__(self, machine: MachineController, name: str) -> None:
"""Initialize logic block."""
super().__init__(machine, name)
self._state = None # type: LogicBlockState
self._start_enabled = None # type: bool
self.player_state_variable = "{}_state".format(self.name)
'''player_var: (logic_block)_state
desc: A dictionary that stores the internal state of the logic block
with the name (logic_block). (In other words, a logic block called
*mode1_hit_counter* will store its state in a player variable called
``mode1_hit_counter_state``).
        The state stored in this variable includes whether the logic
        block is enabled and whether it's complete.
'''
async def _initialize(self):
await super()._initialize()
if self.config['start_enabled'] is not None:
self._start_enabled = self.config['start_enabled']
else:
self._start_enabled = not self.config['enable_events']
def add_control_events_in_mode(self, mode: Mode) -> None:
"""Do not auto enable this device in modes."""
def validate_and_parse_config(self, config: dict, is_mode_config: bool, debug_prefix: str = None) -> dict:
"""Validate logic block config."""
del is_mode_config
del debug_prefix
if 'events_when_complete' not in config:
config['events_when_complete'] = ['logicblock_' + self.name + '_complete']
if 'events_when_hit' not in config:
config['events_when_hit'] = ['logicblock_' + self.name + '_hit']
self.machine.config_validator.validate_config(
self.config_section, config, self.name, ("device", "logic_blocks_common"))
self._configure_device_logging(config)
return config
def can_exist_outside_of_game(self) -> bool:
"""Return true if persist_state is not set."""
return not bool(self.config['persist_state'])
def get_start_value(self) -> Any:
"""Return the start value for this block."""
raise NotImplementedError("implement")
async def device_added_system_wide(self):
"""Initialise internal state."""
self._state = LogicBlockState(self.get_start_value())
await super().device_added_system_wide()
if not self.config['enable_events']:
self.enable()
if self.config['persist_state']:
self.raise_config_error("Cannot set persist_state for system-wide logic_blocks", 1)
self.post_update_event()
def device_loaded_in_mode(self, mode: Mode, player: Player):
"""Restore internal state from player if persist_state is set or create new state."""
super().device_loaded_in_mode(mode, player)
if self.config['persist_state']:
if not player.is_player_var(self.player_state_variable):
player[self.player_state_variable] = LogicBlockState(self.get_start_value())
# enable device ONLY when we create a new entry in the player
if self._start_enabled:
mode.add_mode_event_handler("mode_{}_starting".format(mode.name),
self.event_enable, priority=mode.priority + 1)
self._state = player[self.player_state_variable]
else:
self._state = LogicBlockState(self.get_start_value())
if self._start_enabled:
mode.add_mode_event_handler("mode_{}_starting".format(mode.name),
self.event_enable, priority=mode.priority + 1)
mode.add_mode_event_handler("mode_{}_starting".format(mode.name), self.post_update_event)
def device_removed_from_mode(self, mode: Mode):
"""Unset internal state to prevent leakage."""
super().device_removed_from_mode(mode)
self._state = None
@property
def value(self):
"""Return value or None if that is currently not possible."""
if self._state:
return self._state.value
return None
@property
def enabled(self):
"""Return if enabled."""
return self._state and self._state.enabled
@enabled.setter
def enabled(self, value):
"""Set enable."""
self._state.enabled = value
@property
def completed(self):
"""Return if completed."""
return self._state and self._state.completed
@completed.setter
def completed(self, value):
"""Set if completed."""
self._state.completed = value
def post_update_event(self, **kwargs):
"""Post an event to notify about changes."""
del kwargs
value = self._state.value
enabled = self._state.enabled
self.machine.events.post("logicblock_{}_updated".format(self.name), value=value, enabled=enabled)
'''event: logicblock_(name)_updated
desc: The logic block called "name" has changed.
        This might happen when the block advances, is reset, or is restored.
args:
value: The current value of this block.
        enabled: Whether this block is enabled or not.
'''
def enable(self):
"""Enable this logic block.
Automatically called when one of the
enable_event events is posted. Can also manually be called.
"""
super().enable()
self.debug_log("Enabling")
self.enabled = True
self.post_update_event()
def _post_hit_events(self, **kwargs):
self.post_update_event()
for event in self.config['events_when_hit']:
self.machine.events.post(event, **kwargs)
'''event: logicblock_(name)_hit
desc: The logic block "name" was just hit.
Note that this is the default hit event for logic blocks,
but this can be changed in a logic block's "events_when_hit:"
setting, so this might not be the actual event that's posted for
all logic blocks in your machine.
args: depend on the type
'''
@event_handler(0)
def event_disable(self, **kwargs):
"""Event handler for disable event."""
del kwargs
self.disable()
def disable(self):
"""Disable this logic block.
Automatically called when one of the
disable_event events is posted. Can also manually be called.
"""
self.debug_log("Disabling")
self.enabled = False
self.post_update_event()
@event_handler(4)
def event_reset(self, **kwargs):
"""Event handler for reset event."""
del kwargs
self.reset()
def reset(self):
"""Reset the progress towards completion of this logic block.
Automatically called when one of the reset_event events is called.
Can also be manually called.
"""
self.completed = False
self._state.value = self.get_start_value()
self.debug_log("Resetting")
self.post_update_event()
@event_handler(5)
def event_restart(self, **kwargs):
"""Event handler for restart event."""
del kwargs
self.restart()
def restart(self):
"""Restart this logic block by calling reset() and enable().
Automatically called when one of the restart_event events is called.
Can also be manually called.
"""
self.debug_log("Restarting (resetting then enabling)")
self.reset()
self.enable()
def complete(self):
"""Mark this logic block as complete.
Posts the 'events_when_complete'
events and optionally restarts this logic block or disables it,
depending on this block's configuration settings.
"""
# if already completed do not complete again
if self.completed:
return
# otherwise mark as completed
self.completed = True
self.debug_log("Complete")
if self.config['events_when_complete']:
for event in self.config['events_when_complete']:
self.machine.events.post(event)
'''event: logicblock_(name)_complete
desc: The logic block called "name" has just been completed.
Note that this is the default completion event for logic blocks, but
this can be changed in a logic block's "events_when_complete:" setting,
so this might not be the actual event that's posted for all logic
blocks in your machine.
'''
# call reset to reset completion
if self.config['reset_on_complete']:
self.reset()
# disable block
if self.config['disable_on_complete']:
self.disable()
class Counter(LogicBlock):
"""A type of LogicBlock that tracks multiple hits of a single event.
This counter can be configured to track hits towards a specific end-goal
(like number of tilt hits to tilt), or it can be an open-ended count (like
total number of ramp shots).
It can also be configured to count up or to count down, and can have a
configurable counting interval.
"""
config_section = 'counters'
collection = 'counters'
class_label = 'counter'
__slots__ = ["delay", "ignore_hits", "hit_value"]
def __init__(self, machine: MachineController, name: str) -> None:
"""Initialise counter."""
super().__init__(machine, name)
self.debug_log("Creating Counter LogicBlock")
self.delay = DelayManager(self.machine)
self.ignore_hits = False
self.hit_value = -1
async def _initialize(self):
await super()._initialize()
self.hit_value = self.config['count_interval']
if self.config['direction'] == 'down' and self.hit_value > 0:
self.hit_value *= -1
elif self.config['direction'] == 'up' and self.hit_value < 0:
self.hit_value *= -1
# Add control events if included in the config
if self.config['control_events']:
self._setup_control_events(self.config['control_events'])
def add_control_events_in_mode(self, mode: Mode) -> None:
"""Do not auto enable this device in modes."""
def _setup_control_events(self, event_list):
self.debug_log("Setting up control events")
kwargs = {}
for entry in event_list:
if entry['action'] in ('add', 'subtract', 'jump'):
handler = getattr(self, "event_{}".format(entry['action']))
kwargs = {'value': entry['value']}
else:
raise AssertionError("Invalid control_event action {} in mode".
format(entry['action']), self.name)
self.machine.events.add_handler(entry['event'], handler, **kwargs)
def check_complete(self, count_complete_value=None):
"""Check if counter is completed.
        Return True if the counter has reached or surpassed its specified
        completion value; return False if there is no completion criterion
        or the counter is not yet complete.
"""
# If count_complete_value was not passed, obtain it
if count_complete_value is None and self.config.get("count_complete_value"):
count_complete_value = self.config["count_complete_value"].evaluate([])
if count_complete_value is not None:
if self.config['direction'] == 'up':
return self._state.value >= count_complete_value
if self.config['direction'] == 'down':
return self._state.value <= count_complete_value
return False
def event_add(self, value, **kwargs):
"""Add to the value of this counter.
Args:
value: Value to add to the counter.
kwargs: Additional arguments.
"""
evaluated_value = value.evaluate_or_none(kwargs)
if evaluated_value is None:
self.log.warning("Placeholder %s for counter add did not evaluate with args %s", value, kwargs)
return
# Add to the counter the specified value
self._state.value += evaluated_value
# Check if count is complete given the updated value
if self.check_complete():
self.complete()
def event_subtract(self, value, **kwargs):
"""Subtract from the value of this counter.
Args:
value: Value to subtract from the counter.
kwargs: Additional arguments.
"""
evaluated_value = value.evaluate_or_none(kwargs)
if evaluated_value is None:
self.log.warning("Placeholder %s for counter substract did not evaluate with args %s", value, kwargs)
return
# Subtract from the counter the specified value
self._state.value -= evaluated_value
# Check if count is complete given the updated value
if self.check_complete():
self.complete()
def event_jump(self, value, **kwargs):
"""Set the internal value of the counter.
Args:
            value: Value to jump to.
kwargs: Additional arguments.
"""
evaluated_value = value.evaluate_or_none(kwargs)
if evaluated_value is None:
self.log.warning("Placeholder %s for counter jump did not evaluate with args %s", value, kwargs)
return
# Set the internal value of the counter to the specified value
self._state.value = evaluated_value
# Check if count is complete given the updated value
if self.check_complete():
self.complete()
def get_start_value(self) -> int:
"""Return start count."""
return self.config['starting_count'].evaluate([])
def validate_and_parse_config(self, config: dict, is_mode_config: bool, debug_prefix: str = None) -> dict:
"""Validate logic block config."""
if 'events_when_hit' not in config:
# for compatibility post the same default as previously for
# counters. This one is deprecated.
config['events_when_hit'] = ['counter_' + self.name + '_hit']
# this is the one moving forward
config['events_when_hit'].append('logicblock_' + self.name + '_hit')
return super().validate_and_parse_config(config, is_mode_config, debug_prefix)
@event_handler(0)
def event_count(self, **kwargs):
"""Event handler for count events."""
del kwargs
self.count()
def count(self):
"""Increase the hit progress towards completion.
This method is also automatically called when one of the
``count_events`` is posted.
"""
if not self.enabled:
return
count_complete_value = self.config['count_complete_value'].evaluate([]) if self.config['count_complete_value']\
is not None else None
if not self.ignore_hits:
self._state.value += self.hit_value
self.debug_log("Processing Count change. Total: %s", self._state.value)
args = {
"count": self._state.value
}
if count_complete_value is not None:
args['remaining'] = count_complete_value - self._state.value
self._post_hit_events(**args)
if self.check_complete(count_complete_value):
self.complete()
if self.config['multiple_hit_window']:
self.debug_log("Beginning Ignore Hits")
self.ignore_hits = True
self.delay.add(name='ignore_hits_within_window',
ms=self.config['multiple_hit_window'],
callback=self.stop_ignoring_hits)
def stop_ignoring_hits(self, **kwargs):
"""Cause the Counter to stop ignoring subsequent hits that occur within the 'multiple_hit_window'.
Automatically called when the window time expires. Can safely be manually called.
"""
del kwargs
self.debug_log("Ending Ignore hits")
self.ignore_hits = False
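# Illustrative only: a counter configured via the option names this module reads
# (count_events, count_complete_value, direction, starting_count,
# multiple_hit_window, events_when_complete). The YAML layout of a machine
# config file is an assumption here, not something defined in this module.
#
#   counters:
#     tilt_counter:
#       count_events: tilt_hit
#       count_complete_value: 3
#       direction: up
#       starting_count: 0
#       events_when_complete: tilt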
class Accrual(LogicBlock):
"""A type of LogicBlock which tracks many different events (steps) towards a goal.
The steps are able to happen in any order.
"""
config_section = 'accruals'
collection = 'accruals'
class_label = 'accrual'
__slots__ = []
@property
def config_section_name(self):
"""Return config section."""
return "accrual"
def __init__(self, machine, name):
"""Initialise Accrual."""
super().__init__(machine, name)
self.debug_log("Creating Accrual LogicBlock")
async def _initialize(self):
await super()._initialize()
self.setup_event_handlers()
def get_start_value(self) -> List[bool]:
"""Return start states."""
return [False] * len(self.config['events'])
def setup_event_handlers(self):
"""Add event handlers."""
for step, events in enumerate(self.config['events']):
for event in Util.string_to_list(events):
self.machine.events.add_handler(event, self.hit, step=step)
def hit(self, step: int, **kwargs):
"""Increase the hit progress towards completion.
        Automatically called when one of the configured step events is
        posted. Can also be called manually.
Args:
step: Integer of the step number (0 indexed) that was just hit.
"""
del kwargs
if not self.enabled:
return
self.debug_log("Processing hit for step: %s", step)
if not self._state.value[step]:
self._state.value[step] = True
self.debug_log("Status: %s", self._state.value)
self._post_hit_events(step=step)
if self._state.value.count(True) == len(self._state.value):
self.complete()
class Sequence(LogicBlock):
"""A type of LogicBlock which tracks many different events (steps) towards a goal.
The steps have to happen in order.
"""
config_section = 'sequences'
collection = 'sequences'
class_label = 'sequence'
__slots__ = []
@property
def config_section_name(self):
"""Return config section."""
return "sequence"
def __init__(self, machine: MachineController, name: str) -> None:
"""Initialise sequence."""
super().__init__(machine, name)
self.debug_log("Creating Sequence LogicBlock")
async def _initialize(self):
"""Initialise sequence."""
await super()._initialize()
self.setup_event_handlers()
def get_start_value(self) -> int:
"""Return start step."""
return 0
def setup_event_handlers(self):
"""Add the handlers for the current step."""
for step, events in enumerate(self.config['events']):
for event in Util.string_to_list(events):
# increase priority with steps to prevent advancing multiple steps at once
self.machine.events.add_handler(event, self.hit, step=step, priority=step)
def hit(self, step: int = None, **kwargs):
"""Increase the hit progress towards completion.
        Automatically called when one of the configured step events is
        posted. Can also be called manually.

        Args:
            step: Step number this hit belongs to. Hits for a step other
                than the current one are ignored.
"""
del kwargs
if not self.enabled:
return
if step is not None and step != self._state.value:
            # this hit was for a different step; ignore it
return
self.debug_log("Processing Hit")
self._state.value += 1
self._post_hit_events(step=self._state.value)
if self._state.value >= len(self.config['events']):
self.complete()
|
"""
OONI Probe Services API - URL prioritization
"""
from typing import List
import random
import time
from flask import Blueprint, current_app, request
from flask.json import jsonify
prio_bp = Blueprint("prio", "probe_services_prio")
# TODO add unit tests
test_items = {}
last_update_time = 0
def update_url_prioritization():
log = current_app.logger
log.info("Started update_url_prioritization")
# conn = connect_db(conf)
# cur = conn.cursor(cursor_factory=RealDictCursor)
log.info("Regenerating URL prioritization file")
sql = """SELECT priority, domain, url, cc, category_code FROM citizenlab"""
q = current_app.db_session.execute(sql)
entries = list(q.fetchall())
# Create dict: cc -> category_code -> [entry, ... ]
entries_by_country = {}
for e in entries:
country = e["cc"].upper()
if country not in entries_by_country:
entries_by_country[country] = {}
ccode = e["category_code"]
entries_by_country[country].setdefault(ccode, []).append(e)
# Merge ZZ into each country, so that "global" urls are given out to probes
# from every country. Also keep ZZ as valid cc in case a probe requests it
zz = entries_by_country["ZZ"]
for ccode, country_dict in entries_by_country.items():
for category_code, prio_test_items in zz.items():
country_dict.setdefault(category_code, []).extend(prio_test_items)
log.info("Update done: %d" % len(entries_by_country))
return entries_by_country
def algo_chao(s: List, k: int) -> List:
"""Chao weighted random sampling
"""
n = len(s)
assert len(s) >= k
wsum = 0
r = s[:k]
assert len(r) == k
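    # Reservoir-style weighted sampling: the first k items seed the reservoir;
    # each later item is accepted with probability priority / (total priority
    # seen so far) and, if accepted, replaces a random reservoir slot.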
for i in range(0, n):
wsum = wsum + s[i]["priority"]
if i < k:
continue
p = s[i]["priority"] / wsum # probability for this item
j = random.random()
if j <= p:
pos = random.randint(0, k - 1)
r[pos] = s[i]
return r
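# Illustrative sketch only (added example, not part of the original module):
# how algo_chao might be used with citizenlab-style rows. The URLs and
# priorities below are made up for demonstration.
def _algo_chao_example():  # hypothetical helper; nothing in this module calls it
    demo = [
        {"priority": 10, "url": "https://a.example", "category_code": "NEWS"},
        {"priority": 5, "url": "https://b.example", "category_code": "NEWS"},
        {"priority": 1, "url": "https://c.example", "category_code": "NEWS"},
    ]
    # Ask for 2 of the 3 entries; higher-priority entries survive more often.
    return algo_chao(demo, 2)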
def generate_test_list(country_code: str, category_codes: str, limit: int):
global test_items, last_update_time
log = current_app.logger
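    # Refresh the cached prioritization data at most once per refresh window
    # (hard-coded to 100 seconds here); if the refresh fails, keep serving the
    # previously cached test_items.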
    if last_update_time < time.time() - 100:  # TODO: use conf.refresh_interval
last_update_time = time.time()
try:
test_items = update_url_prioritization()
except Exception as e:
log.error(e, exc_info=1)
candidates_d = test_items[country_code] # category_code -> [test_item, ... ]
if category_codes:
category_codes = [c.strip().upper() for c in category_codes.split(",")]
else:
category_codes = candidates_d.keys()
candidates = []
for ccode in category_codes:
s = candidates_d.get(ccode, [])
candidates.extend(s)
log.info("%d candidates", len(candidates))
if limit == -1:
limit = 100
limit = min(limit, len(candidates))
selected = algo_chao(candidates, limit)
out = []
for entry in selected:
cc = "XX" if entry["cc"] == "ZZ" else entry["cc"].upper()
out.append(
{
"category_code": entry["category_code"],
"url": entry["url"],
"country_code": cc,
}
)
return out
@prio_bp.route("/api/v1/test-list/urls")
def list_test_urls():
"""Generate test URL list with prioritization
https://orchestrate.ooni.io/api/v1/test-list/urls?country_code=IT
---
parameters:
- name: country_code
in: query
type: string
description: Two letter, uppercase country code
- name: category_code
in: query
type: string
description: Comma separated list of URL categories, all uppercase
responses:
'200':
description: URL test list
"""
log = current_app.logger
param = request.args.get
try:
country_code = (param("country_code") or "ZZ").upper()
category_codes = param("category_code")
limit = int(param("limit") or -1)
test_items = generate_test_list(country_code, category_codes, limit)
out = {
"metadata": {
"count": len(test_items),
"current_page": -1,
"limit": -1,
"next_url": "",
"pages": 1,
},
"results": test_items,
}
return jsonify(out)
except Exception as e:
log.error(e, exc_info=1)
return jsonify({})
|
import json
import logging
import ibmsecurity.utilities.tools
from ibmsecurity.utilities import tools
from io import open
logger = logging.getLogger(__name__)
uri = "/extensions"
requires_modules = None
requires_version = "9.0.5.0"
try:
basestring
except NameError:
basestring = (str, bytes)
def get_all(isamAppliance, check_mode=False, force=False):
"""
Retrieve installed extensions list
"""
return isamAppliance.invoke_get("Retrieve installed extensions list",
"{0}/".format(uri), requires_modules=requires_modules,
requires_version=requires_version)
def add(isamAppliance, extension, config_data=None, third_party_package=None, check_mode=False, force=False):
"""
Installing an Extension
:param isamAppliance:
:param extension: path/filename to .ext file
:param config_data: all the config data in a single string. For example, "agentName:ISAM_Monitoring,ipAddress:10.10.10.10,port:9998"
:param third_party_package: an array of the supporting files required.
:param check_mode:
:param force:
:return:
"""
try:
id = inspect(isamAppliance, extension)
except Exception as e:
warning_str = "Exception occurred: {0}".format(e)
return isamAppliance.create_return_object(warnings=[warning_str])
if config_data:
config_str = '{extId:' + id + ',' + config_data + '}'
else:
config_str = '{extId:' + id + '}'
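    # Per the docstring example, the resulting string looks like:
    #   '{extId:<id>,agentName:ISAM_Monitoring,ipAddress:10.10.10.10,port:9998}'
    # Note this is the appliance's own key:value syntax rather than strict JSON.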
files = {}
files['extension_support_package'] = (tools.path_leaf(extension), open(extension, 'rb'))
files['config_data'] = (None, config_str)
if third_party_package:
if isinstance(third_party_package, basestring):
files['third_party_package'] = (tools.path_leaf(third_party_package), open(third_party_package, 'rb'))
elif len(third_party_package) == 1:
files['third_party_package'] = (tools.path_leaf(third_party_package[0]), open(third_party_package[0], 'rb'))
else:
counter = 0
for file in third_party_package:
third_party = 'third_party_package{0}'.format(counter)
files[third_party] = (tools.path_leaf(file), open(file, 'rb'))
counter = counter + 1
if check_mode:
return isamAppliance.create_return_object(changed=True)
return isamAppliance.invoke_post_files(
"Installing an Extension",
"{0}/activate".format(uri),
[],
files,
requires_modules=requires_modules,
requires_version=requires_version,
json_response=False,
data_as_files=True)
def update(isamAppliance, extId, config_data=None, third_party_package=None, check_mode=False, force=False):
"""
Update an existing installed extension
:param isamAppliance:
:param extId: extension id
:param config_data: all the config data in a single string. For example, "agentName:ISAM_Monitoring,ipAddress:10.10.10.10,port:9998"
:param third_party_package: list of third_party files
:param check_mode:
:param force:
:return:
"""
if force is True or search(isamAppliance, extId=extId):
if check_mode:
return isamAppliance.create_return_object(changed=True)
else:
if config_data:
config_str = '{extId:' + extId + ',' + config_data + '}'
else:
config_str = '{extId:' + extId + '}'
files = {}
files['config_data'] = (None, config_str)
if third_party_package:
if isinstance(third_party_package, basestring):
files['third_party_package'] = (
tools.path_leaf(third_party_package), open(third_party_package, 'rb'))
elif len(third_party_package) == 1:
files['third_party_package'] = (
tools.path_leaf(third_party_package[0]), open(third_party_package[0], 'rb'))
else:
counter = 0
for file in third_party_package:
third_party = 'third_party_package{0}'.format(counter)
files[third_party] = (tools.path_leaf(file), open(file, 'rb'))
counter = counter + 1
return isamAppliance.invoke_post_files(
"Update an Extension",
"{0}/{1}".format(uri, extId),
[],
files,
requires_modules=requires_modules,
requires_version=requires_version,
json_response=False,
data_as_files=True)
return isamAppliance.create_return_object()
def set(isamAppliance, extension=None, extId=None, config_data=None, third_party_package=None, check_mode=False,
force=False):
if extId:
if search(isamAppliance, extId):
return update(isamAppliance=isamAppliance, extId=extId, config_data=config_data,
third_party_package=third_party_package, check_mode=check_mode, force=True)
else:
return add(isamAppliance=isamAppliance, extension=extension, config_data=config_data,
third_party_package=third_party_package, check_mode=check_mode, force=force)
return isamAppliance.create_return_object()
def delete(isamAppliance, extId, check_mode=False, force=False):
"""
Delete an installed extension
"""
if force is True or search(isamAppliance, extId=extId):
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Delete an installed extension",
"{0}/{1}".format(uri, extId))
return isamAppliance.create_return_object(changed=False)
def inspect(isamAppliance, extension, check_mode=False, force=False):
"""
Inspect the extension file to find the id for the extension.
:param isamAppliance:
:param extension:
:param check_mode:
:param force:
:return:
"""
obj = isamAppliance.invoke_post_files("Inspect extension",
"{0}/inspect".format(uri),
[{
'file_formfield': 'extension_support_package',
'filename': extension,
'mimetype': 'application/octet-stream'
}],
{
},
json_response=False, requires_modules=requires_modules,
requires_version=requires_version)
m_obj = obj['data']
m_obj = m_obj.replace('<textarea>', '')
m_obj = m_obj.replace('</textarea>', '')
json_obj = json.loads(m_obj)
return json_obj['id']
def search(isamAppliance, extId, check_mode=False, force=False):
"""
Search for the extension
"""
ret_obj = get_all(isamAppliance)
for obj in ret_obj['data']:
if obj['id'] == extId:
return True
return False
def compare(isamAppliance1, isamAppliance2):
"""
Compare extensions between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
if ret_obj1['data']:
del ret_obj1['data'][0]['date']
if ret_obj2['data']:
del ret_obj2['data'][0]['date']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['date'])
|
""" CCOBRA benchmark functionality.
.. rubric:: Submodules
.. autosummary::
:toctree: _autosummary
ccobra.benchmark.comparators
.. rubric:: Functions
.. autofunction:: dir_context
.. autofunction:: entry_point
.. autofunction:: fix_model_path
.. autofunction:: fix_rel_path
.. autofunction:: main
.. autofunction:: parse_arguments
.. autofunction:: silence_stdout
.. rubric:: Classes
.. autoclass:: Benchmark
:members:
.. autoclass:: ModelInfo
:members:
.. autoclass:: Evaluator
:members:
.. autoclass:: ModelImporter
:members:
.. autoclass:: EvaluationHandler
:members:
"""
from . import comparators
from .benchmark import Benchmark, ModelInfo, fix_rel_path, fix_model_path
from .contextmanager import dir_context
from .evaluator import Evaluator
from .modelimporter import ModelImporter
from .runner import entry_point, parse_arguments, main, silence_stdout
from .evaluation_handler import EvaluationHandler
|
"""
Google web search.
Run queries on Google and return results.
"""
import requests
from kochira import config
from kochira.service import Service, background, Config, coroutine
from kochira.userdata import UserData
service = Service(__name__, __doc__)
@service.config
class Config(Config):
api_key = config.Field(doc="Google API key.")
cx = config.Field(doc="Custom search engine ID.")
@service.command(r"!g (?P<term>.+?)$")
@service.command(r"(?:search for|google) (?P<term>.+?)\??$", mention=True)
@background
def search(ctx, term):
"""
Google.
Search for the given terms on Google.
"""
r = requests.get(
"https://www.googleapis.com/customsearch/v1",
params={
"key": ctx.config.api_key,
"cx": ctx.config.cx,
"q": term
}
).json()
results = r.get("items", [])
if not results:
ctx.respond(ctx._("Couldn't find anything matching \"{term}\".").format(term=term))
return
total = len(results)
ctx.respond(ctx._("({num} of {total}) {title}: {url}").format(
title=results[0]["title"],
url=results[0]["link"],
num=1,
total=total
))
@service.command(r"!image (?P<term>.+?)$")
@service.command(r"image(?: for)? (?P<term>.+?)\??$", mention=True)
@background
def image(ctx, term):
"""
Image search.
Search for the given terms on Google.
"""
r = requests.get(
"https://www.googleapis.com/customsearch/v1",
params={
"key": ctx.config.api_key,
"cx": ctx.config.cx,
"searchType": "image",
"q": term
}
).json()
results = r.get("items", [])
if not results:
ctx.respond(ctx._("Couldn't find anything matching \"{term}\".").format(term=term))
return
total = len(results)
ctx.respond(ctx._("({num} of {total}) {url}").format(
url=results[0]["link"],
num=1,
total=total
))
|
# Creating multiple topics
# Sam suddenly became a black sheep because she is responsible for
# an onslaught of text messages and notifications to department directors.
# No one will go to lunch with her anymore!
# To fix this, she decided to create a general topic per
# department for routine notifications, and a critical topic for urgent notifications.
# Managers will subscribe only to critical notifications,
# while supervisors can monitor general notifications.
# For example, the streets department would have
# 'streets_general' and 'streets_critical' as topics.
# She has initialized the SNS client and
# stored it in the sns variable.
# Help Sam create a tiered topic structure... and have friends again!
# Create list of departments
departments = ['trash', 'streets', 'water']
for dept in departments:
# For every department, create a general topic
sns.create_topic(Name="{}_general".format(dept))
# For every department, create a critical topic
sns.create_topic(Name="{}_critical".format(dept))
# Print all the topics in SNS
response = sns.list_topics()
print(response['Topics'])
# <script.py> output:
# [{'TopicArn': 'arn:aws:sns:us-east-1:123456789012:trash_general'}, {'TopicArn': 'arn:aws:sns:us-east-1:123456789012:trash_critical'}, {'TopicArn': 'arn:aws:sns:us-east-1:123456789012:streets_general'}, {'TopicArn': 'arn:aws:sns:us-east-1:123456789012:streets_critical'}, {'TopicArn': 'arn:aws:sns:us-east-1:123456789012:water_general'}, {'TopicArn': 'arn:aws:sns:us-east-1:123456789012:water_critical'}]
|
from typing import Tuple, List
from flask import jsonify
from flask.wrappers import Response
def wrapped_response(data: dict = None, status: int = 200, message: str = "") -> Tuple[Response, int]:
"""
Create a wrapped response to have uniform json response objects
"""
if type(data) is not dict and data is not None:
raise TypeError("Expected data to be type Dictionary")
response = {
"success": 200 <= status < 300,
"code": status,
"message": message,
"result": data,
}
return jsonify(response), status
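# Hypothetical usage sketch (comments only; the route and helper below are
# assumptions, not part of this module):
#
#     @app.route("/users/<int:user_id>")
#     def get_user(user_id):
#         user = lookup_user(user_id)  # assumed helper
#         if user is None:
#             return wrapped_response(status=404, message="User not found")
#         return wrapped_response(data=user.to_dict(), message="OK")
#
# Every endpoint then returns the same envelope, e.g.
#     {"success": false, "code": 404, "message": "User not found", "result": null}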
def serialize_list(items: List) -> List:
"""Serializes a list of SQLAlchemy Objects, exposing their attributes.
:param items - List of Objects that inherit from Mixin
:returns List of dictionaries
"""
    if not items:
return []
return [x.to_dict() for x in items]
|
# Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
defAWSerrors = dict()
defAWSerrors["AccessDenied"] = ("Access Denied",403)
defAWSerrors["AccountProblem"] = ("There is a problem with your AWS account that prevents the operation from completing successfully. Please use Contact Us.",403)
defAWSerrors["AmbiguousGrantByEmailAddress"] = ("The e-mail address you provided is associated with more than one account.",400)
defAWSerrors["BadDigest"] = ("The Content-MD5 you specified did not match what we received.",400)
defAWSerrors["BucketAlreadyExists"] = ("The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.",409)
defAWSerrors["BucketAlreadyOwnedByYou"] = ("Your previous request to create the named bucket succeeded and you already own it.",409)
defAWSerrors["BucketNotEmpty"] = ("The bucket you tried to delete is not empty.",409)
defAWSerrors["CredentialsNotSupported"] = ("This request does not support credentials.",400)
defAWSerrors["CrossLocationLoggingProhibited"] = ("Cross location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.",403)
defAWSerrors["EntityTooSmall"] = ("Your proposed upload is smaller than the minimum allowed object size.",400)
defAWSerrors["EntityTooLarge"] = ("Your proposed upload exceeds the maximum allowed object size.",400)
defAWSerrors["ExpiredToken"] = ("The provided token has expired.",400)
defAWSerrors["IllegalVersioningConfigurationException"] = ("Indicates that the Versioning configuration specified in the request is invalid.",400)
defAWSerrors["IncompleteBody"] = ("You did not provide the number of bytes specified by the Content-Length HTTP header",400)
defAWSerrors["IncorrectNumberOfFilesInPostRequest"] = ("POST requires exactly one file upload per request.",400)
defAWSerrors["InlineDataTooLarge"] = ("Inline data exceeds the maximum allowed size.",400)
defAWSerrors["InternalError"] = ("We encountered an internal error. Please try again.",500)
defAWSerrors["InvalidAccessKeyId"] = ("The AWS Access Key Id you provided does not exist in our records.",403)
defAWSerrors["InvalidAddressingHeader"] = ("You must specify the Anonymous role.",400)
defAWSerrors["InvalidArgument"] = ("Invalid Argument",400)
defAWSerrors["InvalidBucketName"] = ("The specified bucket is not valid.",400)
defAWSerrors["InvalidBucketState"] = ("The request is not valid with the current state of the bucket.",409)
defAWSerrors["InvalidDigest"] = ("The Content-MD5 you specified was invalid.",400)
defAWSerrors["InvalidLocationConstraint"] = ("The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.",400)
defAWSerrors["InvalidObjectState"] = ("The operation is not valid for the current state of the object.",403)
defAWSerrors["InvalidPart"] = ("One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.",400)
defAWSerrors["InvalidPartOrder"] = ("The list of parts was not in ascending order. The parts list must be specified in order by part number.",400)
defAWSerrors["InvalidPayer"] = ("All access to this object has been disabled.",403)
defAWSerrors["InvalidPolicyDocument"] = ("The content of the form does not meet the conditions specified in the policy document.",400)
defAWSerrors["InvalidRange"] = ("The requested range cannot be satisfied.",416)
defAWSerrors["InvalidRequest"] = ("SOAP requests must be made over an HTTPS connection.",400)
defAWSerrors["InvalidSecurity"] = ("The provided security credentials are not valid.",403)
defAWSerrors["InvalidSOAPRequest"] = ("The SOAP request body is invalid.",400)
defAWSerrors["InvalidStorageClass"] = ("The storage class you specified is not valid.",400)
defAWSerrors["InvalidTargetBucketForLogging"] = ("The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.",400)
defAWSerrors["InvalidToken"] = ("The provided token is malformed or otherwise invalid.",400)
defAWSerrors["InvalidURI"] = ("Couldn't parse the specified URI.",400)
defAWSerrors["KeyTooLong"] = ("Your key is too long.",400)
defAWSerrors["MalformedACLError"] = ("The XML you provided was not well-formed or did not validate against our published schema.",400)
defAWSerrors["MalformedPOSTRequest"] = ("The body of your POST request is not well-formed multipart/form-data.",400)
defAWSerrors["MalformedXML"] = ("This happens when the user sends a malformed xml (xml that doesn't conform to the published xsd) for the configuration. The error message is: The XML you provided was not well-formed or did not validate against our published schema.",400)
defAWSerrors["MaxMessageLengthExceeded"] = ("Your request was too big.",400)
defAWSerrors["MaxPostPreDataLengthExceededError"] = ("Your POST request fields preceding the upload file were too large.",400)
defAWSerrors["MetadataTooLarge"] = ("Your metadata headers exceed the maximum allowed metadata size.",400)
defAWSerrors["MethodNotAllowed"] = ("The specified method is not allowed against this resource.",405)
defAWSerrors["MissingAttachment"] = ("A SOAP attachment was expected, but none were found.",400)
defAWSerrors["MissingContentLength"] = ("You must provide the Content-Length HTTP header.",411)
defAWSerrors["MissingRequestBodyError"] = ("This happens when the user sends an empty xml document as a request. The error message is: Request body is empty.",400)
defAWSerrors["MissingSecurityElement"] = ("The SOAP 1.1 request is missing a security element.",400)
defAWSerrors["MissingSecurityHeader"] = ("Your request was missing a required header.",400)
defAWSerrors["NoLoggingStatusForKey"] = ("There is no such thing as a logging status sub-resource for a key.",400)
defAWSerrors["NoSuchBucket"] = ("The specified bucket does not exist.",404)
defAWSerrors["NoSuchKey"] = ("The specified key does not exist.",404)
defAWSerrors["NoSuchLifecycleConfiguration"] = ("The lifecycle configuration does not exist.",404)
defAWSerrors["NoSuchUpload"] = ("The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.",404)
defAWSerrors["NoSuchVersion"] = ("Indicates that the version ID specified in the request does not match an existing version.",404)
defAWSerrors["NotImplemented"] = ("A header you provided implies functionality that is not implemented.",501)
defAWSerrors["NotSignedUp"] = ("Your account is not signed up for the S3 service. You must sign up before you can use S3.",403)
defAWSerrors["NoSuchBucketPolicy"] = ("The specified bucket does not have a bucket policy.",404)
defAWSerrors["OperationAborted"] = ("A conflicting conditional operation is currently in progress against this resource. Please try again.",409)
defAWSerrors["PermanentRedirect"] = ("The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.",301)
defAWSerrors["PreconditionFailed"] = ("At least one of the preconditions you specified did not hold.",412)
defAWSerrors["Redirect"] = ("Temporary redirect.",307)
defAWSerrors["RestoreAlreadyInProgress"] = ("Object restore is already in progress.",409)
defAWSerrors["RequestIsNotMultiPartContent"] = ("Bucket POST must be of the enclosure-type multipart/form-data.",400)
defAWSerrors["RequestTimeout"] = ("Your socket connection to the server was not read from or written to within the timeout period.",400)
defAWSerrors["RequestTimeTooSkewed"] = ("The difference between the request time and the server's time is too large.",403)
defAWSerrors["RequestTorrentOfBucketError"] = ("Requesting the torrent file of a bucket is not permitted.",400)
defAWSerrors["SignatureDoesNotMatch"] = ("The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. For more information, see REST Authentication and SOAP Authentication for details.",403)
defAWSerrors["ServiceUnavailable"] = ("Please reduce your request rate.",503)
defAWSerrors["SlowDown"] = ("Please reduce your request rate.",503)
defAWSerrors["TemporaryRedirect"] = ("You are being redirected to the bucket while DNS updates.",307)
defAWSerrors["TokenRefreshRequired"] = ("The provided token must be refreshed.",400)
defAWSerrors["TooManyBuckets"] = ("You have attempted to create more buckets than allowed.",400)
defAWSerrors["UnexpectedContent"] = ("This request does not support content.",400)
defAWSerrors["UnresolvableGrantByEmailAddress"] = ("The e-mail address you provided does not match any account on record.",400)
defAWSerrors["UserKeyMustBeSpecified"] = ("The bucket POST must contain the specified field name. If it is specified, please check the order of the fields.",400)
class AWSError():
def __init__(self, key):
if key in defAWSerrors.keys():
self.key = key
else:
self.key = 'AccountProblem'
def getKey(self):
return self.key
def getMessage(self):
return defAWSerrors[self.key][0]
def getCode(self):
return defAWSerrors[self.key][1]
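# Illustrative usage (comments only): look up the canned message and HTTP code
# for a known error key.
#   err = AWSError("NoSuchKey")
#   err.getMessage()  # -> "The specified key does not exist."
#   err.getCode()     # -> 404
# Unknown keys fall back to "AccountProblem".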
|
#!/usr/bin/env python3
# Test remapping of topic name for incoming message
from mosq_test_helper import *
def write_config(filename, port1, port2, port3):
with open(filename, 'w') as f:
f.write("per_listener_settings true\n")
f.write("port %d\n" % (port2))
f.write("listener %d 127.0.0.1\n" % (port3))
f.write("\n")
f.write("connection bridge_sample\n")
f.write("address 127.0.0.1:%d\n" % (port1))
f.write("bridge_attempt_unsubscribe false\n")
f.write("topic # in 0 local/topic/ remote/topic/\n")
f.write("topic prefix/# in 0 local2/topic/ remote2/topic/\n")
f.write("topic +/value in 0 local3/topic/ remote3/topic/\n")
f.write("topic ic/+ in 0 local4/top remote4/tip\n")
f.write("topic clients/total in 0 test/mosquitto/org $SYS/broker/\n")
f.write("notifications false\n")
f.write("restart_timeout 5\n")
(port1, port2, port3) = mosq_test.get_port(3)
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port1, port2, port3)
rc = 1
keepalive = 60
client_id = socket.gethostname()+".bridge_sample"
connect_packet = mosq_test.gen_connect(client_id, keepalive=keepalive, clean_session=False, proto_ver=128+4)
connack_packet = mosq_test.gen_connack(rc=0)
client_connect_packet = mosq_test.gen_connect("pub-test", keepalive=keepalive)
client_connack_packet = mosq_test.gen_connack(rc=0)
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssock.settimeout(4)
ssock.bind(('', port1))
ssock.listen(5)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port2, use_conf=True)
def test(bridge, sock):
if not mosq_test.expect_packet(bridge, "connect", connect_packet):
return 1
bridge.send(connack_packet)
mid = 0
patterns = [
"remote/topic/#",
"remote2/topic/prefix/#",
"remote3/topic/+/value",
"remote4/tipic/+",
"$SYS/broker/clients/total",
]
for pattern in ("remote/topic/#", "remote2/topic/prefix/#", "remote3/topic/+/value"):
mid += 1
subscribe_packet = mosq_test.gen_subscribe(mid, pattern, 0)
suback_packet = mosq_test.gen_suback(mid, 0)
if not mosq_test.expect_packet(bridge, "subscribe", subscribe_packet):
return 1
bridge.send(suback_packet)
mid += 1
subscribe_packet = mosq_test.gen_subscribe(mid, "#", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
sock.send(subscribe_packet)
if not mosq_test.expect_packet(sock, "suback", suback_packet):
return 1
cases = [
('local/topic/something', 'remote/topic/something'),
('local/topic/some/t/h/i/n/g', 'remote/topic/some/t/h/i/n/g'),
('local/topic/value', 'remote/topic/value'),
# Don't work, #40 must be fixed before
# ('local/topic', 'remote/topic'),
('local2/topic/prefix/something', 'remote2/topic/prefix/something'),
('local3/topic/something/value', 'remote3/topic/something/value'),
('local4/topic/something', 'remote4/tipic/something'),
('test/mosquitto/orgclients/total', '$SYS/broker/clients/total'),
]
for (local_topic, remote_topic) in cases:
mid += 1
remote_publish_packet = mosq_test.gen_publish(
remote_topic, qos=0, mid=mid, payload=''
)
local_publish_packet = mosq_test.gen_publish(
local_topic, qos=0, mid=mid, payload=''
)
bridge.send(remote_publish_packet)
match = mosq_test.expect_packet(sock, "publish", local_publish_packet)
if not match:
print("Fail on cases local_topic=%r, remote_topic=%r" % (
local_topic, remote_topic,
))
return 1
return 0
try:
(bridge, address) = ssock.accept()
bridge.settimeout(2)
sock = mosq_test.do_client_connect(
client_connect_packet, client_connack_packet,
port=port2,
)
rc = test(bridge, sock)
sock.close()
bridge.close()
finally:
os.remove(conf_file)
try:
bridge.close()
except NameError:
pass
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
ssock.close()
exit(rc)
|
# qubit number=3
# total number=84
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
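# Worked examples (illustrative comments only):
#   bitwise_dot("011", "101") -> "1"    # parity of 0*1 + 1*0 + 1*1
#   bitwise_xor("011", "101") -> "011"  # per-bit XOR; note the result string is reversed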
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
    # invert the last qubit (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=70
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.y(input_qubit[2]) # number=56
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC456.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
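# Descriptive comments added (the original script has none): autonomia() appears
# to return a (guaranteed, maximum) range that shrinks as the load "carga" grows.
# The script reads a load and two points, prints the Euclidean distance between
# the points, and answers SIM if the distance fits the guaranteed range, TALVEZ
# if it only fits the maximum range, and NAO otherwise.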
def autonomia(carga):
if(carga <= 50000):
return 18000, 19800
elif(carga <= 200000):
return 9000, 9900
else:
return 3000, 3300
carga = int(input())
auto = autonomia(carga)
ax = float(input())
ay = float(input())
bx = float(input())
by = float(input())
dist = (((bx - ax) ** 2) + ((by - ay) ** 2)) ** 0.5
print(round(dist,2))
if(auto[0] >= dist):
print("SIM")
elif(auto[1] >= dist):
print("TALVEZ")
else:
print("NAO")
|
import hail as hl
from hail.typecheck import typecheck
@typecheck(mt=hl.MatrixTable, path=str, batch_size=int, bgzip=bool, header_json_in_file=bool, use_string_key_as_file_name=bool)
def export_entries_by_col(mt: hl.MatrixTable,
path: str,
batch_size: int = 256,
bgzip: bool = True,
header_json_in_file: bool = True,
use_string_key_as_file_name: bool = False):
"""Export entries of the `mt` by column as separate text files.
Examples
--------
>>> range_mt = hl.utils.range_matrix_table(10, 10)
>>> range_mt = range_mt.annotate_entries(x = hl.rand_unif(0, 1))
>>> hl.experimental.export_entries_by_col(range_mt, 'output/cols_files')
Notes
-----
This function writes a directory with one file per column in `mt`. The
files contain one tab-separated field (with header) for each row field
and entry field in `mt`. The column fields of `mt` are written as JSON
in the first line of each file, prefixed with a ``#``.
The above will produce a directory at ``output/cols_files`` with the
following files:
.. code-block:: text
$ ls -l output/cols_files
total 80
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 index.tsv
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-00.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-01.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-02.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-03.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-04.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-05.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-06.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-07.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-08.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-09.tsv.bgz
$ zcat output/cols_files/part-00.tsv.bgz
#{"col_idx":0}
row_idx x
0 6.2501e-02
1 7.0083e-01
2 3.6452e-01
3 4.4170e-01
4 7.9177e-02
5 6.2392e-01
6 5.9920e-01
7 9.7540e-01
8 8.4848e-01
9 3.7423e-01
Due to overhead and file system limits related to having large numbers
of open files, this function will iteratively export groups of columns.
The `batch_size` parameter can control the size of these groups.
Parameters
----------
mt : :class:`.MatrixTable`
    path : :obj:`str`
        Path (directory) to write to.
batch_size : :obj:`int`
Number of columns to write per iteration.
bgzip : :obj:`bool`
BGZip output files.
header_json_in_file : :obj:`bool`
        Include JSON header in each component file (if False, only written to index.tsv)
    use_string_key_as_file_name : :obj:`bool`
        Use the string column key as the file name rather than the column
        index (requires a single string column key).
    """
if use_string_key_as_file_name and not (len(mt.col_key) == 1 and mt.col_key[0].dtype == hl.tstr):
raise ValueError(f'parameter "use_string_key_as_file_name" requires a single string column key, found {list(mt.col_key.dtype.values())}')
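    # The export itself is performed by the backend: hand over the matrix
    # table's IR plus the options as a MatrixToValueApply node.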
hl.utils.java.Env.backend().execute(
hl.ir.MatrixToValueApply(mt._mir,
{'name': 'MatrixExportEntriesByCol',
'parallelism': batch_size,
'path': path,
'bgzip': bgzip,
'headerJsonInFile': header_json_in_file,
'useStringKeyAsFileName': use_string_key_as_file_name})
)
|
__all__ = ["YahooFetcher", "QueryBuilder"]
from . import *
|
import ila
from gb_arch import GBArch
from gb_nxt_wri import WRI
from gb_nxt_wr0 import WRU0
from gb_nxt_wr0b import WRU0b
from gb_nxt_wr1 import WRU1
from gb_rdi import defNext as rdDefNext
def defUSts (gb):
m = gb.abst
gb.pre_pix = m.reg ('pre_pix', gb.DATA_SIZE)
gb.pre_pix_nxt = gb.pre_pix
gb.st_ready = m.reg ('st_ready', 1)
gb.st_ready_nxt = gb.st_ready
gb.proc_in = m.reg ('proc_in', gb.slice_size * gb.stencil_size)
gb.proc_in_nxt = gb.proc_in
# Define next state function for each instruction/child-instruction
def defNext (gb):
WRI (gb)
WRU0 (gb)
WRU1 (gb)
# Connect next state function to the abstraction
def setNext (gb):
gb.setNext ()
m = gb.abst
m.set_next ('proc_in', gb.proc_in_nxt)
m.set_next ('pre_pix', gb.pre_pix_nxt)
m.set_next ('st_ready', gb.st_ready_nxt)
if __name__ == '__main__':
gb = GBArch ()
defUSts (gb)
defNext (gb)
rdDefNext (gb)
setNext (gb)
verilogFile = 'gb_verilog_all.v'
gb.exportVerilog (verilogFile)
|
import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import threading
import re
import io
import contextlib
from test import support
from test.support import socket_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
try:
import gzip
except ImportError:
gzip = None
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary(b"my dog has fleas"),
'b64bytes': b"my dog has fleas",
'b64bytearray': bytearray(b"my dog has fleas"),
'boolean': False,
'unicode': '\u4000\u6000\u8000',
'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 2, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 2, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_builtin_types set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MININT-1, dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT),
float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
float(xmlrpclib.MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_encoding(self):
value = {'key\u20ac\xa4':
'value\u20ac\xa4'}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = 'method\u20ac\xa4'
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodname=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
def test_dump_bytes(self):
sample = b"my dog has fleas"
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in bytes, bytearray, xmlrpclib.Binary:
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><array>'
'<value><spam/></value>'
'</array></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><struct>'
'<member><name>a</name><value><spam/></value></member>'
'<member><name>b</name><value><spam/></value></member>'
'</struct></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
def check_loads(self, s, value, **kwargs):
dump = '<params><param><value>%s</value></param></params>' % s
result, m = xmlrpclib.loads(dump, **kwargs)
(newvalue,) = result
self.assertEqual(newvalue, value)
self.assertIs(type(newvalue), type(value))
self.assertIsNone(m)
def test_load_standard_types(self):
check = self.check_loads
check('string', 'string')
check('<string>string</string>', 'string')
check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
check('<int>2056183947</int>', 2056183947)
check('<int>-2056183947</int>', -2056183947)
check('<i4>2056183947</i4>', 2056183947)
check('<double>46093.78125</double>', 46093.78125)
check('<boolean>0</boolean>', False)
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
xmlrpclib.Binary(b'\x00byte string\xff'))
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
b'\x00byte string\xff', use_builtin_types=True)
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
xmlrpclib.DateTime('20050210T11:41:23'))
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
datetime.datetime(2005, 2, 10, 11, 41, 23),
use_builtin_types=True)
check('<array><data>'
'<value><int>1</int></value><value><int>2</int></value>'
'</data></array>', [1, 2])
check('<struct>'
'<member><name>b</name><value><int>2</int></value></member>'
'<member><name>a</name><value><int>1</int></value></member>'
'</struct>', {'a': 1, 'b': 2})
def test_load_extension_types(self):
check = self.check_loads
check('<nil/>', None)
check('<ex:nil/>', None)
check('<i1>205</i1>', 205)
check('<i2>20561</i2>', 20561)
check('<i8>9876543210</i8>', 9876543210)
check('<biginteger>98765432100123456789</biginteger>',
98765432100123456789)
check('<float>93.78125</float>', 93.78125)
check('<bigdecimal>9876543210.0123456789</bigdecimal>',
decimal.Decimal('9876543210.0123456789'))
def test_get_host_info(self):
# see bug #3613, this raised a TypeError
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info("user@host.tld"),
('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except OSError:
self.assertTrue(has_ssl)
def test_keepalive_disconnect(self):
class RequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
handled = False
def do_POST(self):
length = int(self.headers.get("Content-Length"))
self.rfile.read(length)
if self.handled:
self.close_connection = True
return
response = xmlrpclib.dumps((5,), methodresponse=True)
response = response.encode()
self.send_response(http.HTTPStatus.OK)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
self.handled = True
self.close_connection = False
def log_message(self, format, *args):
# don't clobber sys.stderr
pass
def run_server():
server.socket.settimeout(float(1)) # Don't hang if client fails
server.handle_request() # First request and attempt at second
server.handle_request() # Retried second request
server = http.server.HTTPServer((socket_helper.HOST, 0), RequestHandler)
self.addCleanup(server.server_close)
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join)
url = "http://{}:{}/".format(*server.server_address)
with xmlrpclib.ServerProxy(url) as p:
self.assertEqual(p.method(), 5)
self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
class DispatchExc(Exception):
"""Raised inside the dispatched functions when checking for
chained exceptions"""
def test_call_registered_func(self):
"""Calls explicitly registered function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
def dispatched_func(*params):
raise self.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(dispatched_func)
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_instance_func(self):
"""Calls a registered instance attribute as a function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
class DispatchedClass:
def dispatched_func(self, *params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(DispatchedClass())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_dispatch_func(self):
"""Calls the registered instance's `_dispatch` function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_method = 'method'
exp_params = 1, 2, 3
class TestInstance:
def _dispatch(self, method, params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(
method, params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(TestInstance())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch(exp_method, exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_registered_func_is_none(self):
"""Calls explicitly registered function which is None"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(None, name='method')
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_instance_has_no_func(self):
"""Attempts to call nonexistent function on a registered instance"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(object())
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_cannot_locate_func(self):
"""Calls a function that the dispatcher cannot locate"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
def test_escape(self):
        self.assertEqual(xmlrpclib.escape("a&b"), "a&amp;b")
        self.assertEqual(xmlrpclib.escape("a<b"), "a&lt;b")
        self.assertEqual(xmlrpclib.escape("a>b"), "a&gt;b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
# this will raise AttributeError because code don't want us to use
# private methods
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time(
[2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %#x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
# datetime vs. DateTime
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
# str vs. DateTime
dstr = now.strftime("%Y%m%dT%H:%M:%S")
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
# some other types
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
self.assertFalse(dtime == 1970)
self.assertTrue(dtime != dbytes)
self.assertFalse(dtime == bytearray(dbytes))
self.assertTrue(dtime != dtuple)
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
self.assertTrue(dtime == ALWAYS_EQ)
self.assertFalse(dtime != ALWAYS_EQ)
self.assertTrue(dtime < LARGEST)
self.assertFalse(dtime > LARGEST)
self.assertTrue(dtime <= LARGEST)
self.assertFalse(dtime >= LARGEST)
self.assertFalse(dtime < SMALLEST)
self.assertTrue(dtime > SMALLEST)
self.assertFalse(dtime <= SMALLEST)
self.assertTrue(dtime >= SMALLEST)
class BinaryTestCase(unittest.TestCase):
# XXX What should str(Binary(b"\xff")) return? I'm choosing "\xff"
# for now (i.e. interpreting the binary data as Latin-1-encoded
# text). But this feels very unsatisfactory. Perhaps we should
# only define repr(), and return r"Binary(b'\xff')" instead?
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, "latin-1"))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, "latin-1"))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, "latin-1"))
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
class Fixture:
@staticmethod
def getData():
return '42'
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
            # Ensure the accepted socket is always blocking. On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
encoding=encoding,
logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x: x, 'têšt')
@serv.register_function
def my_function():
'''This is my function'''
return True
@serv.register_function(name='add')
def _(x, y):
return x + y
testInstance = TestInstanceClass()
serv.register_instance(testInstance, allow_dotted_names=True)
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
            # Ensure the accepted socket is always blocking. On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError("broken dispatcher")
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = [
"/foo", "/foo/bar",
"/foo?k=v", "/foo#frag", "/foo?k=v#frag",
"", "/", "/RPC2", "?k=v", "#frag",
]
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
d.register_function(lambda p=path: p, 'test')
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
serv.add_dispatcher("/is/broken", BrokenDispatcher())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore OSErrors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
thread = threading.Thread(target=self.threadFunc, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_client_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
with contextlib.closing(http.client.HTTPConnection(ADDR, PORT)) as conn:
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall',
'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<class \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_allow_dotted_names_true(self):
# XXX also need allow_dotted_names_false test.
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
with contextlib.closing(socket.create_connection((ADDR, PORT))) as conn:
conn.send('POST /RPC2 HTTP/1.0\r\n'
'Content-Length: 100\r\n\r\n'
'bye HTTP/1.1\r\n'
f'Host: {ADDR}:{PORT}\r\n'
'Accept-Encoding: identity\r\n'
'Content-Length: 0\r\n\r\n'.encode('ascii'))
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection,
(None, None))
self.assertEqual(server('transport')._connection,
(None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, "a")
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection,
(None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
def test_server_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_invalid_path(self):
p = xmlrpclib.ServerProxy(URL+"/invalid")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path_query_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v#frag")
self.assertEqual(p.test(), "/foo?k=v#frag")
def test_path_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo#frag")
self.assertEqual(p.test(), "/foo#frag")
def test_path_query(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v")
self.assertEqual(p.test(), "/foo?k=v")
def test_empty_path(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.test(), "/RPC2")
def test_root_path(self):
p = xmlrpclib.ServerProxy(URL + "/")
self.assertEqual(p.test(), "/")
def test_empty_path_query(self):
p = xmlrpclib.ServerProxy(URL + "?k=v")
self.assertEqual(p.test(), "?k=v")
def test_empty_path_fragment(self):
p = xmlrpclib.ServerProxy(URL + "#frag")
self.assertEqual(p.test(), "#frag")
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
        #they should all have been handled by two request handlers, each having
        #logged at least two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = b'\0' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = b'\0' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegex(ValueError,
"max gzipped payload length exceeded"):
xmlrpclib.gzip_decode(encoded)
xmlrpclib.gzip_decode(encoded, max_decode=-1)
class HeadersServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
test_headers = None
def do_POST(self):
self.__class__.test_headers = self.headers
return super().do_POST()
requestHandler = RequestHandler
standard_headers = [
'Host', 'Accept-Encoding', 'Content-Type', 'User-Agent',
'Content-Length']
def setUp(self):
self.RequestHandler.test_headers = None
return super().setUp()
def assertContainsAdditionalHeaders(self, headers, additional):
expected_keys = sorted(self.standard_headers + list(additional.keys()))
self.assertListEqual(sorted(headers.keys()), expected_keys)
for key, value in additional.items():
self.assertEqual(headers.get(key), value)
def test_header(self):
p = xmlrpclib.ServerProxy(URL, headers=[('X-Test', 'foo')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_many(self):
p = xmlrpclib.ServerProxy(
URL, headers=[('X-Test', 'foo'), ('X-Test-Second', 'bar')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(
headers, {'X-Test': 'foo', 'X-Test-Second': 'bar'})
def test_header_empty(self):
p = xmlrpclib.ServerProxy(URL, headers=[])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {})
def test_header_tuple(self):
p = xmlrpclib.ServerProxy(URL, headers=(('X-Test', 'foo'),))
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_items(self):
p = xmlrpclib.ServerProxy(URL, headers={'X-Test': 'foo'}.items())
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
# Actual value of the URL doesn't matter if it is a string in
# the correct format.
self.url = 'http://fake.localhost'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
thread = threading.Thread(target=http_server, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
            # capture stdout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with support.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
            # start at the 44th char so as to skip the http header; we only
            # need the xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
            # Using the same test method in order to avoid all the data-passing
            # boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
# SimpleXMLRPCDispatcher.__init__ accepts use_builtin_types, which
# makes all dispatch of binary data as bytes instances, and all
# dispatch of datetime argument as datetime.datetime instances.
self.log = []
expected_bytes = b"my dog has fleas"
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(
allow_none=True, encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
def setUpModule():
thread_info = support.threading_setup()
unittest.addModuleCleanup(support.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
|
import sys
import DefaultTable
import array
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
class table_T_S_I__5(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
numGlyphs = ttFont['maxp'].numGlyphs
assert len(data) == 2 * numGlyphs
a = array.array("H")
		a.frombytes(data)
		if sys.byteorder != "big":
a.byteswap()
self.glyphGrouping = {}
for i in range(numGlyphs):
self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]
def compile(self, ttFont):
glyphNames = ttFont.getGlyphOrder()
a = array.array("H")
for i in range(len(glyphNames)):
a.append(self.glyphGrouping[glyphNames[i]])
if sys.byteorder <> "big":
a.byteswap()
		return a.tobytes()
def toXML(self, writer, ttFont):
		names = sorted(self.glyphGrouping.keys())
for glyphName in names:
writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName])
writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
if not hasattr(self, "glyphGrouping"):
self.glyphGrouping = {}
if name <> "glyphgroup":
return
self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
|
__all__ = ['setup_targets']
from pathlib import Path
from typing import Final
from build_system.build_target import *
from build_system.compiler import *
from ..meson import *
def setup_targets(root_dir: Path,
targets: list[CompilerInstanceTargets],
cli_mode: bool) -> None:
for compiler_instance_targets in targets:
_setup_compiler_instance_targets(root_dir=root_dir, compiler_instance_targets=compiler_instance_targets, cli_mode=cli_mode)
if cli_mode:
print()
def _setup_compiler_instance_targets(root_dir: Path,
compiler_instance_targets: CompilerInstanceTargets,
cli_mode: bool) -> None:
compiler_instance: Final[CompilerInstance] = compiler_instance_targets.compiler_instance
with compiler_instance.create_env_context_manager() as compiler_env_manager:
for target in compiler_instance_targets.build_targets:
setup_target(root_dir=root_dir,
compiler_instance=compiler_instance,
compiler_env_manager=compiler_env_manager,
build_target=target,
cli_mode=cli_mode)
|
# Copyright (C) 2019-2020, Therapixel SA.
# All rights reserved.
# This file is subject to the terms and conditions described in the
# LICENSE file distributed in this package.
"""The commands module exposes the different command lines methods
that can be used with pacsanini.
"""
from click import echo, group, option
from pacsanini.__version__ import __version__
from pacsanini.cli.config import config_cli
from pacsanini.cli.dashboard import dashboard_cli
from pacsanini.cli.db import db_cli_group
from pacsanini.cli.net import echo_cli, find_cli, move_cli, send_cli, server_cli
from pacsanini.cli.parse import gen_parser, parse
from pacsanini.cli.pipeline import orchestrate_cli
def print_version(ctx, param, value): # pylint: disable=unused-argument
"""Print the program's version."""
if not value or ctx.resilient_parsing:
return
echo(f"Version {__version__}")
ctx.exit()
@group(name="pacsanini")
@option(
"--version", is_flag=True, callback=print_version, expose_value=False, is_eager=True
)
def entry_point(**kwargs):
"""Parse or configure your DICOM tag parsing capabilities
from the command line.
"""
entry_point.add_command(config_cli)
entry_point.add_command(dashboard_cli)
entry_point.add_command(db_cli_group)
entry_point.add_command(echo_cli)
entry_point.add_command(find_cli)
entry_point.add_command(move_cli)
entry_point.add_command(send_cli)
entry_point.add_command(server_cli)
entry_point.add_command(parse)
entry_point.add_command(gen_parser)
entry_point.add_command(orchestrate_cli)
|
# adapted from https://github.com/nadavbh12/VQ-VAE
import numpy as np
import torch
from torch import nn
from torch.autograd import Function, Variable
import torch.nn.functional as F
from config import *
import pdb
class NearestEmbedFunc(Function):
"""
Input:
------
x - (batch_size, emb_dim, *)
Last dimensions may be arbitrary
emb - (emb_dim, num_emb)
"""
@staticmethod
def forward(ctx, input, emb):
# if input.size(1) != emb.size(0):
# raise RuntimeError('invalid argument: input.size(1) ({}) must be equal to emb.size(0) ({})'.
# format(input.size(1), emb.size(0)))
# emb = emb.expand(input.size(1), emb.size(1))
emb_ex = emb.expand(input.size(1), emb.size(1)) # new2
# save sizes for backward
ctx.batch_size = input.size(0)
ctx.num_latents = int(np.prod(np.array(input.size()[2:])))
# ctx.emb_dim = emb.size(0)
# ctx.num_emb = emb.size(1)
ctx.emb_dim = emb_ex.size(0)
ctx.num_emb = emb_ex.size(1) # new2
ctx.input_type = type(input)
ctx.dims = list(range(len(input.size())))
# expand to be broadcast-able
x_expanded = input.unsqueeze(-1)
num_arbitrary_dims = len(ctx.dims) - 2
if num_arbitrary_dims:
# emb_expanded = emb.view(emb.shape[0], *([1] * num_arbitrary_dims), emb.shape[1])
emb_expanded = emb_ex.view(emb_ex.shape[0], *([1] * num_arbitrary_dims), emb_ex.shape[1]) # new2
else:
# emb_expanded = emb
emb_expanded = emb_ex # new2
# find nearest neighbors
# dist = torch.norm(x_expanded - emb_expanded, 2, 1)
dist = torch.pow(x_expanded - emb_expanded, 2) # (batch_size, emb_dim, *, num_emb) # new2
_, argmin = dist.min(-1)
shifted_shape = [input.shape[0], *list(input.shape[2:]) ,input.shape[1]]
# pdb.set_trace()
result = emb.t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, ctx.dims[-1], *ctx.dims[1:-1]) # new2
ctx.save_for_backward(argmin)
return result.contiguous(), argmin
@staticmethod
def backward(ctx, grad_output, argmin=None):
# pdb.set_trace()
grad_input = grad_emb = None
if ctx.needs_input_grad[0]:
grad_input = grad_output
if ctx.needs_input_grad[1]:
argmin, = ctx.saved_variables
latent_indices = torch.arange(ctx.num_emb).type_as(argmin)
idx_choices = (argmin.view(-1, 1) == latent_indices.view(1, -1)).type_as(grad_output.data)
n_idx_choice = idx_choices.sum(0)
n_idx_choice[n_idx_choice == 0] = 1
idx_avg_choices = idx_choices / n_idx_choice
grad_output = grad_output.permute(0, *ctx.dims[2:], 1).contiguous()
grad_output = grad_output.view(ctx.batch_size * ctx.num_latents, ctx.emb_dim)
# pdb.set_trace()
# grad_emb = torch.sum(grad_output.data.view(-1, ctx.emb_dim, 1) *
# idx_avg_choices.view(-1, 1, ctx.num_emb), 0)
grad_emb = torch.sum(grad_output.data.view(-1, 1) *
idx_avg_choices.view(-1, ctx.num_emb), 0, keepdim=True) # new2
return grad_input, grad_emb, None, None
def nearest_embed(x, emb):
return NearestEmbedFunc().apply(x, emb)
class NearestEmbed(nn.Module):
def __init__(self, num_embeddings, embeddings_dim, rd_init=True):
super(NearestEmbed, self).__init__()
if rd_init:
self.weight = nn.Parameter(torch.rand(embeddings_dim, num_embeddings))
else:
# self.weight = nn.Parameter(torch.linspace(0.0, 1.0, num_embeddings).unsqueeze(0).expand(embeddings_dim, num_embeddings))
self.weight = nn.Parameter(torch.linspace(lin_min, lin_max, num_embeddings).unsqueeze(0).expand(embeddings_dim, num_embeddings))
print('Init emb weight:', self.weight.data)
def forward(self, x, weight_sg=False):
"""Input:
---------
x - (batch_size, emb_size, *)
"""
return nearest_embed(x, self.weight.detach() if weight_sg else self.weight)
# adapted from https://github.com/rosinality/vq-vae-2-pytorch/blob/master/vqvae.py#L25
# that adapted from https://github.com/deepmind/sonnet
class NearestEmbedEMA(nn.Module):
def __init__(self, n_emb, emb_dim, decay=0.99, eps=1e-5, rd_init=True):
super(NearestEmbedEMA, self).__init__()
self.decay = decay
self.eps = eps
self.embeddings_dim = emb_dim
self.n_emb = n_emb
self.emb_dim = emb_dim
if rd_init:
embed = torch.rand(emb_dim, n_emb)
else:
# embed = torch.linspace(0.0, 1.0, n_emb).unsqueeze(0).expand(emb_dim, n_emb)
embed = torch.linspace(lin_min, lin_max, n_emb).unsqueeze(0).expand(emb_dim, n_emb)
self.register_buffer('weight', embed)
self.register_buffer('cluster_size', torch.zeros(n_emb))
self.register_buffer('embed_avg', embed.clone())
print('Init emb weight ema:', self.weight.data)
def forward(self, x, weight_sg=None):
"""Input:
---------
x - (batch_size, emb_size, *)
"""
emb_ex = self.weight.expand(x.size(1), self.weight.size(1)) # new2
#emb_avg_ex = self.embed_avg.expand(x.size(1), self.weight.size(1)) # new2
dims = list(range(len(x.size())))
x_expanded = x.unsqueeze(-1)
num_arbitrary_dims = len(dims) - 2
# if num_arbitrary_dims:
# emb_expanded = self.weight.view(self.emb_dim, *([1] * num_arbitrary_dims), self.n_emb)
# else:
# emb_expanded = self.weight
emb_size = x.size(1)
if num_arbitrary_dims:
#emb_expanded = self.weight.expand(emb_size, self.n_emb).view(self.emb_dim, *([1] * num_arbitrary_dims), self.n_emb)
emb_expanded = emb_ex.expand(emb_size, self.n_emb).view(self.emb_dim, *([1] * num_arbitrary_dims), self.n_emb)
else:
#emb_expanded = self.weight.expand(emb_size, self.n_emb)
emb_expanded = emb_ex.expand(emb_size, self.n_emb)
# find nearest neighbors
# dist = torch.norm(x_expanded - emb_expanded, 2, 1)
dist = torch.pow(x_expanded - emb_expanded, 2) # (batch_size, emb_dim, *, num_emb) # new2
_, argmin = dist.min(-1)
shifted_shape = [x.shape[0], *list(x.shape[2:]), x.shape[1]]
# result = emb_ex.t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, dims[-1], *dims[1:-1]) # (batch_size, emb_dim, *, num_emb) # new2
result = self.weight.t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, dims[-1], *dims[1:-1]) # (batch_size, emb_dim, *, num_emb) # new2
# result = self.weight.expand(emb_size, self.n_emb).t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, dims[-1], *dims[1:-1])
if self.training:
latent_indices = torch.arange(self.n_emb).type_as(argmin)
emb_onehot = (argmin.view(-1, 1) == latent_indices.view(1, -1)).type_as(x.data)
n_idx_choice = emb_onehot.sum(0)
n_idx_choice[n_idx_choice == 0] = 1
# pdb.set_trace()
# flatten = x.permute(1, 0, *dims[-2:]).contiguous().view(x.shape[1], -1)
num_arbitrary_dims = len(dims) - 2
if num_arbitrary_dims:
# flatten = x.permute(1, 0, *dims[-2:]).contiguous().view(x.shape[1], -1)
# flatten = x.permute(1, 0, *dims[-2:]).contiguous().view(1, -1)
flatten = x.view(1, -1)
else:
# flatten = x.permute(1, 0).contiguous()
# flatten = x.permute(1, 0).contiguous().view(1, -1)
flatten = x.view(1, -1)
            self.cluster_size.data.mul_(self.decay).add_(
                n_idx_choice, alpha=1 - self.decay
            )
# pdb.set_trace()
embed_sum = flatten @ emb_onehot # -----dc0.99
# embed_sum = torch.pow(flatten.t() - emb_onehot, 2).mean(0) # ----dc0.99_s
#pdb.set_trace()
            self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)
#emb_avg_ex.data.mul_(self.decay).add_(1 - self.decay, embed_sum)
#pdb.set_trace()
n = self.cluster_size.sum()
cluster_size = (
(self.cluster_size + self.eps) / (n + self.n_emb * self.eps) * n
)
embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
self.weight.data.copy_(embed_normalized) # ---dc0.99
# self.weight.data.copy_(self.embed_avg) # -------dc0.99_s
#embed_normalized = emb_avg_ex / cluster_size.unsqueeze(0)
#self.weight.data.copy_(embed_normalized.mean(0, keepdim=True))
#self.embed_avg.data.copy_(emb_avg_ex.mean(0, keepdim=True))
return result, argmin
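# --- Illustrative usage sketch (not part of the original module) ---
# The "new2" variants above expand a codebook of shape (emb_dim, num_emb) across
# the input, and the reshaping in forward() only lines up when emb_dim == 1
# (per-element, scalar quantization). The sizes below are assumptions chosen to
# match that; only the forward quantization path is exercised here.
if __name__ == '__main__':
    with torch.no_grad():
        z_e = torch.randn(2, 1, 16)  # (batch, emb_dim, *) encoder output
        quantizer = NearestEmbed(num_embeddings=8, embeddings_dim=1)
        z_q, idx = quantizer(z_e)    # z_q: quantized values, idx: codebook indices
        ema_quantizer = NearestEmbedEMA(n_emb=8, emb_dim=1).eval()
        z_q_ema, idx_ema = ema_quantizer(z_e)
        print(z_q.shape, idx.shape, z_q_ema.shape, idx_ema.shape)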
|
#!C:\Devel\Bankera\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.6')()
)
|
from aiida.engine import calcfunction
from aiida.orm import Int
@calcfunction
def sum_and_difference(alpha, beta):
return {'sum': alpha + beta, 'difference': alpha - beta}
result = sum_and_difference(Int(1), Int(2))
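# Illustrative note (assumes a configured AiiDA profile is loaded): the call above
# records provenance and returns a dict of Int nodes, e.g. result['sum'].value == 3
# and result['difference'].value == -1.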
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for Stackoverflow."""
from typing import List
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
EVAL_BATCH_SIZE = 100
def create_vocab(vocab_size):
"""Creates vocab from `vocab_size` most common words in Stackoverflow."""
vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts()
return list(vocab_dict.keys())[:vocab_size]
def split_input_target(chunk):
"""Generate input and target data.
The task of language model is to predict the next word.
Args:
chunk: A Tensor of text data.
Returns:
A namedtuple of input and target data.
"""
input_text = tf.map_fn(lambda x: x[:-1], chunk)
target_text = tf.map_fn(lambda x: x[1:], chunk)
return (input_text, target_text)
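# Illustrative example (not part of the original module): for a padded row
# [bos, 5, 7, eos, pad], split_input_target yields input [bos, 5, 7, eos] and
# target [5, 7, eos, pad], so each position is trained to predict the next token.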
def build_to_ids_fn(vocab, max_seq_len):
"""Constructs function mapping examples to sequences of token indices."""
_, _, bos, eos = get_special_tokens(len(vocab))
table_values = np.arange(len(vocab), dtype=np.int64)
table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(vocab, table_values),
num_oov_buckets=1)
def to_ids(example):
sentence = tf.reshape(example['tokens'], shape=[1])
words = tf.strings.split(sentence, sep=' ').values
truncated_words = words[:max_seq_len]
tokens = table.lookup(truncated_words) + 1
tokens = tf.cond(
tf.less(tf.size(tokens), max_seq_len),
lambda: tf.concat([tokens, [eos]], 0), lambda: tokens)
return tf.concat([[bos], tokens], 0)
return to_ids
def batch_and_split(dataset, max_seq_len, batch_size):
return dataset.padded_batch(
batch_size, padded_shapes=[max_seq_len + 1]).map(
split_input_target, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def get_special_tokens(vocab_size):
"""Gets tokens dataset preprocessing code will add to Stackoverflow."""
pad = 0
oov = vocab_size + 1
bos = vocab_size + 2
eos = vocab_size + 3
return pad, oov, bos, eos
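# For example, get_special_tokens(10000) returns (0, 10001, 10002, 10003),
# i.e. pad=0, oov=10001, bos=10002 and eos=10003.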
def create_train_dataset_preprocess_fn(vocab: List[str],
client_batch_size: int,
client_epochs_per_round: int,
max_seq_len: int,
max_training_elements_per_user: int,
max_shuffle_buffer_size=10000):
"""Creates preprocessing functions for stackoverflow data.
This function returns a function which takes a dataset and returns a dataset,
generally for mapping over a set of unprocessed client datasets during
training.
Args:
vocab: Vocabulary which defines the embedding.
client_batch_size: Integer representing batch size to use on the clients.
client_epochs_per_round: Number of epochs for which to repeat train client
dataset.
max_seq_len: Integer determining shape of padded batches. Sequences will be
padded up to this length, and sentences longer than `max_seq_len` will be
truncated to this length.
max_training_elements_per_user: Integer controlling the maximum number of
elements to take per user. If -1, takes all elements for each user.
max_shuffle_buffer_size: Maximum shuffle buffer size.
Returns:
    A `preprocess_train` function, as described above, which maps a raw client
    dataset to a preprocessed dataset of (input, target) batches.
"""
if client_batch_size <= 0:
raise ValueError('client_batch_size must be a positive integer; you have '
'passed {}'.format(client_batch_size))
elif client_epochs_per_round <= 0:
raise ValueError('client_epochs_per_round must be a positive integer; you '
'have passed {}'.format(client_epochs_per_round))
elif max_seq_len <= 0:
raise ValueError('max_seq_len must be a positive integer; you have '
'passed {}'.format(max_seq_len))
elif max_training_elements_per_user < -1:
raise ValueError(
'max_training_elements_per_user must be an integer at '
'least -1; you have passed {}'.format(max_training_elements_per_user))
if (max_training_elements_per_user == -1 or
max_training_elements_per_user > max_shuffle_buffer_size):
shuffle_buffer_size = max_shuffle_buffer_size
else:
shuffle_buffer_size = max_training_elements_per_user
  # TODO(b/155408842): need further investigation on why `tff.tf_computation`
# decorator causes b/153363900 for `to_ids`, and large memory consumption.
def preprocess_train(dataset):
to_ids = build_to_ids_fn(vocab, max_seq_len)
dataset = dataset.take(max_training_elements_per_user)
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.repeat(client_epochs_per_round)
dataset = dataset.map(
to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return batch_and_split(dataset, max_seq_len, client_batch_size)
return preprocess_train
def create_test_dataset_preprocess_fn(vocab: List[str], max_seq_len: int):
"""Creates preprocessing functions for stackoverflow data.
This function returns a function which represents preprocessing logic
for use on centralized validation and test datasets outside of TFF.
Args:
vocab: Vocabulary which defines the embedding.
max_seq_len: Integer determining shape of padded batches. Sequences will be
padded up to this length, and sentences longer than `max_seq_len` will be
truncated to this length.
Returns:
`preprocess_val_and_test`, as described above.
"""
if max_seq_len <= 0:
raise ValueError('max_seq_len must be a positive integer; you have '
'passed {}'.format(max_seq_len))
def preprocess_val_and_test(dataset):
to_ids = build_to_ids_fn(vocab, max_seq_len)
id_dataset = dataset.map(
to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return batch_and_split(id_dataset, max_seq_len, EVAL_BATCH_SIZE)
return preprocess_val_and_test
def construct_word_level_datasets(vocab_size: int,
client_batch_size: int,
client_epochs_per_round: int,
max_seq_len: int,
max_training_elements_per_user: int,
num_validation_examples: int,
max_shuffle_buffer_size=10000):
"""Preprocessing for Stackoverflow data.
Notice that this preprocessing function *ignores* the heldout Stackoverflow
dataset for consistency with the other datasets in the proposed optimization
paper, and returns a validation/test split of the Stackoverflow "test" data,
containing more examples from users in the Stackoverflow train dataset.
Args:
vocab_size: Integer representing size of the vocab to use. Vocabulary will
then be the `vocab_size` most frequent words in the Stackoverflow dataset.
client_batch_size: Integer representing batch size to use on the clients.
client_epochs_per_round: Number of epochs for which to repeat train client
dataset.
max_seq_len: Integer determining shape of padded batches. Sequences will be
padded up to this length, and sentences longer than `max_seq_len` will be
truncated to this length.
max_training_elements_per_user: Integer controlling the maximum number of
elements to take per user. If -1, takes all elements for each user.
num_validation_examples: Number of examples from Stackoverflow test set to
use for validation on each round.
max_shuffle_buffer_size: Maximum shuffle buffer size.
Returns:
stackoverflow_train: An instance of `tff.simulation.ClientData`
representing Stackoverflow data for training.
stackoverflow_validation: A split of the Stackoverflow Test data as outlined
in `tff.simulation.datasets.stackoverflow`, containing at most
`num_validation_examples` examples.
stackoverflow_test: A split of the same Stackoverflow Test data containing
the examples not used in `stackoverflow_validation`.
"""
if num_validation_examples < 1:
raise ValueError(
'num_validation_examples must be an integer at '
'least 1; you have passed {}'.format(num_validation_examples))
elif vocab_size <= 0:
raise ValueError('vocab_size must be a positive integer; you have '
'passed {}'.format(vocab_size))
(stackoverflow_train, _,
stackoverflow_test) = tff.simulation.datasets.stackoverflow.load_data()
vocab = create_vocab(vocab_size)
raw_test_dataset = stackoverflow_test.create_tf_dataset_from_all_clients()
preprocess_train = create_train_dataset_preprocess_fn(
vocab, client_batch_size, client_epochs_per_round, max_seq_len,
max_training_elements_per_user, max_shuffle_buffer_size)
preprocess_val_and_test = create_test_dataset_preprocess_fn(
vocab, max_seq_len)
stackoverflow_train = stackoverflow_train.preprocess(preprocess_train)
stackoverflow_val = preprocess_val_and_test(
raw_test_dataset.take(num_validation_examples))
stackoverflow_test = preprocess_val_and_test(
raw_test_dataset.skip(num_validation_examples))
return stackoverflow_train, stackoverflow_val, stackoverflow_test
def get_centralized_train_dataset(vocab_size: int,
batch_size: int,
max_seq_len: int,
shuffle_buffer_size: int = 10000):
"""Creates centralized approximately shuffled train dataset."""
vocab = create_vocab(vocab_size)
to_ids = build_to_ids_fn(vocab, max_seq_len)
train, _, _ = tff.simulation.datasets.stackoverflow.load_data()
train = train.create_tf_dataset_from_all_clients()
train = train.shuffle(buffer_size=shuffle_buffer_size)
return batch_and_split(
train.map(to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE),
max_seq_len, batch_size)
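# Minimal smoke test (illustrative, not part of the original module). It uses a
# tiny hand-built vocabulary instead of the real Stackoverflow word counts, so
# nothing is downloaded; the literal strings and sizes below are assumptions.
if __name__ == '__main__':
  demo_vocab = ['hello', 'world']
  pad, oov, bos, eos = get_special_tokens(len(demo_vocab))
  to_ids = build_to_ids_fn(demo_vocab, max_seq_len=5)
  ids = to_ids({'tokens': tf.constant('hello world unseen')})
  # 'unseen' falls into the OOV bucket; bos/eos frame the sentence.
  print(ids.numpy(), (pad, oov, bos, eos))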
|
import glob
import subprocess
from setuptools import setup, find_packages, Extension
def build_libs():
subprocess.call(['cmake', '.'])
subprocess.call(['make'])
build_libs()
setup(
name='jetbot',
version='0.3.0',
description='An open-source robot based on NVIDIA Jetson Nano',
packages=find_packages(),
install_requires=[
'Adafruit_MotorHat',
'Adafruit-SSD1306',
],
package_data={'jetbot': ['ssd_tensorrt/*.so']},
)
|
import cjb.uif
from cjb.uif.views import Label
from viz.layout import buttonSize
class BaseScene(cjb.uif.Scene):
def __init__(self, ui, key = None):
self.ui = ui
self.scroller = None
cjb.uif.Scene.__init__(self, ui.manager, key or self.__class__.__name__)
self.container.properties['sendKeys'] = 1
def build(self):
# common UI
if self.key != 'Home':
self.targetButtons([self.ui.home, self.back])
def layout(self, view):
# common layout
home = self.buttonWithKey('home')
if home:
home.frame = view.frame.bottomRightSubrect(size = buttonSize, margin = 10)
back = self.buttonWithKey('back')
if back:
back.frame = view.frame.topRightSubrect(size = buttonSize, margin = 10)
return view
def back(self, message = None):
self.ui.popScene()
def addLabel(self, txt, bg = None):
return self.addView(Label(txt, fontSize = 11, bg = bg))
def addModelView(self, obj):
return self.addView(cjb.uif.views.Button(obj = obj))
def addModelViews(self, objs):
        for obj in objs:
            self.addModelView(obj)
def handleViewMessage(self, scene, obj, message):
if obj:
if isinstance(obj, Relationship):
self.showRelationship(obj)
#...
else:
print("App got message to " + str(obj) + ": " + str(message))
elif message.get('event') == 'key':
self.handleKeyEvent(message["arg"])
else:
print("App got general message: " + str(message))
def handleKeyEvent(self, keyInfo):
if keyInfo["t"] == "h" and 1 == len(keyInfo):
self.ui.home()
elif keyInfo["t"] == "b" and 1 == len(keyInfo):
self.back()
else:
print("Unhandled key: " + str(keyInfo))
|
"""Unit tests for pydot drawing functions."""
try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys
import tempfile
from nose.tools import assert_equal, assert_is_instance, assert_true
import networkx as nx
from networkx.testing import assert_graphs_equal
class TestPydot(object):
@classmethod
def setupClass(cls):
'''
Fixture defining the `pydot` global to be the `pydot` module if both
importable and of sufficient version _or_ skipping this test.
'''
global pydot
pydot = nx.nx_pydot.setup_module(sys.modules[__name__])
assert pydot is not None
def pydot_checks(self, G, prog):
'''
Validate :mod:`pydot`-based usage of the passed NetworkX graph with the
passed basename of an external GraphViz command (e.g., `dot`, `neato`).
'''
# Set the name of this graph to... "G". Failing to do so will
# subsequently trip an assertion expecting this name.
G.graph['name'] = 'G'
# Add arbitrary nodes and edges to the passed empty graph.
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('A', 'D')])
G.add_node('E')
# Validate layout of this graph with the passed GraphViz command.
graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
assert_is_instance(graph_layout, dict)
# Convert this graph into a "pydot.Dot" instance.
P = nx.nx_pydot.to_pydot(G)
# Convert this "pydot.Dot" instance back into a graph of the same type.
G2 = G.__class__(nx.nx_pydot.from_pydot(P))
# Validate the original and resulting graphs to be the same.
assert_graphs_equal(G, G2)
# Serialize this "pydot.Dot" instance to a temporary file in dot format.
fname = tempfile.mktemp()
P.write_raw(fname)
# Deserialize a list of new "pydot.Dot" instances back from this file.
Pin_list = pydot.graph_from_dot_file(path=fname, encoding='utf-8')
# Validate this file to contain only one graph.
assert_equal(len(Pin_list), 1)
# The single "pydot.Dot" instance deserialized from this file.
Pin = Pin_list[0]
# Sorted list of all nodes in the original "pydot.Dot" instance.
n1 = sorted([p.get_name() for p in P.get_node_list()])
# Sorted list of all nodes in the deserialized "pydot.Dot" instance.
n2 = sorted([p.get_name() for p in Pin.get_node_list()])
# Validate these instances to contain the same nodes.
assert_equal(n1, n2)
# Sorted list of all edges in the original "pydot.Dot" instance.
e1 = sorted([
(e.get_source(), e.get_destination()) for e in P.get_edge_list()])
# Sorted list of all edges in the original "pydot.Dot" instance.
e2 = sorted([
(e.get_source(), e.get_destination()) for e in Pin.get_edge_list()])
# Validate these instances to contain the same edges.
assert_equal(e1, e2)
# Deserialize a new graph of the same type back from this file.
Hin = nx.nx_pydot.read_dot(fname)
Hin = G.__class__(Hin)
# Validate the original and resulting graphs to be the same.
assert_graphs_equal(G, Hin)
def test_undirected(self):
self.pydot_checks(nx.Graph(), prog='neato')
def test_directed(self):
self.pydot_checks(nx.DiGraph(), prog='dot')
def test_read_write(self):
G = nx.MultiGraph()
G.graph['name'] = 'G'
G.add_edge('1', '2', key='0') # read assumes strings
fh = StringIO()
nx.nx_pydot.write_dot(G, fh)
fh.seek(0)
H = nx.nx_pydot.read_dot(fh)
assert_graphs_equal(G, H)
|
class Stack:
#initialize stack and top
def __init__(self,max_size=None):
self.__stack = []
self.__max_size = max_size
self.__top = 0
#current length of stack
def __len__(self):
return len(self.__stack)
#check if stack is empty
def is_empty(self):
return True if self.__top==0 else False
#check if stack is full
def is_full(self):
return True if self.__max_size and self.__max_size==self.__top else False
#retrieve stack
def get(self,index=None):
if index is not None:
return self.__stack[index]
return self.__stack
#add item to stack
def push(self,x):
if self.is_full():
            print('Overflow')
return None
else:
self.__stack.append(x)
self.__top+=1
#remove item from stack
def pop(self):
if self.is_empty():
            print('Underflow')
return None
else:
self.__top-=1
return self.__stack.pop()
#show item on top of stack
def peek(self):
if self.is_empty():
            print('Empty Stack')
return
else:
return self.__stack[-1]
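#illustrative usage demo (not part of the original class)
if __name__ == '__main__':
    s = Stack(max_size=2)
    s.push(1)
    s.push(2)
    s.push(3)        # prints 'Overflow' since max_size is 2
    print(s.peek())  # 2
    print(s.pop())   # 2
    print(s.pop())   # 1
    s.pop()          # prints 'Underflow'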
|
# Generated by Django 3.0.8 on 2021-01-15 13:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Gallery', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='imageclient',
old_name='product',
new_name='client',
),
]
|
# Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import shutil
import abc
import docker
from tqdm import tqdm
from emu.utils import mkdir_p
class ProgressTracker(object):
"""Tracks progress using tqdm for a set of layers that are pushed."""
def __init__(self):
# This tracks the information for a given layer id.
self.progress = {}
self.idx = -1
def __del__(self):
for k in self.progress:
self.progress[k]["tqdm"].close()
def update(self, entry):
"""Update the progress bars given a an entry.."""
if "id" not in entry:
return
identity = entry["id"]
if identity not in self.progress:
self.idx += 1
self.progress[identity] = {
"tqdm": tqdm(total=0, position=self.idx, unit="B", unit_scale=True), # The progress bar
"total": 0, # Total of bytes we are shipping
"status": "", # Status message.
"current": 0, # Current of total already send.
}
prog = self.progress[identity]
total = int(entry.get("progressDetail", {}).get("total", -1))
current = int(entry.get("progressDetail", {}).get("current", 0))
if prog["total"] != total and total != -1:
prog["total"] = total
prog["tqdm"].reset(total=total)
if prog["status"] != entry["status"]:
prog["tqdm"].set_description("{0} {1}".format(entry.get("status"), identity))
if current != 0:
diff = current - prog["current"]
prog["current"] = current
prog["tqdm"].update(diff)
class DockerContainer(object):
"""A Docker Device is capable of creating and launching docker images.
In order to successfully create and launch a docker image you must either
run this as root, or have enabled sudoless docker.
"""
TAG_REGEX = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9._-]*:?[a-zA-Z0-9._-]*")
def __init__(self, repo=None):
if repo and repo[-1] != "/":
repo += "/"
self.repo = repo
def get_client(self):
return docker.from_env()
def get_api_client(self):
try:
api_client = docker.APIClient()
logging.info(api_client.version())
return api_client
except:
logging.exception("Failed to create default client, trying domain socket.", exc_info=True)
api_client = docker.APIClient(base_url="unix://var/run/docker.sock")
logging.info(api_client.version())
return api_client
def push(self):
image = self.full_name()
print("Pushing docker image: {}.. be patient this can take a while!".format(self.full_name()))
tracker = ProgressTracker()
try:
client = docker.from_env()
result = client.images.push(image, "latest", stream=True, decode=True)
for entry in result:
tracker.update(entry)
self.docker_image().tag("{}{}:latest".format(self.repo, self.image_name()))
except:
logging.exception("Failed to push image.", exc_info=True)
logging.warning("You can manually push the image as follows:")
logging.warning("docker push %s", image)
def launch(self, port_map):
"""Launches the container with the given sha, publishing abd on port, and gRPC on port 8554
Returns the container.
"""
image = self.docker_image()
client = docker.from_env()
try:
container = client.containers.run(
image=image.id,
privileged=True,
publish_all_ports=True,
detach=True,
ports=port_map,
)
print("Launched {} (id:{})".format(container.name, container.id))
print("docker logs -f {}".format(container.name))
print("docker stop {}".format(container.name))
return container
except:
logging.exception("Unable to run the %s", image_sha)
print("Unable to start the container, try running it as:")
print("./run.sh ", image_sha)
def create_container(self, dest):
"""Creates the docker container, returning the sha of the container, or None in case of failure."""
identity = None
image_tag = self.full_name()
print("docker build {} -t {}".format(dest, image_tag))
try:
api_client = self.get_api_client()
logging.info("build(path=%s, tag=%s, rm=True, decode=True)", dest, image_tag)
result = api_client.build(path=dest, tag=image_tag, rm=True, decode=True)
for entry in result:
if "stream" in entry:
sys.stdout.write(entry["stream"])
if "aux" in entry and "ID" in entry["aux"]:
identity = entry["aux"]["ID"]
client = docker.from_env()
image = client.images.get(identity)
image.tag(self.repo + self.image_name(), "latest")
except:
logging.exception("Failed to create container.", exc_info=True)
logging.warning("You can manually create the container as follows:")
logging.warning("docker build -t %s %s", image_tag, dest)
return identity
def clean(self, dest):
if os.path.exists(dest):
shutil.rmtree(dest)
mkdir_p(dest)
def pull(self, image, tag):
"""Tries to retrieve the given image and tag.
Return True if succeeded, False when failed.
"""
client = self.get_api_client()
try:
tracker = ProgressTracker()
result = client.pull(self.repo + image, tag)
for entry in result:
tracker.update(entry)
except:
logging.info("Failed to retrieve image, this is not uncommon.", exc_info=True)
return False
return True
def full_name(self):
if self.repo:
return "{}{}:{}".format(self.repo, self.image_name(), self.docker_tag())
return "{}:{}".format(self.image_name(), self.docker_tag())
def latest_name(self):
if self.repo:
return "{}{}:{}".format(self.repo, self.image_name(), "latest")
return (self.image_name(), "latest")
def create_cloud_build_step(self, dest):
return {
"name": "gcr.io/cloud-builders/docker",
"args": [
"build",
"-t",
self.full_name(),
"-t",
self.latest_name(),
os.path.basename(dest),
],
}
def docker_image(self):
"""The docker local docker image if any
Returns:
{docker.models.images.Image}: A docker image object, or None.
"""
client = self.get_client()
for img in client.images.list():
for tag in img.tags:
if self.image_name() in tag:
return img
return None
def available(self):
"""True if this container image is locally available."""
return self.docker_image() is not None
def build(self, dest):
self.write(dest)
return self.create_container(dest)
def can_pull(self):
"""True if this container image can be pulled from a registry."""
return self.pull(self.image_name(), self.docker_tag())
@abc.abstractmethod
def write(self, destination):
"""Method responsible for writing the Dockerfile and all necessary files to build a container.
Args:
destination ({string}): A path to a directory where all the container files should reside.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def image_name(self):
"""The image name without the tag used to uniquely identify this image.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def docker_tag(self):
raise NotImplementedError()
@abc.abstractmethod
def depends_on(self):
"""Name of the system image this container is build on."""
raise NotImplementedError()
def __str__(self):
return self.image_name() + ":" + self.docker_tag()
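# --- Hedged usage sketch (illustrative, not part of the original module) ---
# A minimal subclass showing how the abstract hooks above fit together; the
# image name, tag, repository, and Dockerfile contents are all assumptions.
class ExampleContainer(DockerContainer):
    def write(self, destination):
        # Write a trivial Dockerfile into the destination directory.
        with open(os.path.join(destination, "Dockerfile"), "w") as dockerfile:
            dockerfile.write('FROM alpine:3.18\nCMD ["echo", "hello"]\n')
    def image_name(self):
        return "example-container"
    def docker_tag(self):
        return "0.1"
    def depends_on(self):
        return None
# Typical flow (requires a running docker daemon):
#   container = ExampleContainer(repo="localhost:5000/")
#   container.clean("/tmp/example-build")
#   container.build("/tmp/example-build")
#   container.launch({"5555/tcp": 5555})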
|
# -*- coding: utf-8 -*-
"""Canonical correlation analysis
author: Yichuan Liu
"""
import numpy as np
from numpy.linalg import svd
import scipy
import pandas as pd
from statsmodels.base.model import Model
from statsmodels.iolib import summary2
from .multivariate_ols import multivariate_stats
class CanCorr(Model):
"""
Canonical correlation analysis using singular value decomposition
For matrices exog=x and endog=y, find projections x_cancoef and y_cancoef
such that:
x1 = x * x_cancoef, x1' * x1 is identity matrix
y1 = y * y_cancoef, y1' * y1 is identity matrix
and the correlation between x1 and y1 is maximized.
Attributes
----------
endog : ndarray
See Parameters.
exog : ndarray
See Parameters.
cancorr : ndarray
The canonical correlation values
y_cancoef : ndarray
The canonical coefficients for endog
x_cancoef : ndarray
The canonical coefficients for exog
References
----------
.. [*] http://numerical.recipes/whp/notes/CanonCorrBySVD.pdf
.. [*] http://www.csun.edu/~ata20315/psy524/docs/Psy524%20Lecture%208%20CC.pdf
.. [*] http://www.mathematica-journal.com/2014/06/canonical-correlation-analysis/
""" # noqa:E501
def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs):
super(CanCorr, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
self._fit(tolerance)
def _fit(self, tolerance=1e-8):
"""Fit the model
A ValueError is raised if there are singular values smaller than the
tolerance. The treatment of singular arrays might change in future.
Parameters
----------
tolerance : float
eigenvalue tolerance; values smaller than this are treated as zero
"""
nobs, k_yvar = self.endog.shape
nobs, k_xvar = self.exog.shape
k = np.min([k_yvar, k_xvar])
x = np.array(self.exog)
x = x - x.mean(0)
y = np.array(self.endog)
y = y - y.mean(0)
ux, sx, vx = svd(x, 0)
# vx_ds = vx.T divided by sx
vx_ds = vx.T
mask = sx > tolerance
if mask.sum() < len(mask):
raise ValueError('exog is collinear.')
vx_ds[:, mask] /= sx[mask]
uy, sy, vy = svd(y, 0)
# vy_ds = vy.T divided by sy
vy_ds = vy.T
mask = sy > tolerance
if mask.sum() < len(mask):
raise ValueError('endog is collinear.')
vy_ds[:, mask] /= sy[mask]
u, s, v = svd(ux.T.dot(uy), 0)
# Correct any roundoff
self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])
self.x_cancoef = vx_ds.dot(u[:, :k])
self.y_cancoef = vy_ds.dot(v.T[:, :k])
def corr_test(self):
"""Approximate F test
Perform multivariate statistical tests of the hypothesis that
there is no canonical correlation between endog and exog.
For each canonical correlation, testing its significance based on
Wilks' lambda.
Returns
-------
CanCorrTestResults instance
"""
nobs, k_yvar = self.endog.shape
nobs, k_xvar = self.exog.shape
eigenvals = np.power(self.cancorr, 2)
stats = pd.DataFrame(columns=['Canonical Correlation', "Wilks' lambda",
'Num DF','Den DF', 'F Value','Pr > F'],
index=list(range(len(eigenvals) - 1, -1, -1)))
prod = 1
for i in range(len(eigenvals) - 1, -1, -1):
prod *= 1 - eigenvals[i]
p = k_yvar - i
q = k_xvar - i
r = (nobs - k_yvar - 1) - (p - q + 1) / 2
u = (p * q - 2) / 4
df1 = p * q
if p ** 2 + q ** 2 - 5 > 0:
t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))
else:
t = 1
df2 = r * t - 2 * u
lmd = np.power(prod, 1 / t)
F = (1 - lmd) / lmd * df2 / df1
stats.loc[i, 'Canonical Correlation'] = self.cancorr[i]
stats.loc[i, "Wilks' lambda"] = prod
stats.loc[i, 'Num DF'] = df1
stats.loc[i, 'Den DF'] = df2
stats.loc[i, 'F Value'] = F
pval = scipy.stats.f.sf(F, df1, df2)
stats.loc[i, 'Pr > F'] = pval
'''
# Wilk's Chi square test of each canonical correlation
df = (p - i + 1) * (q - i + 1)
chi2 = a * np.log(prod)
pval = stats.chi2.sf(chi2, df)
stats.loc[i, 'Canonical correlation'] = self.cancorr[i]
stats.loc[i, 'Chi-square'] = chi2
stats.loc[i, 'DF'] = df
stats.loc[i, 'Pr > ChiSq'] = pval
'''
ind = stats.index.values[::-1]
stats = stats.loc[ind, :]
# Multivariate tests (remember x has mean removed)
stats_mv = multivariate_stats(eigenvals,
k_yvar, k_xvar, nobs - k_xvar - 1)
return CanCorrTestResults(stats, stats_mv)
class CanCorrTestResults:
"""
Canonical correlation results class
Attributes
----------
stats : DataFrame
Contain statistical tests results for each canonical correlation
stats_mv : DataFrame
Contain the multivariate statistical tests results
"""
def __init__(self, stats, stats_mv):
self.stats = stats
self.stats_mv = stats_mv
def __str__(self):
return self.summary().__str__()
def summary(self):
summ = summary2.Summary()
summ.add_title('Cancorr results')
summ.add_df(self.stats)
summ.add_dict({'': ''})
summ.add_dict({'Multivariate Statistics and F Approximations': ''})
summ.add_df(self.stats_mv)
return summ
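# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Fit CanCorr on small synthetic data; the shapes and the seed are arbitrary
# assumptions made only for this demonstration.
if __name__ == "__main__":
    np.random.seed(0)
    x_demo = np.random.normal(size=(100, 3))
    y_demo = x_demo.dot(np.random.normal(size=(3, 2))) + np.random.normal(size=(100, 2))
    demo = CanCorr(y_demo, x_demo)
    print(demo.cancorr)                  # canonical correlations
    print(demo.corr_test().summary())    # approximate F tests per correlation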
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from color import Coloring
from command import Command
class BranchColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'branch')
self.current = self.printer('current', fg='green')
self.local = self.printer('local')
self.notinproject = self.printer('notinproject', fg='red')
class BranchInfo(object):
def __init__(self, name):
self.name = name
self.current = 0
self.published = 0
self.published_equal = 0
self.projects = []
def add(self, b):
if b.current:
self.current += 1
if b.published:
self.published += 1
if b.revision == b.published:
self.published_equal += 1
self.projects.append(b)
@property
def IsCurrent(self):
return self.current > 0
@property
def IsPublished(self):
return self.published > 0
@property
def IsPublishedEqual(self):
return self.published_equal == len(self.projects)
class Branches(Command):
common = True
helpSummary = "View current topic branches"
helpUsage = """
%prog [<project>...]
Summarizes the currently available topic branches.
Branch Display
--------------
The branch display output by this command is organized into four
columns of information; for example:
*P nocolor | in repo
repo2 |
The first column contains a * if the branch is the currently
checked out branch in any of the specified projects, or a blank
if no project has the branch checked out.
The second column contains either blank, p or P, depending upon
the upload status of the branch.
(blank): branch not yet published by repo upload
P: all commits were published by repo upload
p: only some commits were published by repo upload
The third column contains the branch name.
The fourth column (after the | separator) lists the projects that
the branch appears in, or does not appear in. If no project list
is shown, then the branch appears in all projects.
"""
def Execute(self, opt, args):
projects = self.GetProjects(args)
out = BranchColoring(self.manifest.manifestProject.config)
all_branches = {}
project_cnt = len(projects)
for project in projects:
for name, b in project.GetBranches().items():
b.project = project
if name not in all_branches:
all_branches[name] = BranchInfo(name)
all_branches[name].add(b)
names = list(sorted(all_branches))
if not names:
print(' (no branches)', file=sys.stderr)
return
width = 25
for name in names:
if width < len(name):
width = len(name)
for name in names:
i = all_branches[name]
in_cnt = len(i.projects)
if i.IsCurrent:
current = '*'
hdr = out.current
else:
current = ' '
hdr = out.local
if i.IsPublishedEqual:
published = 'P'
elif i.IsPublished:
published = 'p'
else:
published = ' '
hdr('%c%c %-*s' % (current, published, width, name))
out.write(' |')
if in_cnt < project_cnt:
fmt = out.write
paths = []
if in_cnt < project_cnt - in_cnt:
in_type = 'in'
for b in i.projects:
paths.append(b.project.relpath)
else:
fmt = out.notinproject
in_type = 'not in'
have = set()
for b in i.projects:
have.add(b.project)
for p in projects:
if p not in have:
paths.append(p.relpath)
s = ' %s %s' % (in_type, ', '.join(paths))
if width + 7 + len(s) < 80:
fmt(s)
else:
fmt(' %s:' % in_type)
for p in paths:
out.nl()
fmt(width*' ' + ' %s' % p)
else:
out.write(' in all projects')
out.nl()
|
import numpy as np
def amaze_demosaic(src, raw):
cfarray = raw.raw_colors
cfarray[cfarray == 3] = 1
rgb = amaze_demosaic_libraw(src, cfarray, raw.daylight_whitebalance)
return rgb
def amaze_demosaic_libraw(src, cfarray, daylight_wb):
TS = 512
winx = winy = 0
width = src.shape[1]
height = src.shape[0]
image = np.empty([height, width, 3], dtype=np.uint16)
clip_pt = min(daylight_wb[0], daylight_wb[1], daylight_wb[2])
v1 = TS
v2 = 2 * TS
v3 = 3 * TS
p1 = -TS + 1
p2 = -2 * TS + 2
p3 = -3 * TS + 3
m1 = TS + 1
m2 = 2 * TS + 2
m3 = 3 * TS + 3
nbr = [-v2,-2,2,v2,0]
eps, epssq = 1e-5, 1e-10
# adaptive ratios threshold
arthresh=0.75
# nyquist texture test threshold
nyqthresh=0.5
# diagonal interpolation test threshold
pmthresh=0.25
# factors for bounding interpolation in saturated regions
lbd, ubd = 1, 1 # lbd=0.66, ubd=1.5 alternative values;
# gaussian on 5x5 quincunx, sigma=1.2
gaussodd = [0.14659727707323927, 0.103592713382435, 0.0732036125103057, 0.0365543548389495]
# gaussian on 5x5, sigma=1.2
gaussgrad = [0.07384411893421103, 0.06207511968171489, 0.0521818194747806, 0.03687419286733595, 0.03099732204057846, 0.018413194161458882]
# gaussian on 3x3, sigma =0.7
gauss1 = [0.3376688223162362, 0.12171198028231786, 0.04387081413862306]
# gaussian on 5x5 alt quincunx, sigma=1.5
gausseven = [0.13719494435797422, 0.05640252782101291]
# guassian on quincunx grid
gquinc = [0.169917, 0.108947, 0.069855, 0.0287182]
rgb = np.empty([TS*TS, 3], dtype=np.float32)
delh = np.empty(TS*TS, dtype=np.float32)
delv = np.empty(TS*TS, dtype=np.float32)
delhsq = np.empty(TS*TS, dtype=np.float32)
delvsq = np.empty(TS*TS, dtype=np.float32)
dirwts = np.empty([TS*TS, 2], dtype=np.float32)
vcd = np.empty(TS*TS, dtype=np.float32)
hcd = np.empty(TS*TS, dtype=np.float32)
vcdalt = np.empty(TS*TS, dtype=np.float32)
hcdalt = np.empty(TS*TS, dtype=np.float32)
vcdsq = np.empty(TS*TS, dtype=np.float32)
hcdsq = np.empty(TS*TS, dtype=np.float32)
cddiffsq = np.empty(TS*TS, dtype=np.float32)
hvwt = np.empty(TS*TS, dtype=np.float32)
Dgrb = np.empty([TS*TS, 2], dtype=np.float32)
delp = np.empty(TS*TS, dtype=np.float32)
delm = np.empty(TS*TS, dtype=np.float32)
rbint = np.empty(TS*TS, dtype=np.float32)
Dgrbh2 = np.empty(TS*TS, dtype=np.float32)
Dgrbv2 = np.empty(TS*TS, dtype=np.float32)
dgintv = np.empty(TS*TS, dtype=np.float32)
dginth = np.empty(TS*TS, dtype=np.float32)
Dgrbpsq1 = np.empty(TS*TS, dtype=np.float32)
Dgrbmsq1 = np.empty(TS*TS, dtype=np.float32)
cfa = np.empty(TS*TS, dtype=np.float32)
pmwt = np.empty(TS*TS, dtype=np.float32)
rbp = np.empty(TS*TS, dtype=np.float32)
rbm = np.empty(TS*TS, dtype=np.float32)
nyquist = np.empty(TS*TS, dtype=np.int32)
# determine GRBG coset; (ey,ex) is the offset of the R subarray
if cfarray[0][0] == 1:
if cfarray[0][1] == 0:
ex, ey = 1, 0
else:
ex, ey = 0, 1
else:
if cfarray[0][0] == 0:
ex = ey = 0
else:
ex = ey = 1
# Start main loop
loop_cnt = 1
for top in range(winy-16, winy+height, TS-32):
for left in range(winx-16, winx+width, TS-32):
print("Loop [{}]: top: {} left: {}".format(loop_cnt, top, left))
loop_cnt += 1
# location of tile bottom edge
bottom = min(top+TS, winy+height+16)
# location of tile right edge
right = min(left+TS, winx+width+16)
# tile width (=TS except for right edge of image)
rr1 = bottom - top
# tile height (=TS except for bottom edge of image)
cc1 = right - left
# rgb from input CFA data
# rgb values should be floating point number between 0 and 1
# after white balance multipliers are applied
# a 16 pixel border is added to each side of the image
# bookkeeping for borders
rrmin = 16 if top < winy else 0
ccmin = 16 if left < winx else 0
rrmax = winy+height-top if bottom>(winy+height) else rr1
ccmax = winx+width-left if right>(winx+width) else cc1
for rr in range(rrmin, rrmax):
row = rr + top
for cc in range(ccmin, ccmax):
col = cc + left
c = cfarray[rr, cc]
indx1 = rr * TS + cc
indx = row * width + col
rgb[indx1, c] = src[row, col] / 65535
cfa[indx1] = rgb[indx1, c]
# fill borders
if rrmin > 0:
for rr in range(16):
for cc in range(ccmin, ccmax):
c = cfarray[rr, cc]
rgb[rr*TS+cc, c] = rgb[(32-rr)*TS+cc, c]
cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
if rrmax < rr1:
for rr in range(16):
for cc in range(ccmin, ccmax):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+cc, c] = (src[(winy+height-rr-2), left+cc])/65535
cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc, c]
if ccmin > 0:
for rr in range(rrmin, rrmax):
for cc in range(16):
c = cfarray[rr, cc]
rgb[rr*TS+cc, c] = rgb[rr*TS+32-cc, c]
cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
if ccmax < cc1:
for rr in range(rrmin, rrmax):
for cc in range(16):
c = cfarray[rr, cc]
rgb[rr*TS+ccmax+cc, c] = (src[(top+rr), (winx+width-cc-2)])/65535
cfa[rr*TS+ccmax+cc] = rgb[rr*TS+ccmax+cc, c]
# also, fill the image corners
if rrmin > 0 and ccmin > 0:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rr)*TS+cc][c] = rgb[(32-rr)*TS+(32-cc)][c]
cfa[(rr)*TS+cc] = rgb[(rr)*TS+cc][c]
if rrmax < rr1 and ccmax < cc1:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+ccmax+cc][c] = (src[(winy+height-rr-2)][(winx+width-cc-2)])/65535
cfa[(rrmax+rr)*TS+ccmax+cc] = rgb[(rrmax+rr)*TS+ccmax+cc][c]
if rrmin > 0 and ccmax < cc1:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rr)*TS+ccmax+cc][c] = (src[(winy+32-rr)][(winx+width-cc-2)])/65535
cfa[(rr)*TS+ccmax+cc] = rgb[(rr)*TS+ccmax+cc][c]
if rrmax < rr1 and ccmin > 0:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+cc][c] = (src[(winy+height-rr-2)][(winx+32-cc)])/65535
cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc][c]
# end of border fill
for rr in range(1, rr1-1):
for cc in range(1, cc1-1):
indx = rr*TS+cc
delh[indx] = abs(cfa[indx + 1] - cfa[indx - 1])
delv[indx] = abs(cfa[indx + v1] - cfa[indx - v1])
delhsq[indx] = SQR(delh[indx])
delvsq[indx] = SQR(delv[indx])
delp[indx] = abs(cfa[indx+p1]-cfa[indx-p1])
delm[indx] = abs(cfa[indx+m1]-cfa[indx-m1])
for rr in range(2, rr1-2):
for cc in range(2, cc1-2):
indx = rr*TS+cc
# vert directional averaging weights
dirwts[indx][0] = eps+delv[indx+v1]+delv[indx-v1]+delv[indx]
# horizontal weights
dirwts[indx][1] = eps+delh[indx+1]+delh[indx-1]+delh[indx]
if cfarray[rr, cc] & 1:
# for later use in diagonal interpolation
Dgrbpsq1[indx]=(SQR(cfa[indx]-cfa[indx-p1])+SQR(cfa[indx]-cfa[indx+p1]))
Dgrbmsq1[indx]=(SQR(cfa[indx]-cfa[indx-m1])+SQR(cfa[indx]-cfa[indx+m1]))
for rr in range(4, rr1 - 4):
for cc in range(4, cc1 - 4):
indx = rr*TS+cc
c = cfarray[rr, cc]
sgn = -1 if c & 1 else 1
# initialization of nyquist test
nyquist[indx]=0
# preparation for diag interp
rbint[indx]=0
# color ratios in each cardinal direction
cru = cfa[indx - v1] * (dirwts[indx - v2][0] + dirwts[indx][0]) / (dirwts[indx - v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx - v2]))
crd = cfa[indx + v1] * (dirwts[indx + v2][0] + dirwts[indx][0]) / (dirwts[indx + v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx + v2]))
crl = cfa[indx - 1] * (dirwts[indx - 2][1] + dirwts[indx][1]) / (dirwts[indx - 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx - 2]))
crr = cfa[indx + 1] * (dirwts[indx + 2][1] + dirwts[indx][1]) / (dirwts[indx + 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx + 2]))
# G interpolated in vert/hor directions using Hamilton-Adams method
guha = min(clip_pt, cfa[indx - v1] + 0.5 * (cfa[indx] - cfa[indx - v2]))
gdha = min(clip_pt, cfa[indx + v1] + 0.5 * (cfa[indx] - cfa[indx + v2]))
glha = min(clip_pt, cfa[indx - 1] + 0.5 * (cfa[indx] - cfa[indx - 2]))
grha = min(clip_pt, cfa[indx + 1] + 0.5 * (cfa[indx] - cfa[indx + 2]))
# G interpolated in vert/hor directions using adaptive ratios
guar = cfa[indx] * cru if abs(1-cru) < arthresh else guha
gdar = cfa[indx] * crd if abs(1-crd) < arthresh else gdha
glar = cfa[indx] * crl if abs(1-crl) < arthresh else glha
grar = cfa[indx] * crr if abs(1-crr) < arthresh else grha
# adaptive weights for vertical/horizontal directions
hwt = dirwts[indx - 1][1] / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
vwt = dirwts[indx - v1][0] / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
# interpolated G via adaptive weights of cardinal evaluations
Gintvar = vwt * gdar + (1-vwt) * guar
Ginthar = hwt * grar + (1-hwt) * glar
Gintvha = vwt * gdha + (1-vwt) * guha
Ginthha = hwt * grha + (1-hwt) * glha
# interpolated color differences
vcd[indx] = sgn * (Gintvar-cfa[indx])
hcd[indx] = sgn * (Ginthar-cfa[indx])
vcdalt[indx] = sgn * (Gintvha-cfa[indx])
hcdalt[indx] = sgn * (Ginthha-cfa[indx])
if cfa[indx] > 0.8 * clip_pt or Gintvha > 0.8 * clip_pt or Ginthha > 0.8 * clip_pt:
# use HA if highlights are (nearly) clipped
guar = guha
gdar = gdha
glar = glha
grar = grha
vcd[indx] = vcdalt[indx]
hcd[indx] = hcdalt[indx]
# differences of interpolations in opposite directions
dgintv[indx] = min((guha - gdha) ** 2, (guar - gdar) ** 2)
dginth[indx] = min((glha - grha) ** 2, (glar - grar) ** 2)
for rr in range(4, rr1-4):
for cc in range(4, cc1-4):
indx = rr * TS + cc
c = cfarray[rr, cc]
hcdvar = 3*(SQR(hcd[indx-2])+SQR(hcd[indx])+SQR(hcd[indx+2]))-SQR(hcd[indx-2]+hcd[indx]+hcd[indx+2])
hcdaltvar = 3*(SQR(hcdalt[indx-2])+SQR(hcdalt[indx])+SQR(hcdalt[indx+2]))-SQR(hcdalt[indx-2]+hcdalt[indx]+hcdalt[indx+2])
vcdvar = 3*(SQR(vcd[indx-v2])+SQR(vcd[indx])+SQR(vcd[indx+v2]))-SQR(vcd[indx-v2]+vcd[indx]+vcd[indx+v2])
vcdaltvar = 3*(SQR(vcdalt[indx-v2])+SQR(vcdalt[indx])+SQR(vcdalt[indx+v2]))-SQR(vcdalt[indx-v2]+vcdalt[indx]+vcdalt[indx+v2])
# choose the smallest variance; this yields a smoother interpolation
if hcdaltvar < hcdvar:
hcd[indx] = hcdalt[indx]
if vcdaltvar < vcdvar:
vcd[indx] = vcdalt[indx]
# bound the interpolation in regions of high saturation
# vertical and horizontal G interpolations
if c & 1: # G site
Ginth = -hcd[indx] + cfa[indx]
Gintv = -vcd[indx] + cfa[indx]
if hcd[indx] > 0:
if 3 * hcd[indx] > (Ginth + cfa[indx]):
hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
else:
hwt = 1 - 3 * hcd[indx] / (eps + Ginth + cfa[indx])
hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (-np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx])
if vcd[indx] > 0:
if 3 * vcd[indx] > (Gintv + cfa[indx]):
vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
else:
vwt = 1 - 3 * vcd[indx] / (eps + Gintv + cfa[indx])
vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (-np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx])
if Ginth > clip_pt:
hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
if Gintv > clip_pt:
vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
else: # R or B site
Ginth = hcd[indx] + cfa[indx]
Gintv = vcd[indx] + cfa[indx]
if hcd[indx] < 0:
if 3 * hcd[indx] < -(Ginth + cfa[indx]):
hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
else:
hwt = 1 + 3 * hcd[indx] / (eps + Ginth + cfa[indx])
hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx])
if vcd[indx] < 0:
if 3 * vcd[indx] < -(Gintv + cfa[indx]):
vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
else:
vwt = 1 + 3 * vcd[indx] / (eps + Gintv + cfa[indx])
vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx])
if Ginth > clip_pt:
hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
if Gintv > clip_pt:
vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
vcdsq[indx] = SQR(vcd[indx])
hcdsq[indx] = SQR(hcd[indx])
cddiffsq[indx] = SQR(vcd[indx]-hcd[indx])
for rr in range(6, rr1-6):
for cc in range(6+(cfarray[rr, 2]&1), cc1-6, 2):
indx = rr * TS + cc
# compute color difference variances in cardinal directions
Dgrbvvaru = 4*(vcdsq[indx]+vcdsq[indx-v1]+vcdsq[indx-v2]+vcdsq[indx-v3])-SQR(vcd[indx]+vcd[indx-v1]+vcd[indx-v2]+vcd[indx-v3])
Dgrbvvard = 4*(vcdsq[indx]+vcdsq[indx+v1]+vcdsq[indx+v2]+vcdsq[indx+v3])-SQR(vcd[indx]+vcd[indx+v1]+vcd[indx+v2]+vcd[indx+v3])
Dgrbhvarl = 4*(hcdsq[indx]+hcdsq[indx-1]+hcdsq[indx-2]+hcdsq[indx-3])-SQR(hcd[indx]+hcd[indx-1]+hcd[indx-2]+hcd[indx-3])
Dgrbhvarr = 4*(hcdsq[indx]+hcdsq[indx+1]+hcdsq[indx+2]+hcdsq[indx+3])-SQR(hcd[indx]+hcd[indx+1]+hcd[indx+2]+hcd[indx+3])
hwt = dirwts[indx-1][1]/(dirwts[indx-1][1]+dirwts[indx+1][1])
vwt = dirwts[indx-v1][0]/(dirwts[indx+v1][0]+dirwts[indx-v1][0])
vcdvar = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
hcdvar = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
# compute fluctuations in up/down and left/right interpolations of colors
Dgrbvvaru = (dgintv[indx])+(dgintv[indx-v1])+(dgintv[indx-v2])
Dgrbvvard = (dgintv[indx])+(dgintv[indx+v1])+(dgintv[indx+v2])
Dgrbhvarl = (dginth[indx])+(dginth[indx-1])+(dginth[indx-2])
Dgrbhvarr = (dginth[indx])+(dginth[indx+1])+(dginth[indx+2])
vcdvar1 = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
hcdvar1 = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
# determine adaptive weights for G interpolation
varwt=hcdvar/(vcdvar+hcdvar)
diffwt=hcdvar1/(vcdvar1+hcdvar1)
# if both agree on interpolation direction, choose the one with strongest directional discrimination;
# otherwise, choose the u/d and l/r difference fluctuation weights
if ((0.5 - varwt) * (0.5 - diffwt) > 0) and (abs(0.5 - diffwt) < abs(0.5 - varwt)):
hvwt[indx] = varwt
else:
hvwt[indx] = diffwt
# Nyquist test
for rr in range(6, rr1-6):
for cc in range(6 + (cfarray[rr, 2]&1), cc1 - 6, 2):
indx = rr * TS + cc
# nyquist texture test: ask if difference of vcd compared to hcd is larger or smaller than RGGB gradients
nyqtest = (gaussodd[0]*cddiffsq[indx] + gaussodd[1]*(cddiffsq[indx-m1]+cddiffsq[indx+p1] + cddiffsq[indx-p1]+cddiffsq[indx+m1]) + gaussodd[2]*(cddiffsq[indx-v2]+cddiffsq[indx-2]+ cddiffsq[indx+2]+cddiffsq[indx+v2]) + gaussodd[3]*(cddiffsq[indx-m2]+cddiffsq[indx+p2] + cddiffsq[indx-p2]+cddiffsq[indx+m2]))
nyqtest -= nyqthresh*(gaussgrad[0]*(delhsq[indx]+delvsq[indx])+gaussgrad[1]*(delhsq[indx-v1]+delvsq[indx-v1]+delhsq[indx+1]+delvsq[indx+1] + delhsq[indx-1]+delvsq[indx-1]+delhsq[indx+v1]+delvsq[indx+v1])+ gaussgrad[2]*(delhsq[indx-m1]+delvsq[indx-m1]+delhsq[indx+p1]+delvsq[indx+p1]+ delhsq[indx-p1]+delvsq[indx-p1]+delhsq[indx+m1]+delvsq[indx+m1])+ gaussgrad[3]*(delhsq[indx-v2]+delvsq[indx-v2]+delhsq[indx-2]+delvsq[indx-2]+ delhsq[indx+2]+delvsq[indx+2]+delhsq[indx+v2]+delvsq[indx+v2])+ gaussgrad[4]*(delhsq[indx-2*TS-1]+delvsq[indx-2*TS-1]+delhsq[indx-2*TS+1]+delvsq[indx-2*TS+1]+ delhsq[indx-TS-2]+delvsq[indx-TS-2]+delhsq[indx-TS+2]+delvsq[indx-TS+2]+ delhsq[indx+TS-2]+delvsq[indx+TS-2]+delhsq[indx+TS+2]+delvsq[indx-TS+2]+ delhsq[indx+2*TS-1]+delvsq[indx+2*TS-1]+delhsq[indx+2*TS+1]+delvsq[indx+2*TS+1])+ gaussgrad[5]*(delhsq[indx-m2]+delvsq[indx-m2]+delhsq[indx+p2]+delvsq[indx+p2]+ delhsq[indx-p2]+delvsq[indx-p2]+delhsq[indx+m2]+delvsq[indx+m2]))
if nyqtest > 0:
# nyquist=1 for nyquist region
nyquist[indx] = 1
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
areawt = (nyquist[indx-v2]+nyquist[indx-m1]+nyquist[indx+p1]+nyquist[indx-2]+nyquist[indx]+nyquist[indx+2]+nyquist[indx-p1]+nyquist[indx+m1]+nyquist[indx+v2])
# if most of your neighbors are named Nyquist, it's likely that you're one too
nyquist[indx] = 1 if areawt > 4 else 0
# end of Nyquist test
# in areas of Nyquist texture, do area interpolation
for rr in range(8, rr1 - 8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
if nyquist[indx]:
# area interpolation
sumh = sumv = sumsqh = sumsqv = areawt = 0
for i in range(-6, 7, 2):
for j in range(-6, 7, 2):
indx1 = (rr + i) * TS + cc + j
if nyquist[indx1]:
sumh += cfa[indx1] - 0.5 * (cfa[indx1-1]+cfa[indx1+1])
sumv += cfa[indx1] - 0.5 * (cfa[indx1-v1]+cfa[indx1+v1])
sumsqh += 0.5 * (SQR(cfa[indx1]-cfa[indx1-1]) + SQR(cfa[indx1]-cfa[indx1+1]))
sumsqv += 0.5 * (SQR(cfa[indx1]-cfa[indx1-v1]) + SQR(cfa[indx1]-cfa[indx1+v1]))
areawt += 1
# horizontal and vertical color differences, and adaptive weight
hcdvar = epssq + max(0, areawt*sumsqh-sumh*sumh)
vcdvar = epssq + max(0, areawt*sumsqv-sumv*sumv)
hvwt[indx] = hcdvar / (vcdvar + hcdvar)
# end of area interpolation
# populate G at R/B sites
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
# first ask if one gets more directional discrimination from nearby B/R sites
hvwtalt = 0.25 * (hvwt[indx-m1] + hvwt[indx+p1] + hvwt[indx-p1] + hvwt[indx+m1])
vo = abs(0.5 - hvwt[indx])
ve = abs(0.5 - hvwtalt)
# a better result was obtained from the neighbors
if vo < ve:
hvwt[indx] = hvwtalt
# evaluate color differences
Dgrb[indx][0] = (hcd[indx]*(1-hvwt[indx]) + vcd[indx]*hvwt[indx])
# evaluate G
rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
# local curvature in G (preparation for nyquist refinement step)
if nyquist[indx]:
Dgrbh2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-1][1]+rgb[indx+1][1]))
Dgrbv2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-v1][1]+rgb[indx+v1][1]))
else:
Dgrbh2[indx] = Dgrbv2[indx] = 0
# end of standard interpolation
# refine Nyquist areas using G curvatures
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
if nyquist[indx]:
# local averages (over Nyquist pixels only) of G curvature squared
gvarh = epssq + (gquinc[0]*Dgrbh2[indx]+gquinc[1]*(Dgrbh2[indx-m1]+Dgrbh2[indx+p1]+Dgrbh2[indx-p1]+Dgrbh2[indx+m1])+gquinc[2]*(Dgrbh2[indx-v2]+Dgrbh2[indx-2]+Dgrbh2[indx+2]+Dgrbh2[indx+v2])+gquinc[3]*(Dgrbh2[indx-m2]+Dgrbh2[indx+p2]+Dgrbh2[indx-p2]+Dgrbh2[indx+m2]))
gvarv = epssq + (gquinc[0]*Dgrbv2[indx]+gquinc[1]*(Dgrbv2[indx-m1]+Dgrbv2[indx+p1]+Dgrbv2[indx-p1]+Dgrbv2[indx+m1])+gquinc[2]*(Dgrbv2[indx-v2]+Dgrbv2[indx-2]+Dgrbv2[indx+2]+Dgrbv2[indx+v2])+gquinc[3]*(Dgrbv2[indx-m2]+Dgrbv2[indx+p2]+Dgrbv2[indx-p2]+Dgrbv2[indx+m2]))
# use the results as weights for refined G interpolation
Dgrb[indx][0] = (hcd[indx]*gvarv + vcd[indx]*gvarh)/(gvarv+gvarh)
rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
# diagonal interpolation correction
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
rbvarp = epssq + (gausseven[0]*(Dgrbpsq1[indx-v1]+Dgrbpsq1[indx-1]+Dgrbpsq1[indx+1]+Dgrbpsq1[indx+v1]) + gausseven[1]*(Dgrbpsq1[indx-v2-1]+Dgrbpsq1[indx-v2+1]+Dgrbpsq1[indx-2-v1]+Dgrbpsq1[indx+2-v1]+ Dgrbpsq1[indx-2+v1]+Dgrbpsq1[indx+2+v1]+Dgrbpsq1[indx+v2-1]+Dgrbpsq1[indx+v2+1]))
rbvarm = epssq + (gausseven[0]*(Dgrbmsq1[indx-v1]+Dgrbmsq1[indx-1]+Dgrbmsq1[indx+1]+Dgrbmsq1[indx+v1]) + gausseven[1]*(Dgrbmsq1[indx-v2-1]+Dgrbmsq1[indx-v2+1]+Dgrbmsq1[indx-2-v1]+Dgrbmsq1[indx+2-v1]+ Dgrbmsq1[indx-2+v1]+Dgrbmsq1[indx+2+v1]+Dgrbmsq1[indx+v2-1]+Dgrbmsq1[indx+v2+1]))
# diagonal color ratios
crse=2*(cfa[indx+m1])/(eps+cfa[indx]+(cfa[indx+m2]))
crnw=2*(cfa[indx-m1])/(eps+cfa[indx]+(cfa[indx-m2]))
crne=2*(cfa[indx+p1])/(eps+cfa[indx]+(cfa[indx+p2]))
crsw=2*(cfa[indx-p1])/(eps+cfa[indx]+(cfa[indx-p2]))
# assign B/R at R/B sites
if abs(1 - crse) < arthresh:
rbse = cfa[indx] * crse
else:
rbse = cfa[indx + m1] + 0.5 * (cfa[indx] - cfa[indx + m2])
if abs(1 - crnw) < arthresh:
    rbnw = cfa[indx] * crnw
else:
    rbnw = cfa[indx - m1] + 0.5 * (cfa[indx] - cfa[indx - m2])
if abs(1 - crne) < arthresh:
rbne = cfa[indx] * crne
else:
rbne = cfa[indx + p1] + 0.5 * (cfa[indx] - cfa[indx + p2])
if abs(1 - crsw) < arthresh:
rbsw = cfa[indx] * crsw
else:
rbsw = (cfa[indx - p1]) + 0.5 * (cfa[indx] - cfa[indx - p2])
wtse= eps+delm[indx]+delm[indx+m1]+delm[indx+m2] # same as for wtu,wtd,wtl,wtr
wtnw= eps+delm[indx]+delm[indx-m1]+delm[indx-m2]
wtne= eps+delp[indx]+delp[indx+p1]+delp[indx+p2]
wtsw= eps+delp[indx]+delp[indx-p1]+delp[indx-p2]
rbm[indx] = (wtse*rbnw+wtnw*rbse)/(wtse+wtnw)
rbp[indx] = (wtne*rbsw+wtsw*rbne)/(wtne+wtsw)
pmwt[indx] = rbvarm/(rbvarp+rbvarm)
# bound the interpolation in regions of high saturation
if rbp[indx] < cfa[indx]:
if 2 * (rbp[indx]) < cfa[indx]:
rbp[indx] = np.median([rbp[indx] , cfa[indx - p1], cfa[indx + p1]])
else:
pwt = 2 * (cfa[indx] - rbp[indx]) / (eps + rbp[indx] + cfa[indx])
rbp[indx] = pwt * rbp[indx] + (1 - pwt) * np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
if rbm[indx] < cfa[indx]:
if 2 * (rbm[indx]) < cfa[indx]:
rbm[indx] = np.median([rbm[indx] , cfa[indx - m1], cfa[indx + m1]])
else:
mwt = 2 * (cfa[indx] - rbm[indx]) / (eps + rbm[indx] + cfa[indx])
rbm[indx] = mwt * rbm[indx] + (1 - mwt) * np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
if rbp[indx] > clip_pt:
rbp[indx] = np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
if rbm[indx] > clip_pt:
rbm[indx] = np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
for rr in range(10, rr1-10):
for cc in range(10 + (cfarray[rr, 2]&1), cc1-10, 2):
indx = rr * TS + cc
# first ask if one gets more directional discrimination from nearby B/R sites
pmwtalt = 0.25*(pmwt[indx-m1]+pmwt[indx+p1]+pmwt[indx-p1]+pmwt[indx+m1])
vo = abs(0.5-pmwt[indx])
ve = abs(0.5-pmwtalt)
if vo < ve:
pmwt[indx] = pmwtalt
rbint[indx] = 0.5*(cfa[indx] + rbm[indx]*(1-pmwt[indx]) + rbp[indx]*pmwt[indx])
for rr in range(12, rr1 - 12):
for cc in range(12 + (cfarray[rr, 2]&1), cc1 - 12, 2):
indx = rr * TS + cc
if abs(0.5 - pmwt[indx]) < abs(0.5 - hvwt[indx]):
continue
# now interpolate G vertically/horizontally using R+B values
# unfortunately, since G interpolation cannot be done diagonally this may lead to colour shifts
# colour ratios for G interpolation
cru = cfa[indx-v1]*2/(eps+rbint[indx]+rbint[indx-v2])
crd = cfa[indx+v1]*2/(eps+rbint[indx]+rbint[indx+v2])
crl = cfa[indx-1]*2/(eps+rbint[indx]+rbint[indx-2])
crr = cfa[indx+1]*2/(eps+rbint[indx]+rbint[indx+2])
# interpolated G via adaptive ratios or Hamilton-Adams in each cardinal direction
if abs(1 - cru) < arthresh:
gu = rbint[indx] * cru
else:
gu = cfa[indx - v1] + 0.5 * (rbint[indx] - rbint[(indx - v1)])
if abs(1 - crd) < arthresh:
gd = rbint[indx] * crd
else:
gd = cfa[indx + v1] + 0.5 * (rbint[indx] - rbint[(indx + v1)])
if abs(1 - crl) < arthresh:
gl = rbint[indx] * crl
else:
gl = cfa[indx - 1] + 0.5 * (rbint[indx] - rbint[(indx - 1)])
if abs(1 - crr) < arthresh:
gr = rbint[indx] * crr
else:
gr = cfa[indx + 1] + 0.5 * (rbint[indx] - rbint[(indx + 1)])
# interpolated G via adaptive weights of cardinal evaluations
Gintv = (dirwts[indx - v1][0] * gd + dirwts[indx + v1][0] * gu) / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
Ginth = (dirwts[indx - 1][1] * gr + dirwts[indx + 1][1] * gl) / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
# bound the interpolation in regions of high saturation
if Gintv < rbint[indx]:
if (2 * Gintv < rbint[indx]):
Gintv = np.median([Gintv , cfa[indx - v1], cfa[indx + v1]])
else:
vwt = 2 * (rbint[indx] - Gintv) / (eps + Gintv + rbint[indx])
Gintv = vwt * Gintv + (1 - vwt) * np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
if Ginth < rbint[indx]:
if 2 * Ginth < rbint[indx]:
Ginth = np.median([Ginth , cfa[indx - 1], cfa[indx + 1]])
else:
hwt = 2 * (rbint[indx] - Ginth) / (eps + Ginth + rbint[indx])
Ginth = hwt * Ginth + (1 - hwt) * np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
if Ginth > clip_pt:
Ginth = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
if Gintv > clip_pt:
Gintv = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
rgb[indx][1] = Ginth*(1-hvwt[indx]) + Gintv*hvwt[indx]
Dgrb[indx][0] = rgb[indx][1]-cfa[indx]
# end of diagonal interpolation correction
# fancy chrominance interpolation
# (ey,ex) is location of R site
for rr in range(13-ey, rr1-12, 2):
for cc in range(13-ex, cc1-12, 2):
indx = rr*TS+cc
Dgrb[indx][1]=Dgrb[indx][0] # split out G-B from G-R
Dgrb[indx][0]=0
for rr in range(12, rr1-12):
c = int(1- cfarray[rr, 12+(cfarray[rr,2]&1)]/2)
for cc in range(12+(cfarray[rr,2]&1), cc1-12, 2):
indx = rr * TS + cc
wtnw=1/(eps+abs(Dgrb[indx-m1][c]-Dgrb[indx+m1][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx-m3][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-m3][c]))
wtne=1/(eps+abs(Dgrb[indx+p1][c]-Dgrb[indx-p1][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx+p3][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+p3][c]))
wtsw=1/(eps+abs(Dgrb[indx-p1][c]-Dgrb[indx+p1][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+m3][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx-p3][c]))
wtse=1/(eps+abs(Dgrb[indx+m1][c]-Dgrb[indx-m1][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-p3][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx+m3][c]))
Dgrb[indx][c]=(wtnw*(1.325*Dgrb[indx-m1][c]-0.175*Dgrb[indx-m3][c]-0.075*Dgrb[indx-m1-2][c]-0.075*Dgrb[indx-m1-v2][c] )+ wtne*(1.325*Dgrb[indx+p1][c]-0.175*Dgrb[indx+p3][c]-0.075*Dgrb[indx+p1+2][c]-0.075*Dgrb[indx+p1+v2][c] )+ wtsw*(1.325*Dgrb[indx-p1][c]-0.175*Dgrb[indx-p3][c]-0.075*Dgrb[indx-p1-2][c]-0.075*Dgrb[indx-p1-v2][c] )+ wtse*(1.325*Dgrb[indx+m1][c]-0.175*Dgrb[indx+m3][c]-0.075*Dgrb[indx+m1+2][c]-0.075*Dgrb[indx+m1+v2][c] ))/(wtnw+wtne+wtsw+wtse)
for rr in range(12, rr1-12):
# c = int(cfarray[rr, 12+(cfarray[rr,1]&1)+1]/2)
for cc in range(12+(cfarray[rr,1]&1), cc1-12, 2):
indx = rr * TS + cc
for c in range(2):
Dgrb[indx][c]=((hvwt[indx-v1])*Dgrb[indx-v1][c]+(1-hvwt[indx+1])*Dgrb[indx+1][c]+(1-hvwt[indx-1])*Dgrb[indx-1][c]+(hvwt[indx+v1])*Dgrb[indx+v1][c])/((hvwt[indx-v1])+(1-hvwt[indx+1])+(1-hvwt[indx-1])+(hvwt[indx+v1]))
for rr in range(12, rr1-12):
for cc in range(12, cc1-12):
indx = rr * TS + cc
rgb[indx][0]=(rgb[indx][1]-Dgrb[indx][0])
rgb[indx][2]=(rgb[indx][1]-Dgrb[indx][1])
# copy smoothed results back to image matrix
for rr in range(16, rr1-16):
row = rr + top
for cc in range(16, cc1-16):
col = cc + left
for c in range(3):
image[row, col, c] = int(rgb[rr*TS+cc, c] * 65535 + 0.5)
# end of main loop
return image
# Define some utility functions for demosaicing
# For AMAzE
def fc(cfa, r, c):
return cfa[r&1, c&1]
def intp(a, b, c):
return a * (b - c) + c
def SQR(x):
return x ** 2
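# --- Hedged usage sketch (illustrative; rawpy and the file name are assumptions) ---
# How amaze_demosaic above might be driven from a RAW file:
#   import rawpy
#   raw = rawpy.imread("example.dng")                # hypothetical input file
#   src = raw.raw_image.astype(np.uint16)            # Bayer mosaic data
#   rgb = amaze_demosaic(src, raw)                   # 16-bit demosaiced output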
|
import copy
from datetime import datetime
from functools import wraps, update_wrapper
from hashlib import blake2b
import logging
from math import log
import os
from subprocess import Popen, PIPE
import uuid
from dateutil import parser
import elasticsearch
from flask import make_response  # used by the nocache() decorator below
import pymysql
from rich import box
from rich.console import Console
from rich.table import Table
main_cursor = None
HOST = "dodata"
conn = None
CURDIR = os.getcwd()
LOG = logging.getLogger(__name__)
ABBREV_MAP = {
"p": "profox",
"l": "prolinux",
"y": "propython",
"d": "dabo-dev",
"u": "dabo-users",
"c": "codebook",
}
NAME_COLOR = "bright_red"
IntegrityError = pymysql.err.IntegrityError
def runproc(cmd):
proc = Popen([cmd], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdout_text, stderr_text = proc.communicate()
return stdout_text, stderr_text
def _parse_creds():
fpath = os.path.expanduser("~/.dbcreds")
with open(fpath) as ff:
lines = ff.read().splitlines()
ret = {}
for ln in lines:
key, val = ln.split("=")
ret[key] = val
return ret
def connect():
cls = pymysql.cursors.DictCursor
creds = _parse_creds()
db = creds.get("DB_NAME") or "webdata"
ret = pymysql.connect(
host=HOST,
user=creds["DB_USERNAME"],
passwd=creds["DB_PWD"],
db=db,
charset="utf8",
cursorclass=cls,
)
return ret
def gen_uuid():
return str(uuid.uuid4())
def get_cursor():
global conn, main_cursor
if not (conn and conn.open):
LOG.debug("No DB connection")
main_cursor = None
conn = connect()
if not main_cursor:
LOG.debug("No cursor")
main_cursor = conn.cursor(pymysql.cursors.DictCursor)
return main_cursor
def commit():
conn.commit()
def logit(*args):
argtxt = [str(arg) for arg in args]
msg = " ".join(argtxt) + "\n"
with open("LOGOUT", "a") as ff:
ff.write(msg)
def debugout(*args):
with open("/tmp/debugout", "a") as ff:
ff.write("YO!")
argtxt = [str(arg) for arg in args]
msg = " ".join(argtxt) + "\n"
with open("/tmp/debugout", "a") as ff:
ff.write(msg)
def nocache(view):
@wraps(view)
def no_cache(*args, **kwargs):
response = make_response(view(*args, **kwargs))
response.headers["Last-Modified"] = datetime.now()
response.headers["Cache-Control"] = (
"no-store, no-cache, " "must-revalidate, post-check=0, pre-check=0, max-age=0"
)
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "-1"
return response
return update_wrapper(no_cache, view)
def human_fmt(num):
"""Human friendly file size"""
# Make sure that we get a valid input. If an invalid value is passed, we
# want the exception to be raised.
num = int(num)
units = list(zip(["bytes", "K", "MB", "GB", "TB", "PB"], [0, 0, 1, 2, 2, 2]))
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit, num_decimals = units[exponent]
format_string = "{:.%sf} {}" % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return "0 bytes"
if num == 1:
return "1 byte"
def format_number(num):
"""Return a number representation with comma separators."""
snum = str(num)
parts = []
while snum:
snum, part = snum[:-3], snum[-3:]
parts.append(part)
parts.reverse()
return ",".join(parts)
def get_elastic_client():
return elasticsearch.Elasticsearch(host=HOST)
def _get_mapping():
es_client = get_elastic_client()
return es_client.indices.get_mapping()
def get_indices():
return list(_get_mapping().keys())
def get_mapping(index):
"""Returns the field definitions for the specified index"""
props = _get_mapping().get(index, {}).get("mappings", {}).get("properties", {})
return props
def get_fields(index):
"""Returns just the field names for the specified index"""
return get_mapping(index).keys()
def gen_key(orig_rec, digest_size=8):
"""Generates a hash value by concatenating the values in the dictionary."""
# Don't modify the original dict
rec = copy.deepcopy(orig_rec)
# Remove the 'id' field, if present
rec.pop("id", None)
m = blake2b(digest_size=digest_size)
txt_vals = ["%s" % val for val in rec.values()]
txt_vals.sort()
txt = "".join(txt_vals)
m.update(txt.encode("utf-8"))
return m.hexdigest()
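# For instance, two records that differ only in their "id" field produce the
# same key, since "id" is dropped and the remaining values are sorted:
#   gen_key({"id": 1, "a": "x", "b": 2}) == gen_key({"id": 99, "b": 2, "a": "x"})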
def extract_records(resp):
return [r["_source"] for r in resp["hits"]["hits"]]
def massage_date(val):
dt = parser.parse(val)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def massage_date_records(records, field_name):
for rec in records:
rec[field_name] = massage_date(rec[field_name])
def print_messages(recs):
console = Console()
table = Table(show_header=True, header_style="bold blue_violet")
table.add_column("MSG #", justify="right")
table.add_column("List")
table.add_column("Posted", justify="right")
table.add_column("From")
table.add_column("Subject")
for rec in recs:
table.add_row(
str(rec["msg_num"]),
ABBREV_MAP.get(rec["list_name"]),
massage_date(rec["posted"]),
rec["from"],
rec["subject"],
)
console.print(table)
def print_message_list(recs):
console = Console()
table = Table(show_header=True, header_style="bold cyan", box=box.HEAVY)
# table.add_column("ID", style="dim", width=13)
table.add_column("MSG #")
table.add_column("List")
table.add_column("Posted")
table.add_column("From")
table.add_column("Subject")
for rec in recs:
sender_parts = rec["from"].split("<")
name = sender_parts[0]
addr = f"<{sender_parts[1]}" if len(sender_parts) > 1 else ""
sender = f"[bold {NAME_COLOR}]{name}[/bold {NAME_COLOR}]{addr}"
subj = rec["subject"]
low_subj = subj.lower()
if low_subj.startswith("re:") or low_subj.startswith("aw:"):
subj = f"[green]{subj[:3]}[/green]{subj[3:]}"
table.add_row(
str(rec["msg_num"]),
ABBREV_MAP.get(rec["list_name"]),
rec["posted"],
sender,
subj,
)
console.print(table)
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple client class to speak with any RESTful service that implements
the Glance Registry API
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from glance.common.client import BaseClient
from glance.common import crypt
from glance import i18n
from glance.registry.api.v1 import images
LOG = logging.getLogger(__name__)
_LE = i18n._LE
class RegistryClient(BaseClient):
"""A client for the Registry image metadata service."""
DEFAULT_PORT = 9191
def __init__(self, host=None, port=None, metadata_encryption_key=None,
identity_headers=None, **kwargs):
"""
:param metadata_encryption_key: Key used to encrypt 'location' metadata
"""
self.metadata_encryption_key = metadata_encryption_key
# NOTE (dprince): by default base client overwrites host and port
# settings when using keystone. configure_via_auth=False disables
# this behaviour to ensure we still send requests to the Registry API
self.identity_headers = identity_headers
# store available passed request id for do_request call
self._passed_request_id = kwargs.pop('request_id', None)
BaseClient.__init__(self, host, port, configure_via_auth=False,
**kwargs)
def decrypt_metadata(self, image_metadata):
if self.metadata_encryption_key:
if image_metadata.get('location'):
location = crypt.urlsafe_decrypt(self.metadata_encryption_key,
image_metadata['location'])
image_metadata['location'] = location
if image_metadata.get('location_data'):
ld = []
for loc in image_metadata['location_data']:
url = crypt.urlsafe_decrypt(self.metadata_encryption_key,
loc['url'])
ld.append({'id': loc['id'], 'url': url,
'metadata': loc['metadata'],
'status': loc['status']})
image_metadata['location_data'] = ld
return image_metadata
def encrypt_metadata(self, image_metadata):
if self.metadata_encryption_key:
location_url = image_metadata.get('location')
if location_url:
location = crypt.urlsafe_encrypt(self.metadata_encryption_key,
location_url,
64)
image_metadata['location'] = location
if image_metadata.get('location_data'):
ld = []
for loc in image_metadata['location_data']:
if loc['url'] == location_url:
url = location
else:
url = crypt.urlsafe_encrypt(
self.metadata_encryption_key, loc['url'], 64)
ld.append({'url': url, 'metadata': loc['metadata'],
'status': loc['status'],
# NOTE(zhiyan): New location has no ID field.
'id': loc.get('id')})
image_metadata['location_data'] = ld
return image_metadata
def get_images(self, **kwargs):
"""
Returns a list of image id/name mappings from Registry
:param filters: dict of keys & expected values to filter results
:param marker: image id after which to start page
:param limit: max number of images to return
:param sort_key: results will be ordered by this image attribute
:param sort_dir: direction in which to order results (asc, desc)
"""
params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
res = self.do_request("GET", "/images", params=params)
image_list = jsonutils.loads(res.read())['images']
for image in image_list:
image = self.decrypt_metadata(image)
return image_list
def do_request(self, method, action, **kwargs):
try:
kwargs['headers'] = kwargs.get('headers', {})
kwargs['headers'].update(self.identity_headers or {})
if self._passed_request_id:
kwargs['headers']['X-Openstack-Request-ID'] = (
self._passed_request_id)
res = super(RegistryClient, self).do_request(method,
action,
**kwargs)
status = res.status
request_id = res.getheader('x-openstack-request-id')
msg = ("Registry request %(method)s %(action)s HTTP %(status)s"
" request id %(request_id)s" %
{'method': method, 'action': action,
'status': status, 'request_id': request_id})
LOG.debug(msg)
except Exception as exc:
with excutils.save_and_reraise_exception():
exc_name = exc.__class__.__name__
LOG.exception(_LE("Registry client request %(method)s "
"%(action)s raised %(exc_name)s"),
{'method': method, 'action': action,
'exc_name': exc_name})
return res
def get_images_detailed(self, **kwargs):
"""
Returns a list of detailed image data mappings from Registry
:param filters: dict of keys & expected values to filter results
:param marker: image id after which to start page
:param limit: max number of images to return
:param sort_key: results will be ordered by this image attribute
:param sort_dir: direction in which to order results (asc, desc)
"""
params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
res = self.do_request("GET", "/images/detail", params=params)
image_list = jsonutils.loads(res.read())['images']
for image in image_list:
image = self.decrypt_metadata(image)
return image_list
def get_image(self, image_id):
"""Returns a mapping of image metadata from Registry."""
res = self.do_request("GET", "/images/%s" % image_id)
data = jsonutils.loads(res.read())['image']
return self.decrypt_metadata(data)
def add_image(self, image_metadata):
"""
Tells registry about an image's metadata
"""
headers = {
'Content-Type': 'application/json',
}
if 'image' not in image_metadata:
image_metadata = dict(image=image_metadata)
encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
image_metadata['image'] = encrypted_metadata
body = jsonutils.dumps(image_metadata)
res = self.do_request("POST", "/images", body=body, headers=headers)
# Registry returns a JSONified dict(image=image_info)
data = jsonutils.loads(res.read())
image = data['image']
return self.decrypt_metadata(image)
def update_image(self, image_id, image_metadata, purge_props=False,
from_state=None):
"""
Updates Registry's information about an image
"""
if 'image' not in image_metadata:
image_metadata = dict(image=image_metadata)
encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
image_metadata['image'] = encrypted_metadata
image_metadata['from_state'] = from_state
body = jsonutils.dumps(image_metadata)
headers = {
'Content-Type': 'application/json',
}
if purge_props:
headers["X-Glance-Registry-Purge-Props"] = "true"
res = self.do_request("PUT", "/images/%s" % image_id, body=body,
headers=headers)
data = jsonutils.loads(res.read())
image = data['image']
return self.decrypt_metadata(image)
def delete_image(self, image_id):
"""
Deletes Registry's information about an image
"""
res = self.do_request("DELETE", "/images/%s" % image_id)
data = jsonutils.loads(res.read())
image = data['image']
return image
def get_image_members(self, image_id):
"""Return a list of membership associations from Registry."""
res = self.do_request("GET", "/images/%s/members" % image_id)
data = jsonutils.loads(res.read())['members']
return data
def get_member_images(self, member_id):
"""Return a list of membership associations from Registry."""
res = self.do_request("GET", "/shared-images/%s" % member_id)
data = jsonutils.loads(res.read())['shared_images']
return data
def replace_members(self, image_id, member_data):
"""Replace registry's information about image membership."""
if isinstance(member_data, (list, tuple)):
member_data = dict(memberships=list(member_data))
elif (isinstance(member_data, dict) and
'memberships' not in member_data):
member_data = dict(memberships=[member_data])
body = jsonutils.dumps(member_data)
headers = {'Content-Type': 'application/json', }
res = self.do_request("PUT", "/images/%s/members" % image_id,
body=body, headers=headers)
return self.get_status_code(res) == 204
def add_member(self, image_id, member_id, can_share=None):
"""Add to registry's information about image membership."""
body = None
headers = {}
# Build up a body if can_share is specified
if can_share is not None:
body = jsonutils.dumps(dict(member=dict(can_share=can_share)))
headers['Content-Type'] = 'application/json'
url = "/images/%s/members/%s" % (image_id, member_id)
res = self.do_request("PUT", url, body=body,
headers=headers)
return self.get_status_code(res) == 204
def delete_member(self, image_id, member_id):
"""Delete registry's information about image membership."""
res = self.do_request("DELETE", "/images/%s/members/%s" %
(image_id, member_id))
return self.get_status_code(res) == 204
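# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Typical lookup flow against a registry; the host, port and filters are
# assumptions for demonstration only.
#   client = RegistryClient(host="127.0.0.1", port=9191)
#   for image in client.get_images(limit=10, sort_key="created_at", sort_dir="desc"):
#       print(image["id"], image["name"])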
|
import unittest
from mollufy import mollufy
class MollufyTestSimple(unittest.TestCase):
def test_mollufy_word_2chars(self):
# TEST 1: Mollufy simple 2-characters noun word
self.assertEqual(mollufy.mollufy("블루"), "블?루")
self.assertEqual(mollufy.mollufy("하루"), "하?루")
self.assertEqual(mollufy.mollufy("감정"), "감?정")
def test_mollufy_word_manychars_without_param(self):
# TEST 2: Ensure 3-characters-or-above noun word not to be mollufied without parameter
self.assertEqual(mollufy.mollufy("마술사"), "마술사")
self.assertEqual(mollufy.mollufy("모니터"), "모니터")
self.assertEqual(mollufy.mollufy("아이스크림"), "아이스크림")
def test_mollufy_word_manychars(self):
# TEST 3: Mollufy 3-characters-or-above noun word with parameter
self.assertEqual(mollufy.mollufy("슬리퍼", True), "슬리?퍼")
self.assertEqual(mollufy.mollufy("이구동성", True), "이구동?성")
self.assertEqual(mollufy.mollufy("아메리카노", True), "아메리카?노")
def test_mollufy_non_noun_word(self):
# TEST 4: Ensure non-noun words not to be mollufied
self.assertEqual(mollufy.mollufy("좋아"), "좋아")
self.assertEqual(mollufy.mollufy("그만해", True), "그만해")
self.assertEqual(mollufy.mollufy("냠냠쩝쩝", True), "냠냠쩝쩝")
class MollufyTestSentence(unittest.TestCase):
def test_mollufy_sentence_with_one_2chars_word(self):
# TEST 5: Mollufy sentence with one 2-characters noun word
self.assertEqual(mollufy.mollufy("안녕하세요"), "안?녕하세요")
self.assertEqual(mollufy.mollufy("바다에 갑시다"), "바?다에 갑시다")
self.assertEqual(mollufy.mollufy("재미있는 게임인데"), "재미있는 게?임인데")
def test_mollufy_sentence_with_one_manychar_word(self):
# TEST 6: Mollufy sentence with one 3-characters-or-above noun word
self.assertEqual(mollufy.mollufy("참관인이세요?", True), "참관?인이세요?")
self.assertEqual(mollufy.mollufy("보드카 너무 써", True), "보드?카 너무 써")
self.assertEqual(mollufy.mollufy("필라멘트가 타버렸네", True), "필라멘?트가 타버렸네")
def test_mollufy_sentence_with_many_2chars_words(self):
# TEST 7: Mollufy sentence with many 2-characters noun words
self.assertEqual(mollufy.mollufy("내가 재미있는 게임을 하나 알아냈는데, 나중에 검색해봐"), "내가 재미있는 게?임을 하나 알아냈는데, 나?중에 검?색해봐")
self.assertEqual(mollufy.mollufy("그야말로 연애재판 너는 나에게 얼마만큼의 죄를 물을 거니?"), "그야말로 연?애재?판 너는 나에게 얼?마만큼의 죄를 물을 거니?")
self.assertEqual(mollufy.mollufy("두 글자 명사가 다수 존재하는 문장을 생각하기는 곤란하다"), "두 글?자 명?사가 다?수 존?재하는 문?장을 생?각하기는 곤?란하다")
def test_mollufy_sentence_with_many_words(self):
        # TEST 8: Mollufy sentence with many noun words (with no length limit)
self.assertEqual(mollufy.mollufy("대한민국의 영토는 한반도와 그 부속도서로 한다.", True), "대한민?국의 영?토는 한반?도와 그 부?속도?서로 한다.")
self.assertEqual(mollufy.mollufy("대한민국은 통일을 지향하며, 자유민주적 기본질서에 입각한 평화적 통일 정책을 수립하고 이를 추진한다.", True), "대한민?국은 통?일을 지?향하며, 자?유민?주적 기?본질?서에 입?각한 평?화적 통?일 정?책을 수?립하고 이를 추?진한다.")
self.assertEqual(mollufy.mollufy("블루 아카이브 정말 건전하고 건강하고 밝은 게임인데...", True), "블?루 아카이?브 정말 건?전하고 건?강하고 밝은 게?임인데...")
def test_mollufy_sentence_with_many_words_without_param(self):
# TEST 9: Mollufy 2-characters noun words in sentence, not 3-characters-or-above noun words
self.assertEqual(mollufy.mollufy("그래픽 디자인은 특정 메시지 (혹은 콘텐츠)와 이를 전달하려는 대상자에게 걸맞은 매체 (인쇄물, 웹사이트, 동영상 등)를 선택하여 표현 또는 제작하는 창의적인 과정이다."),
"그래픽 디자인은 특?정 메시지 (혹은 콘텐츠)와 이를 전?달하려는 대상자에게 걸맞은 매?체 (인쇄물, 웹사이트, 동영상 등)를 선?택하여 표?현 또는 제?작하는 창?의적인 과?정이다.")
class MollufyTestMeme(unittest.TestCase):
def test_mollufy_meme_words(self):
# TEST 10: Meme words
self.assertEqual(mollufy.mollufy("몰루"), "몰?루")
self.assertEqual(mollufy.mollufy("코하루"), "코하?루")
self.assertEqual(mollufy.mollufy("아루"), "아?루")
self.assertEqual(mollufy.mollufy("네루"), "네?루")
def test_mollufy_meme_sentences(self):
# TEST 11: Meme sentences
self.assertEqual(mollufy.mollufy("몰루는건가..."), "몰?루는건가...")
self.assertEqual(mollufy.mollufy("내가 몰루가 될께..."), "내가 몰?루가 될께...")
class MollufyTestAltmark(unittest.TestCase):
def test_mollufy_altmark(self):
# TEST 12: Mollufy with alternative mark: [!]
self.assertEqual(mollufy.mollufy("바람", alternativeMark=True), "바!람")
self.assertEqual(mollufy.mollufy("아루", alternativeMark=True), "아!루")
self.assertEqual(mollufy.mollufy("스튜디오", True, True), "스튜디!오")
self.assertEqual(mollufy.mollufy("각설탕을 커피에 타먹으면 달게요 안 달게요~", True, True), "각설!탕을 커!피에 타먹으면 달게요 안 달게요~")
if __name__ == "__main__":
unittest.main()
|
import os
import stat
import sys
from stai.util.config import load_config, traverse_dict
from stai.util.permissions import octal_mode_string, verify_file_permissions
from logging import Logger
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
DEFAULT_PERMISSIONS_CERT_FILE: int = 0o644
DEFAULT_PERMISSIONS_KEY_FILE: int = 0o600
# Masks containing permission bits we don't allow
RESTRICT_MASK_CERT_FILE: int = stat.S_IWGRP | stat.S_IXGRP | stat.S_IWOTH | stat.S_IXOTH # 0o033
RESTRICT_MASK_KEY_FILE: int = (
stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
) # 0o077
CERT_CONFIG_KEY_PATHS = [
"stai_ssl_ca:crt",
"daemon_ssl:private_crt",
"farmer:ssl:private_crt",
"farmer:ssl:public_crt",
"full_node:ssl:private_crt",
"full_node:ssl:public_crt",
"harvester:stai_ssl_ca:crt",
"harvester:private_ssl_ca:crt",
"harvester:ssl:private_crt",
"introducer:ssl:public_crt",
"private_ssl_ca:crt",
"timelord:ssl:private_crt",
"timelord:ssl:public_crt",
"ui:daemon_ssl:private_crt",
"wallet:ssl:private_crt",
"wallet:ssl:public_crt",
]
KEY_CONFIG_KEY_PATHS = [
"stai_ssl_ca:key",
"daemon_ssl:private_key",
"farmer:ssl:private_key",
"farmer:ssl:public_key",
"full_node:ssl:private_key",
"full_node:ssl:public_key",
"harvester:stai_ssl_ca:key",
"harvester:private_ssl_ca:key",
"harvester:ssl:private_key",
"introducer:ssl:public_key",
"private_ssl_ca:key",
"timelord:ssl:private_key",
"timelord:ssl:public_key",
"ui:daemon_ssl:private_key",
"wallet:ssl:private_key",
"wallet:ssl:public_key",
]
# Set to keep track of which files we've already warned about
warned_ssl_files: Set[Path] = set()
def get_all_ssl_file_paths(root_path: Path) -> Tuple[List[Path], List[Path]]:
"""Lookup config values and append to a list of files whose permissions we need to check"""
from stai.ssl.create_ssl import get_mozilla_ca_crt
all_certs: List[Path] = []
all_keys: List[Path] = []
try:
config: Dict = load_config(root_path, "config.yaml", exit_on_error=False)
for paths, parsed_list in [(CERT_CONFIG_KEY_PATHS, all_certs), (KEY_CONFIG_KEY_PATHS, all_keys)]:
for path in paths:
try:
file = root_path / Path(traverse_dict(config, path))
parsed_list.append(file)
except Exception as e:
print(
f"Failed to lookup config value for {path}: {e}"
) # lgtm [py/clear-text-logging-sensitive-data]
# Check the Mozilla Root CAs as well
all_certs.append(Path(get_mozilla_ca_crt()))
except (FileNotFoundError, ValueError):
pass
return all_certs, all_keys
def get_ssl_perm_warning(path: Path, actual_mode: int, expected_mode: int) -> str:
return (
f"Permissions {octal_mode_string(actual_mode)} for "
f"'{path}' are too open. " # lgtm [py/clear-text-logging-sensitive-data]
f"Expected {octal_mode_string(expected_mode)}"
)
def verify_ssl_certs_and_keys(
cert_paths: List[Path], key_paths: List[Path], log: Optional[Logger] = None
) -> List[Tuple[Path, int, int]]:
"""Check that file permissions are properly set for the provided SSL cert and key files"""
if sys.platform == "win32" or sys.platform == "cygwin":
# TODO: ACLs for SSL certs/keys on Windows
return []
invalid_files_and_modes: List[Tuple[Path, int, int]] = []
def verify_paths(paths: List[Path], restrict_mask: int, expected_permissions: int):
nonlocal invalid_files_and_modes
for path in paths:
try:
# Check that the file permissions are not too permissive
is_valid, actual_permissions = verify_file_permissions(path, restrict_mask)
if not is_valid:
if log is not None:
log.error(get_ssl_perm_warning(path, actual_permissions, expected_permissions))
warned_ssl_files.add(path)
invalid_files_and_modes.append((path, actual_permissions, expected_permissions))
except Exception as e:
print(f"Unable to check permissions for {path}: {e}") # lgtm [py/clear-text-logging-sensitive-data]
verify_paths(cert_paths, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE)
verify_paths(key_paths, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE)
return invalid_files_and_modes
def check_ssl(root_path: Path) -> None:
"""
Sanity checks on the SSL configuration. Checks that file permissions are properly
set on the keys and certs, warning and exiting if permissions are incorrect.
"""
if sys.platform == "win32" or sys.platform == "cygwin":
# TODO: ACLs for SSL certs/keys on Windows
return None
certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
invalid_files = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
if len(invalid_files):
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@ WARNING: UNPROTECTED SSL FILE! @")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
for path, actual_permissions, expected_permissions in invalid_files:
print(
get_ssl_perm_warning(path, actual_permissions, expected_permissions)
) # lgtm [py/clear-text-logging-sensitive-data]
print("One or more SSL files were found with permission issues.")
print("Run `stai init --fix-ssl-permissions` to fix issues.")
def check_and_fix_permissions_for_ssl_file(file: Path, mask: int, updated_mode: int) -> Tuple[bool, bool]:
"""Check file permissions and attempt to fix them if found to be too open"""
if sys.platform == "win32" or sys.platform == "cygwin":
# TODO: ACLs for SSL certs/keys on Windows
return True, False
valid: bool = True
updated: bool = False
# Check that the file permissions are not too permissive
try:
(good_perms, mode) = verify_file_permissions(file, mask)
if not good_perms:
valid = False
print(
f"Attempting to set permissions {octal_mode_string(updated_mode)} on "
f"{file}" # lgtm [py/clear-text-logging-sensitive-data]
)
os.chmod(str(file), updated_mode)
updated = True
except Exception as e:
print(f"Failed to change permissions on {file}: {e}") # lgtm [py/clear-text-logging-sensitive-data]
valid = False
return valid, updated
def fix_ssl(root_path: Path) -> None:
"""Attempts to fix SSL cert/key file permissions that are too open"""
if sys.platform == "win32" or sys.platform == "cygwin":
# TODO: ACLs for SSL certs/keys on Windows
return None
updated: bool = False
encountered_error: bool = False
certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
files_to_fix = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
for (file, mask, updated_mode) in files_to_fix:
# Check that permissions are correct, and if not, attempt to fix
(valid, fixed) = check_and_fix_permissions_for_ssl_file(file, mask, updated_mode)
if fixed:
updated = True
if not valid and not fixed:
encountered_error = True
if encountered_error:
print("One or more errors were encountered while updating SSL file permissions...")
elif updated:
print("Finished updating SSL file permissions")
else:
print("SSL file permissions are correct")
|
"""webdev URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('webdev.produtos.urls')),
path('users/', include('webdev.users.urls')),
path('fornecedores/', include('webdev.fornecedores.urls')),
path('materiais/', include('webdev.materiais.urls')),
path('financeiro/', include('webdev.financeiro.urls')),
path('vendas/', include('webdev.vendas.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import httplib2
import mock
import os
import pickle
import pytest
import socket
import sys
import tests
import time
from six.moves import urllib
@pytest.mark.skipif(
sys.version_info <= (3,),
reason=(
"TODO: httplib2._convert_byte_str was defined only in python3 code " "version"
),
)
def test_convert_byte_str():
with tests.assert_raises(TypeError):
httplib2._convert_byte_str(4)
assert httplib2._convert_byte_str(b"Hello") == "Hello"
assert httplib2._convert_byte_str("World") == "World"
def test_reflect():
http = httplib2.Http()
with tests.server_reflect() as uri:
response, content = http.request(uri + "?query", "METHOD")
assert response.status == 200
host = urllib.parse.urlparse(uri).netloc
assert content.startswith(
"""\
METHOD /?query HTTP/1.1\r\n\
Host: {host}\r\n""".format(
host=host
).encode()
), content
def test_pickle_http():
http = httplib2.Http(cache=tests.get_cache_path())
new_http = pickle.loads(pickle.dumps(http))
assert tuple(sorted(new_http.__dict__)) == tuple(sorted(http.__dict__))
assert new_http.credentials.credentials == http.credentials.credentials
assert new_http.certificates.credentials == http.certificates.credentials
assert new_http.cache.cache == http.cache.cache
for key in new_http.__dict__:
if key not in ("cache", "certificates", "credentials"):
assert getattr(new_http, key) == getattr(http, key)
def test_pickle_http_with_connection():
http = httplib2.Http()
http.request("http://random-domain:81/", connection_type=tests.MockHTTPConnection)
new_http = pickle.loads(pickle.dumps(http))
assert tuple(http.connections) == ("http:random-domain:81",)
assert new_http.connections == {}
def test_pickle_custom_request_http():
http = httplib2.Http()
http.request = lambda: None
http.request.dummy_attr = "dummy_value"
new_http = pickle.loads(pickle.dumps(http))
assert getattr(new_http.request, "dummy_attr", None) is None
@pytest.mark.xfail(
sys.version_info >= (3,),
reason=(
"FIXME: for unknown reason global timeout test fails in Python3 "
"with response 200"
),
)
def test_timeout_global():
def handler(request):
time.sleep(0.5)
return tests.http_response_bytes()
try:
socket.setdefaulttimeout(0.1)
except Exception:
pytest.skip("cannot set global socket timeout")
try:
http = httplib2.Http()
http.force_exception_to_status_code = True
with tests.server_request(handler) as uri:
response, content = http.request(uri)
assert response.status == 408
assert response.reason.startswith("Request Timeout")
finally:
socket.setdefaulttimeout(None)
def test_timeout_individual():
def handler(request):
time.sleep(0.5)
return tests.http_response_bytes()
http = httplib2.Http(timeout=0.1)
http.force_exception_to_status_code = True
with tests.server_request(handler) as uri:
response, content = http.request(uri)
assert response.status == 408
assert response.reason.startswith("Request Timeout")
def test_timeout_https():
c = httplib2.HTTPSConnectionWithTimeout("localhost", 80, timeout=47)
assert 47 == c.timeout
# @pytest.mark.xfail(
# sys.version_info >= (3,),
# reason='[py3] last request should open new connection, but client does not realize socket was closed by server',
# )
def test_connection_close():
http = httplib2.Http()
g = []
def handler(request):
g.append(request.number)
return tests.http_response_bytes(proto="HTTP/1.1")
with tests.server_request(handler, request_count=3) as uri:
http.request(uri, "GET") # conn1 req1
for c in http.connections.values():
assert c.sock is not None
http.request(uri, "GET", headers={"connection": "close"})
time.sleep(0.7)
http.request(uri, "GET") # conn2 req1
assert g == [1, 2, 1]
def test_get_end2end_headers():
# one end to end header
response = {"content-type": "application/atom+xml", "te": "deflate"}
end2end = httplib2._get_end2end_headers(response)
assert "content-type" in end2end
assert "te" not in end2end
assert "connection" not in end2end
# one end to end header that gets eliminated
response = {
"connection": "content-type",
"content-type": "application/atom+xml",
"te": "deflate",
}
end2end = httplib2._get_end2end_headers(response)
assert "content-type" not in end2end
assert "te" not in end2end
assert "connection" not in end2end
# Degenerate case of no headers
response = {}
end2end = httplib2._get_end2end_headers(response)
assert len(end2end) == 0
    # Degenerate case of connection referring to a header not passed in
response = {"connection": "content-type"}
end2end = httplib2._get_end2end_headers(response)
assert len(end2end) == 0
@pytest.mark.xfail(
os.environ.get("TRAVIS_PYTHON_VERSION") in ("2.7", "pypy"),
reason="FIXME: fail on Travis py27 and pypy, works elsewhere",
)
@pytest.mark.parametrize("scheme", ("http", "https"))
def test_ipv6(scheme):
# Even if IPv6 isn't installed on a machine it should just raise socket.error
uri = "{scheme}://[::1]:1/".format(scheme=scheme)
try:
httplib2.Http(timeout=0.1).request(uri)
except socket.gaierror:
assert False, "should get the address family right for IPv6"
except socket.error:
pass
@pytest.mark.parametrize(
"conn_type",
(httplib2.HTTPConnectionWithTimeout, httplib2.HTTPSConnectionWithTimeout),
)
def test_connection_proxy_info_attribute_error(conn_type):
# HTTPConnectionWithTimeout did not initialize its .proxy_info attribute
# https://github.com/httplib2/httplib2/pull/97
# Thanks to Joseph Ryan https://github.com/germanjoey
conn = conn_type("no-such-hostname.", 80)
# TODO: replace mock with dummy local server
with tests.assert_raises(socket.gaierror):
with mock.patch("socket.socket.connect", side_effect=socket.gaierror):
conn.request("GET", "/")
def test_http_443_forced_https():
http = httplib2.Http()
http.force_exception_to_status_code = True
uri = "http://localhost:443/"
# sorry, using internal structure of Http to check chosen scheme
with mock.patch("httplib2.Http._request") as m:
http.request(uri)
assert len(m.call_args) > 0, "expected Http._request() call"
conn = m.call_args[0][0]
assert isinstance(conn, httplib2.HTTPConnectionWithTimeout)
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: liaoxingyu2@jd.com
"""
import math
import random
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
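# --- Hedged usage sketch (demo only): RandomErasing expects a CxHxW tensor that
# exposes .size(), e.g. a torch.Tensor produced by torchvision's ToTensor().
# torch is an assumption of this demo; the class itself only needs math/random.
if __name__ == "__main__":
    import torch  # assumed available only for this demonstration
    eraser = RandomErasing(probability=1.0)  # always erase, to make the effect visible
    img = torch.rand(3, 64, 64)              # fake 3-channel image
    out = eraser(img)                        # a random rectangle now holds the per-channel means
    print(out.shape)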
|
import platform
import sys
import mock
import pytest
from urllib3.util import ssl_
from urllib3.exceptions import SNIMissingWarning
@pytest.mark.parametrize(
"addr",
[
# IPv6
"::1",
"::",
"FE80::8939:7684:D84b:a5A4%251",
# IPv4
"127.0.0.1",
"8.8.8.8",
b"127.0.0.1",
# IPv6 w/ Zone IDs
"FE80::8939:7684:D84b:a5A4%251",
b"FE80::8939:7684:D84b:a5A4%251",
"FE80::8939:7684:D84b:a5A4%19",
b"FE80::8939:7684:D84b:a5A4%19",
],
)
def test_is_ipaddress_true(addr):
assert ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
"addr",
[
"www.python.org",
b"www.python.org",
"v2.sg.media-imdb.com",
b"v2.sg.media-imdb.com",
],
)
def test_is_ipaddress_false(addr):
assert not ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "uses_sni"],
[
(True, "127.0.0.1", False),
(False, "www.python.org", False),
(False, "0.0.0.0", False),
(True, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_context_sni_with_ip_address(monkeypatch, has_sni, server_hostname, uses_sni):
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)
if uses_sni:
context.wrap_socket.assert_called_with(sock, server_hostname=server_hostname)
else:
context.wrap_socket.assert_called_with(sock)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "should_warn"],
[
(True, "www.google.com", False),
(True, "127.0.0.1", False),
(False, "127.0.0.1", False),
(False, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_sni_missing_warning_with_ip_addresses(
monkeypatch, has_sni, server_hostname, should_warn
):
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
with mock.patch("warnings.warn") as warn:
ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)
if should_warn:
assert warn.call_count >= 1
warnings = [call[0][1] for call in warn.call_args_list]
assert SNIMissingWarning in warnings
else:
assert warn.call_count == 0
@pytest.mark.parametrize(
["ciphers", "expected_ciphers"],
[
(None, ssl_.DEFAULT_CIPHERS),
("ECDH+AESGCM:ECDH+CHACHA20", "ECDH+AESGCM:ECDH+CHACHA20"),
],
)
def test_create_urllib3_context_set_ciphers(monkeypatch, ciphers, expected_ciphers):
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context(ciphers=ciphers) is context
assert context.set_ciphers.call_count == 1
assert context.set_ciphers.call_args == mock.call(expected_ciphers)
def test_wrap_socket_given_context_no_load_default_certs():
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ssl_context=context)
context.load_default_certs.assert_not_called()
def test_wrap_socket_given_ca_certs_no_load_default_certs(monkeypatch):
if platform.python_implementation() == "PyPy" and sys.version_info[0] == 2:
# https://github.com/testing-cabal/mock/issues/438
pytest.xfail("fails with PyPy for Python 2 dues to funcsigs bug")
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ca_certs="/tmp/fake-file")
context.load_default_certs.assert_not_called()
context.load_verify_locations.assert_called_with("/tmp/fake-file", None)
def test_wrap_socket_default_loads_default_certs(monkeypatch):
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock)
context.load_default_certs.assert_called_with()
@pytest.mark.parametrize(
["pha", "expected_pha"], [(None, None), (False, True), (True, True)]
)
def test_create_urllib3_context_pha(monkeypatch, pha, expected_pha):
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
context.post_handshake_auth = pha
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context() is context
assert context.post_handshake_auth == expected_pha
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'mypackage'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/vincent101/myproject'
EMAIL = 'vincent.wangworks@gmail.com'
AUTHOR = 'Vicnet Wang'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
from setuptools import setup
setup(
name='proxy-pool',
version='1.0.0',
description='High performance proxy pool',
long_description='A proxy pool project modified from WiseDoge/ProxyPool',
author=['Germey', 'WiseDoge'],
author_email='cqc@cuiqingcai.com',
url='https://github.com/Germey/ProxyPool',
packages=[
'proxy-pool'
],
py_modules=['run'],
include_package_data=True,
platforms='any',
install_requires=[
'aiohttp',
'requests',
'flask',
'redis',
'pyquery'
],
entry_points={
'console_scripts': ['proxy_pool_run=run:cli']
},
license='apache 2.0',
zip_safe=False,
classifiers=[
'Environment :: Console',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython'
]
)
|
import yaml
from enum import Enum
class SimulationType(Enum):
explosion = "EXPLOSION"
collision = "COLLISION"
class SatType(Enum):
rb = "RB"
sat = "SC"
soc = "SOC"
deb = "DEB"
class SimulationConfiguration:
# Takes a .yaml file with simulation configurations
def __init__(self, filePath: str):
try:
with open(filePath, 'r') as stream:
data_loaded = yaml.safe_load(stream)
self._minimalCharacteristicLength = float(
data_loaded['minimalCharacteristicLength'])
self._simulationType = SimulationType(data_loaded['simulationType'].upper())
self._sat_type = SatType(data_loaded['satType'].upper())
self._mass_conservation = bool(data_loaded['massConservation'])
stream.close()
except Exception as e:
print(f"Exception: {e}")
@property
def minimalCharacteristicLength(self) -> float:
return self._minimalCharacteristicLength
@property
def simulationType(self) -> SimulationType:
return self._simulationType
@property
def sat_type(self) -> SatType:
return self._sat_type
@property
def mass_conservation(self) -> bool:
return self._mass_conservation
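# --- Hedged demo (file name and values below are illustrative, not from the project) ---
if __name__ == "__main__":
    sample = (
        "minimalCharacteristicLength: 0.05\n"
        "simulationType: explosion\n"
        "satType: sc\n"
        "massConservation: true\n"
    )
    with open("example_simulation.yaml", "w") as stream:  # hypothetical file name
        stream.write(sample)
    cfg = SimulationConfiguration("example_simulation.yaml")
    print(cfg.minimalCharacteristicLength, cfg.simulationType, cfg.sat_type, cfg.mass_conservation)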
|
"""hellodjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponse, JsonResponse
def home_page(request):
# assert False, request.META['HTTP_USER_AGENT']
# return HttpResponse("Hello <b>World!</b>", content_type="text/plain")
return HttpResponse("Hello <b>World!</b>")
# return JsonResponse({
# 'a':'b',
# 'c':'d',
# })
def age(request, name, value): # view function
return HttpResponse("{}, you are {} years old".format(name.title(), value))
def mult(request, first, second):
return HttpResponse("{} X {} = {}".format(first, second, (int(first) * int(second))))
def throw_404(request):
return HttpResponse("404 Error", status=404)
# def go(request):
# assert False, request.GET
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^age/(?P<name>\w+)/(?P<value>\d+)/$', age),
url(r'^mult/(?P<first>\d+)/(?P<second>\d+)/$', mult),
url(r'^$', home_page),
url(r'$', throw_404),
# url(r'age/(\w+)/$', age),
]
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
import numpy as np
from sklearn.mixture import GaussianMixture
from graspologic.plot.plot import (
_sort_inds,
gridplot,
heatmap,
pairplot,
pairplot_with_gmm,
)
from graspologic.simulations.simulations import er_np, sbm
def _test_pairplot_with_gmm_inputs(caller: unittest.TestCase, **kws):
X = np.random.rand(15, 3)
gmm = GaussianMixture(n_components=3, **kws).fit(X)
labels = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
# test data
with caller.assertRaises(ValueError):
pairplot_with_gmm(X="test", gmm=gmm)
with caller.assertRaises(ValueError):
pairplot_with_gmm(X=X, gmm=gmm, labels=["A"])
with caller.assertRaises(NameError):
pairplot_with_gmm(X, gmm=None)
def _test_pairplot_with_gmm_outputs(**kws):
X = np.random.rand(15, 3)
gmm = GaussianMixture(n_components=3, **kws).fit(X)
labels = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
cluster_palette = {0: "red", 1: "blue", 2: "green"}
label_palette = {"A": "red", "B": "blue", "C": "green"}
fig = pairplot_with_gmm(X, gmm)
fig = pairplot_with_gmm(
X,
gmm,
labels=labels,
cluster_palette=cluster_palette,
label_palette=label_palette,
)
class TestPlot(unittest.TestCase):
def test_common_inputs(self):
X = er_np(100, 0.5)
grid_labels = ["Test1"]
# test figsize
with self.assertRaises(TypeError):
figsize = "bad figsize"
heatmap(X, figsize=figsize)
# test height
height = "1"
with self.assertRaises(TypeError):
gridplot([X], grid_labels, height=height)
with self.assertRaises(TypeError):
pairplot(X, height=height)
# test title
title = 1
with self.assertRaises(TypeError):
heatmap(X, title=title)
with self.assertRaises(TypeError):
gridplot([X], grid_labels, title=title)
with self.assertRaises(TypeError):
pairplot(X, title=title)
# test context
context = 123
with self.assertRaises(TypeError):
heatmap(X, context=context)
with self.assertRaises(TypeError):
gridplot([X], grid_labels, context=context)
with self.assertRaises(TypeError):
pairplot(X, context=context)
context = "journal"
with self.assertRaises(ValueError):
heatmap(X, context=context)
with self.assertRaises(ValueError):
gridplot([X], grid_labels, context=context)
with self.assertRaises(ValueError):
pairplot(X, context=context)
# test font scales
font_scales = ["1", []]
for font_scale in font_scales:
with self.assertRaises(TypeError):
heatmap(X, font_scale=font_scale)
with self.assertRaises(TypeError):
gridplot([X], grid_labels, font_scale=font_scale)
with self.assertRaises(TypeError):
                pairplot(X, font_scale=font_scale)
# ticklabels
with self.assertRaises(TypeError):
xticklabels = "labels"
yticklabels = "labels"
heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)
with self.assertRaises(ValueError):
xticklabels = ["{}".format(i) for i in range(5)]
yticklabels = ["{}".format(i) for i in range(5)]
heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)
with self.assertRaises(TypeError):
heatmap(X, title_pad="f")
with self.assertRaises(TypeError):
gridplot([X], title_pad="f")
with self.assertRaises(TypeError):
heatmap(X, hier_label_fontsize="f")
with self.assertRaises(TypeError):
gridplot([X], hier_label_fontsize="f")
def test_heatmap_inputs(self):
"""
test parameter checks
"""
X = np.random.rand(10, 10)
with self.assertRaises(TypeError):
heatmap(X="input")
# transform
with self.assertRaises(ValueError):
transform = "bad transform"
heatmap(X, transform=transform)
# cmap
with self.assertRaises(TypeError):
cmap = 123
heatmap(X, cmap=cmap)
# center
with self.assertRaises(TypeError):
center = "center"
heatmap(X, center=center)
# cbar
with self.assertRaises(TypeError):
cbar = 1
heatmap(X, cbar=cbar)
def test_heatmap_output(self):
"""
simple function to see if plot is made without errors
"""
X = er_np(10, 0.5)
xticklabels = ["Dimension {}".format(i) for i in range(10)]
yticklabels = ["Dimension {}".format(i) for i in range(10)]
fig = heatmap(
X, transform="log", xticklabels=xticklabels, yticklabels=yticklabels
)
fig = heatmap(X, transform="zero-boost")
fig = heatmap(X, transform="simple-all")
fig = heatmap(X, transform="simple-nonzero")
fig = heatmap(X, transform="binarize")
fig = heatmap(X, cmap="gist_rainbow")
def test_gridplot_inputs(self):
X = [er_np(10, 0.5)]
labels = ["ER(10, 0.5)"]
with self.assertRaises(TypeError):
gridplot(X="input", labels=labels)
with self.assertRaises(ValueError):
gridplot(X, labels=["a", "b"])
# transform
with self.assertRaises(ValueError):
transform = "bad transform"
gridplot(X, labels=labels, transform=transform)
def test_gridplot_outputs(self):
"""
simple function to see if plot is made without errors
"""
X = [er_np(10, 0.5) for _ in range(2)]
labels = ["Random A", "Random B"]
fig = gridplot(X, labels)
fig = gridplot(X, labels, transform="zero-boost")
fig = gridplot(X, labels, "simple-all", title="Test", font_scale=0.9)
def test_pairplot_inputs(self):
X = np.random.rand(15, 3)
Y = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
# test data
with self.assertRaises(TypeError):
pairplot(X="test")
with self.assertRaises(ValueError):
pairplot(X=X, labels=["A"])
with self.assertRaises(TypeError):
pairplot(X, col_names="A")
with self.assertRaises(ValueError):
pairplot(X, col_names=["1", "2"])
with self.assertRaises(ValueError):
pairplot(X, col_names=["1", "2", "3"], variables=[1, 2, 3, 4])
with self.assertRaises(KeyError):
pairplot(X, col_names=["1", "2", "3"], variables=["A", "B"])
def test_pairplot_outputs(self):
X = np.random.rand(15, 3)
Y = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
col_names = ["Feature1", "Feature2", "Feature3"]
fig = pairplot(X)
fig = pairplot(X, Y)
fig = pairplot(X, Y, col_names)
fig = pairplot(
X,
Y,
col_names,
title="Test",
height=1.5,
variables=["Feature1", "Feature2"],
)
def test_pairplot_with_gmm_inputs_type_full(self):
_test_pairplot_with_gmm_inputs(self, covariance_type="full")
def test_pairplot_with_gmm_inputs_type_diag(self):
_test_pairplot_with_gmm_inputs(self, covariance_type="diag")
def test_pairplot_with_gmm_inputs_type_tied(self):
_test_pairplot_with_gmm_inputs(self, covariance_type="tied")
def test_pairplot_with_gmm_inputs_type_spherical(self):
_test_pairplot_with_gmm_inputs(self, covariance_type="spherical")
def test_pairplot_with_gmm_outputs_type_full(self):
_test_pairplot_with_gmm_outputs(covariance_type="full")
def test_pairplot_with_gmm_outputs_type_diag(self):
_test_pairplot_with_gmm_outputs(covariance_type="diag")
def test_pairplot_with_gmm_outputs_type_tied(self):
_test_pairplot_with_gmm_outputs(covariance_type="tied")
def test_pairplot_with_gmm_outputs_type_spherical(self):
_test_pairplot_with_gmm_outputs(covariance_type="spherical")
def test_sort_inds(self):
B = np.array(
[
[0, 0.2, 0.1, 0.1, 0.1],
[0.2, 0.8, 0.1, 0.3, 0.1],
[0.15, 0.1, 0, 0.05, 0.1],
[0.1, 0.1, 0.2, 1, 0.1],
[0.1, 0.2, 0.1, 0.1, 0.8],
]
)
g = sbm([10, 30, 50, 25, 25], B, directed=True)
degrees = g.sum(axis=0) + g.sum(axis=1)
degree_sort_inds = np.argsort(degrees)
labels2 = 40 * ["0"] + 100 * ["1"]
labels1 = 10 * ["d"] + 30 * ["c"] + 50 * ["d"] + 25 * ["e"] + 25 * ["c"]
labels1 = np.array(labels1)
labels2 = np.array(labels2)
sorted_inds = _sort_inds(g, labels1, labels2, True)
# sort outer blocks first if given, sort by num verts in the block
# for inner hier, sort by num verts for that category across the entire graph
# ie if there are multiple inner hier across different outer blocks, sort
# by prevalence in the entire graph, not within block
# this is to make the ordering within outer block consistent
# within a block, sort by degree
# outer block order should thus be: 1, 0
# inner block order should thus be: d, c, e
# show that outer blocks are sorted correctly
labels2 = labels2[sorted_inds]
self.assertTrue(np.all(labels2[:100] == "1"))
self.assertTrue(np.all(labels2[100:] == "0"))
# show that inner blocks are sorted correctly
labels1 = labels1[sorted_inds]
self.assertTrue(np.all(labels1[:50] == "d"))
self.assertTrue(np.all(labels1[50:75] == "c"))
self.assertTrue(np.all(labels1[75:100] == "e"))
self.assertTrue(np.all(labels1[100:110] == "d"))
self.assertTrue(np.all(labels1[110:] == "c"))
# show that within block, everything is in descending degree order
degrees = degrees[sorted_inds]
self.assertTrue(np.all(np.diff(degrees[:50]) <= 0))
self.assertTrue(np.all(np.diff(degrees[50:75]) <= 0))
self.assertTrue(np.all(np.diff(degrees[75:100]) <= 0))
self.assertTrue(np.all(np.diff(degrees[100:110]) <= 0))
self.assertTrue(np.all(np.diff(degrees[110:]) <= 0))
|
from dbgscript import *
thd = Process.current_thread
print(thd)
frame = thd.current_frame
locals = frame.get_locals()
print(locals)
for l in locals: print(l.name)
for l in locals: print(l.name, l.type)
car1 = locals[0]
print(car1.name)
car1_f = car1['f']
print(car1_f)
print(car1_f.name, car1_f.type)
print(car1_f.name, car1_f.type, car1_f.size)
foo_c = car1_f['c']
print(foo_c)
print(foo_c.name)
print(foo_c.name, foo_c.type)
print(foo_c.name, foo_c.type, foo_c.size, hex(foo_c.address), foo_c.value)
# car1_f['xyz'] # no such field
print(car1_f['arr'])
print(car1_f['arr'][0].value)
print(len(car1_f['arr']))
#some_foo_ptr = Process.read_ptr(0x000007f913ef9c0)
#print(hex(some_foo_ptr))
print (hex(car1_f.address), hex(car1_f.value))
|
"""Tests for pywemo.ouimeaux_device.api.service."""
import unittest.mock as mock
from xml.etree import ElementTree
from xml.etree import cElementTree as cet
import pytest
import requests
import pywemo.ouimeaux_device.api.service as svc
HEADERS_KWARG_KEY = "headers"
CONTENT_TYPE_KEY = "Content-Type"
SOAPACTION_KEY = "SOAPACTION"
MOCK_ARGS_ORDERED = 0
MOCK_ARGS_KWARGS = 1
svc.LOG = mock.Mock()
MOCK_RESPONSE = (
b'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
b' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
b'<s:Body>\n<u:GetInsightParamsResponse xmlns:u="urn:Belkin:service:metainfo:1">'
b"\r\n<InsightParams>0|1604849509|85|1315|27628|1209600|772|0|21689183|386799026.000000|8000"
b"</InsightParams>\r\n</u:GetInsightParamsResponse>\r\n</s:Body> </s:Envelope>"
)
class TestAction:
@staticmethod
def get_mock_action(name="", service_type="", url=""):
device = mock.Mock()
service = mock.Mock()
service.serviceType = service_type
service.controlURL = url
action_config = mock.MagicMock()
action_config.get_name = lambda: name
return svc.Action(device, service, action_config)
@staticmethod
def get_et_mock():
resp = cet.fromstring(MOCK_RESPONSE)
return mock.MagicMock(return_value=resp)
def test_call_post_request_is_made_exactly_once_when_successful(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
cet.fromstring = self.get_et_mock()
action()
assert post_mock.call_count == 1
def test_call_request_has_well_formed_xml_body(self):
action = self.get_mock_action(name="cool_name", service_type="service")
requests.post = post_mock = mock.Mock()
cet.fromstring = self.get_et_mock()
action()
body = post_mock.call_args[MOCK_ARGS_ORDERED][1]
ElementTree.fromstring(body) # will raise error if xml is malformed
def test_call_request_has_correct_header_keys(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
for header in [CONTENT_TYPE_KEY, SOAPACTION_KEY]:
assert header in headers
def test_call_headers_has_correct_content_type(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
content_type_header = headers[CONTENT_TYPE_KEY]
assert content_type_header == "text/xml"
def test_call_headers_has_correct_soapaction(self):
service_type = "some_service"
name = "cool_name"
action = self.get_mock_action(name, service_type)
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
soapaction_header = headers[SOAPACTION_KEY]
assert soapaction_header == '"%s#%s"' % (service_type, name)
def test_call_headers_has_correct_url(self):
url = "http://www.github.com/"
action = self.get_mock_action(url=url)
requests.post = post_mock = mock.Mock()
action()
actual_url = post_mock.call_args[MOCK_ARGS_ORDERED][0]
assert actual_url == url
def test_call_request_is_tried_up_to_max_on_communication_error(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock(
side_effect=requests.exceptions.RequestException
)
try:
action()
except svc.ActionException:
pass
assert post_mock.call_count == svc.MAX_RETRIES
def test_call_throws_when_final_retry_fails(self):
action = self.get_mock_action()
requests.post = mock.Mock(
side_effect=requests.exceptions.RequestException
)
with pytest.raises(svc.ActionException):
action()
def test_call_returns_correct_dictionary_with_response_contents(self):
action = self.get_mock_action()
requests.post = mock.Mock()
envelope = cet.Element("soapEnvelope")
body = cet.SubElement(envelope, "soapBody")
response = cet.SubElement(body, "soapResponse")
response_content = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
for key, value in response_content.items():
element = cet.SubElement(response, key)
element.text = value
cet.fromstring = mock.MagicMock(return_value=envelope)
actual_responses = action()
assert actual_responses == response_content
|
from pydantic import AnyHttpUrl
from typing import List
import os
ENV = os.environ.get("fast_env", "DEV") # environment for this run
class Settings:
APP_NAME = "fastapi-vue-admin"
    # API prefix
API_PREFIX = "/api"
    # JWT secret key; generating a random one is recommended
SECRET_KEY = "ShsUP9qIP2Xui2GpXRY6y74v2JSVS0Q2YOXJ22VjwkI"
    # token expiration time (in minutes)
ACCESS_TOKEN_EXPIRE_MINUTES = 24 * 60
    # CORS whitelist for cross-origin requests
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = ["http://localhost:9528"]
    # database configuration
DB_URL = "mysql+pymysql://root:Aa123456@127.0.0.1:3306/fast"
    # startup port
PORT = 8999
    # whether to enable hot reload
RELOAD = True
settings = Settings()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for database migrations for the API database.
These are "opportunistic" tests which allow testing against all three databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up DBs named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
from alembic import command as alembic_api
from alembic import script as alembic_script
from migrate.versioning import api as migrate_api
import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_log import log as logging
import testtools
from nova.db.api import models
from nova.db import migration
from nova import test
LOG = logging.getLogger(__name__)
class NovaModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
"""Test that the models match the database after migrations are run."""
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
def db_sync(self, engine):
with mock.patch.object(migration, '_get_engine', return_value=engine):
migration.db_sync(database='api')
def get_engine(self):
return self.engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
# migrate_version is a sqlalchemy-migrate control table and
# isn't included in the model.
if name == 'migrate_version':
return False
return True
def filter_metadata_diff(self, diff):
# Filter out diffs that shouldn't cause a sync failure.
new_diff = []
# Define a whitelist of ForeignKeys that exist on the model but not in
# the database. They will be removed from the model at a later time.
fkey_whitelist = {'build_requests': ['request_spec_id']}
# Define a whitelist of columns that will be removed from the
# DB at a later release and aren't on a model anymore.
column_whitelist = {
'build_requests': [
'vm_state', 'instance_metadata',
'display_name', 'access_ip_v6', 'access_ip_v4', 'key_name',
'locked_by', 'image_ref', 'progress', 'request_spec_id',
'info_cache', 'user_id', 'task_state', 'security_groups',
'config_drive',
],
'resource_providers': ['can_host'],
}
for element in diff:
if isinstance(element, list):
# modify_nullable is a list
new_diff.append(element)
else:
# tuple with action as first element. Different actions have
# different tuple structures.
if element[0] == 'add_fk':
fkey = element[1]
tablename = fkey.table.name
column_keys = fkey.column_keys
if (
tablename in fkey_whitelist and
column_keys == fkey_whitelist[tablename]
):
continue
elif element[0] == 'remove_column':
tablename = element[2]
column = element[3]
if (
tablename in column_whitelist and
column.name in column_whitelist[tablename]
):
continue
new_diff.append(element)
return new_diff
class TestModelsSyncSQLite(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsSyncMySQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsSyncPostgreSQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
"""Test that the models match the database after old migrations are run."""
def db_sync(self, engine):
# the 'nova.db.migration.db_sync' method will not use the legacy
# sqlalchemy-migrate-based migration flow unless the database is
# already controlled with sqlalchemy-migrate, so we need to manually
# enable version controlling with this tool to test this code path
repository = migration._find_migrate_repo(database='api')
migrate_api.version_control(
engine, repository, migration.MIGRATE_INIT_VERSION['api'])
# now we can apply migrations as expected and the legacy path will be
# followed
super().db_sync(engine)
class TestModelsLegacySyncSQLite(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsLegacySyncMySQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsLegacySyncPostgreSQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
self.init_version = migration.ALEMBIC_INIT_VERSION['api']
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
return
self.assertIsNotNone(
getattr(self, '_check_%s' % revision, None),
(
'API DB Migration %s does not have a test; you must add one'
) % revision,
)
pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
if pre_upgrade:
pre_upgrade(connection)
alembic_api.upgrade(self.config, revision)
post_upgrade = getattr(self, '_check_%s' % revision, None)
if post_upgrade:
post_upgrade(connection)
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
There's no good reason for us to have diverging history, so validate
that only one base revision exists. This will prevent simple errors
        where people forget to specify the base revision. If this fails for your
change, look for migrations that do not have a 'revises' line in them.
"""
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_bases()))
def test_single_head_revision(self):
"""Ensure we only have a single head revision.
There's no good reason for us to have diverging history, so validate
that only one head revision exists. This will prevent merge conflicts
        adding additional head revision points. If this fails for your change,
look for migrations with the same 'revises' line in them.
"""
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_heads()))
def test_walk_versions(self):
with self.engine.begin() as connection:
self.config.attributes['connection'] = connection
script = alembic_script.ScriptDirectory.from_config(self.config)
revisions = [x.revision for x in script.walk_revisions()]
# for some reason, 'walk_revisions' gives us the revisions in
# reverse chronological order so we have to invert this
revisions.reverse()
self.assertEqual(revisions[0], self.init_version)
for revision in revisions:
LOG.info('Testing revision %s', revision)
self._migrate_up(connection, revision)
def test_db_version_alembic(self):
migration.db_sync(database='api')
script = alembic_script.ScriptDirectory.from_config(self.config)
head = script.get_current_head()
self.assertEqual(head, migration.db_version(database='api'))
class TestMigrationsWalkSQLite(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
pass
class TestMigrationsWalkMySQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestMigrationsWalkPostgreSQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
|
import icmplib
from pipecheck.api import CheckResult, Err, Ok, Probe, Warn
class PingProbe(Probe):
"""ICMP ping check"""
host: str = ""
ping_count: int = 1
def __call__(self) -> CheckResult:
h = icmplib.ping(self.host, privileged=False, count=self.ping_count)
if h.is_alive:
if h.packet_loss > 0.0:
return Warn(f"ICMP '{self.host}' ({h.address}) unreliable! packet loss {h.packet_loss*100}%")
return Ok(f"ICMP '{self.host}' reachable ({h.avg_rtt}ms)")
return Err(f"ICMP '{self.host}' unreachable")
|
"""A merge sort which accepts an array as input and recursively
splits an array in half and sorts and combines them.
"""
"""https://en.wikipedia.org/wiki/Merge_sort """
def merge(arr: list[int]) -> list[int]:
"""Return a sorted array.
>>> merge([10,9,8,7,6,5,4,3,2,1])
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> merge([1,2,3,4,5,6,7,8,9,10])
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> merge([10,22,1,2,3,9,15,23])
[1, 2, 3, 9, 10, 15, 22, 23]
>>> merge([100])
[100]
>>> merge([])
[]
"""
if len(arr) > 1:
middle_length = len(arr) // 2 # Finds the middle of the array
left_array = arr[
:middle_length
] # Creates an array of the elements in the first half.
right_array = arr[
middle_length:
] # Creates an array of the elements in the second half.
left_size = len(left_array)
right_size = len(right_array)
merge(left_array) # Starts sorting the left.
merge(right_array) # Starts sorting the right
left_index = 0 # Left Counter
right_index = 0 # Right Counter
index = 0 # Position Counter
while (
left_index < left_size and right_index < right_size
        ): # Runs until the smaller of the two halves is exhausted.
if left_array[left_index] < right_array[right_index]:
arr[index] = left_array[left_index]
left_index = left_index + 1
else:
arr[index] = right_array[right_index]
right_index = right_index + 1
index = index + 1
while (
left_index < left_size
        ): # Adds the remaining elements from the left half of the array
arr[index] = left_array[left_index]
left_index = left_index + 1
index = index + 1
while (
right_index < right_size
        ): # Adds the remaining elements from the right half of the array
arr[index] = right_array[right_index]
right_index = right_index + 1
index = index + 1
return arr
if __name__ == "__main__":
import doctest
doctest.testmod()
|
# -*- coding: utf-8 -*-
import hmac
import hashlib
import base64
"""
unit : utils
description : Collection of functions used in all projects
author : Alcindo Schleder
version : 1.0.0
package : i-City Identification Plataform
"""
def isnumber(value):
try:
float(value)
except ValueError:
return False
return True
def calcFileSignature(data: str, password: str = None):
if (password):
        # hmac requires bytes keys/messages, so encode the str inputs first
        digest = hmac.new(password.encode(), msg=data.encode(), digestmod=hashlib.sha256).digest()
resHash = base64.b64encode(digest).decode()
else:
hasher = hashlib.sha256()
        hasher.update(data.encode())  # hashlib also requires bytes
resHash = hasher.hexdigest()
return resHash
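# --- Hedged demo (not part of the module): exercises both signature modes.
if __name__ == "__main__":
    payload = "hello world"
    print(calcFileSignature(payload))            # plain SHA-256 hex digest
    print(calcFileSignature(payload, "s3cret"))  # base64-encoded HMAC-SHA256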
|
# -*- coding: utf-8 -*-
'''
:file: app.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/21 12:44:37
'''
import os
import click
from apiflask import APIFlask, abort
from app.config import config
from app.models import TodoList
from app.extensions import db, cors
from app.api.todo import todo_bp
def create_app(config_name: str = None) -> APIFlask:
"""构造工厂
Args:
config_name (str, optional): 配置文件名. Defaults to None.
Returns:
APIFlask: falsk app 实例
"""
if config_name is None:
config_name = os.getenv('FLASK_CONFIG', 'development')
app = APIFlask(__name__)
app.config.from_object(config[config_name])
register_extensions(app)
register_blueprints(app)
register_errors(app)
register_commands(app)
return app
def register_extensions(app: APIFlask):
"""初始化扩展
Args:
app (APIFlask): flask app 实例
"""
db.init_app(app)
cors.init_app(app)
def register_blueprints(app: APIFlask):
app.register_blueprint(todo_bp, url_prefix="/")
def register_errors(app: APIFlask):
pass
# @app.errorhandler(Exception)
# def internal_server_error(e):
# abort(500, message=str(e))
def register_commands(app: APIFlask):
@app.cli.command()
def initdb():
db.drop_all()
db.create_all()
@app.cli.command()
    @click.option('--count', default=5, help='Quantity of fake todo items, default is 5.')
def fakedb(count):
from faker import Faker
from datetime import datetime
print(datetime.now())
db.drop_all()
db.create_all()
fake = Faker()
click.echo('Working...')
for _ in range(count):
todo = TodoList(
task=fake.sentence(),
completed=fake.pybool()
)
db.session.add(todo)
db.session.commit()
click.echo('Created %d fake todo items.' % count)
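# --- Hedged usage sketch: with an application factory, the custom commands
# above are typically invoked through the Flask CLI; the FLASK_APP value is a
# hypothetical module path, not taken from the project.
#
#   export FLASK_APP="app:create_app"
#   flask initdb                # recreate the tables
#   flask fakedb --count 10     # seed ten fake todo items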
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v1.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
100% Mobilenet V1 (base) with input size 224x224:
See mobilenet_v1()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016
MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672
MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112
MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336
MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112
MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672
MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224
MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168
MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112
MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336
MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224
MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584
MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112
MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792
MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112
MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584
MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224
--------------------------------------------------------------------------------
Total: 3,185,088 567,716,352
75% Mobilenet V1 (base) with input size 128x128:
See mobilenet_v1_075()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208
MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736
MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592
MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368
MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592
MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736
MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184
MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184
MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592
MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368
MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184
MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592
MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592
MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296
MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592
MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592
MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184
--------------------------------------------------------------------------------
Total: 1,800,144 106,002,432
"""
# Tensorflow mandates these.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
# stride is the stride of the convolution
# depth is the number of channels or filters in a layer
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# MOBILENETV1_CONV_DEFS specifies the MobileNet body
MOBILENETV1_CONV_DEFS = [
Conv(kernel=[3, 3], stride=2, depth=32),
DepthSepConv(kernel=[3, 3], stride=1, depth=64),
DepthSepConv(kernel=[3, 3], stride=2, depth=128),
DepthSepConv(kernel=[3, 3], stride=1, depth=128),
DepthSepConv(kernel=[3, 3], stride=2, depth=256),
DepthSepConv(kernel=[3, 3], stride=1, depth=256),
DepthSepConv(kernel=[3, 3], stride=2, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs
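# Worked example (illustrative): for kernel_size=[3, 3] and rate=1 the effective kernel
# stays [3, 3], so pad_total=[2, 2], pad_beg=[1, 1] and pad_end=[1, 1]; a
# [1, 224, 224, 3] input is padded to [1, 226, 226, 3], which is exactly what a stride-1
# convolution with 'SAME' padding would see.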
def mobilenet_v1_base(inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
output_stride=None,
use_explicit_padding=False,
scope=None):
"""Mobilenet v1.
Constructs a Mobilenet v1 network from inputs to the given final endpoint.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
      'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5_pointwise',
'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 8 (accurate fully convolutional
mode), 16 (fast fully convolutional mode), 32 (classification mode).
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
depth = lambda d: max(int(d * depth_multiplier), min_depth)
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
if conv_defs is None:
conv_defs = MOBILENETV1_CONV_DEFS
if output_stride is not None and output_stride not in [8, 16, 32]:
raise ValueError('Only allowed output_stride values are 8, 16, 32.')
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
for i, conv_def in enumerate(conv_defs):
end_point_base = 'Conv2d_%d' % i
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= conv_def.stride
else:
layer_stride = conv_def.stride
layer_rate = 1
current_stride *= conv_def.stride
if isinstance(conv_def, Conv):
end_point = end_point_base
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel)
net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
stride=conv_def.stride,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
elif isinstance(conv_def, DepthSepConv):
end_point = end_point_base + '_depthwise'
# By passing filters=None
# separable_conv2d produces only a depthwise convolution layer
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel, layer_rate)
net = slim.separable_conv2d(net, None, conv_def.kernel,
depth_multiplier=1,
stride=layer_stride,
rate=layer_rate,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
end_point = end_point_base + '_pointwise'
net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
stride=1,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
else:
raise ValueError('Unknown convolution type %s for layer %d'
% (conv_def.ltype, i))
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def mobilenet_v1(inputs,
num_classes=1000,
dropout_keep_prob=0.999,
is_training=True,
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='MobilenetV1',
global_pool=False):
"""Mobilenet v1 model for classification.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
dropout_keep_prob: the percentage of activation values that are retained.
is_training: whether is training or not.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: Input rank is invalid.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
len(input_shape))
with tf.variable_scope(
scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = mobilenet_v1_base(inputs, scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
conv_defs=conv_defs)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a')
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 1024
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
mobilenet_v1.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
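# Worked example (illustrative): with a 128x128 input the MobileNet base produces a
# 4x4 feature map (overall stride 32), so a requested [7, 7] pooling kernel is reduced
# to [4, 4]; when the spatial shape is unknown (None) the [7, 7] default is kept as-is.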
def mobilenet_v1_arg_scope(
is_training=True,
weight_decay=0.00004,
stddev=0.09,
regularize_depthwise=False,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
normalizer_fn=slim.batch_norm):
"""Defines the default MobilenetV1 arg scope.
Args:
is_training: Whether or not we're training the model. If this is set to
None, the parameter is not added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
regularize_depthwise: Whether or not apply regularization on depthwise.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
normalizer_fn: Normalization function to apply after convolution.
Returns:
An `arg_scope` to use for the mobilenet v1 model.
"""
batch_norm_params = {
'center': True,
'scale': True,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'updates_collections': batch_norm_updates_collections,
}
if is_training is not None:
batch_norm_params['is_training'] = is_training
# Set weight_decay for weights in Conv and DepthSepConv layers.
weights_init = tf.truncated_normal_initializer(stddev=stddev)
regularizer = slim.l2_regularizer(weight_decay)
if regularize_depthwise:
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
weights_initializer=weights_init,
activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d],
weights_regularizer=depthwise_regularizer) as sc:
return sc
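# Hedged usage sketch (added for illustration; not part of the original file): build the
# 50%-width classifier inside its default arg scope. `images` is assumed to be a float
# tensor of shape [batch, 224, 224, 3].
#
#   with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
#     logits, end_points = mobilenet_v1_050(images, num_classes=1001, is_training=False)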
|
# coding: utf-8
from typing import List, Dict
from .items.data_items import DataItems
from .shared_data import BaseData
from .full_imports import FullImports
from .from_import import FromImport
class Data(BaseData):
from_imports: List[FromImport]
from_imports_typing: List[FromImport]
extends_map: Dict[str, str]
quote: List[str]
typings: List[str]
requires_typing: bool
full_imports: FullImports
imports: List[str]
extends: List[str]
items: DataItems
|
from typing import List, Optional
from dagster_fivetran.resources import DEFAULT_POLL_INTERVAL
from dagster_fivetran.utils import generate_materializations
from dagster import AssetKey, AssetsDefinition, Out, Output
from dagster import _check as check
from dagster import multi_asset
from dagster.utils.backcompat import experimental
@experimental
def build_fivetran_assets(
connector_id: str,
destination_tables: List[str],
poll_interval: float = DEFAULT_POLL_INTERVAL,
poll_timeout: Optional[float] = None,
io_manager_key: Optional[str] = None,
asset_key_prefix: Optional[List[str]] = None,
) -> List[AssetsDefinition]:
"""
Build a set of assets for a given Fivetran connector.
    Returns an AssetsDefinition which connects the specified ``asset_keys`` to the computation that
will update them. Internally, executes a Fivetran sync for a given ``connector_id``, and
polls until that sync completes, raising an error if it is unsuccessful. Requires the use of the
:py:class:`~dagster_fivetran.fivetran_resource`, which allows it to communicate with the
Fivetran API.
Args:
connector_id (str): The Fivetran Connector ID that this op will sync. You can retrieve this
value from the "Setup" tab of a given connector in the Fivetran UI.
destination_tables (List[str]): `schema_name.table_name` for each table that you want to be
represented in the Dagster asset graph for this connection.
poll_interval (float): The time (in seconds) that will be waited between successive polls.
        poll_timeout (Optional[float]): The maximum time that will be waited before this operation is
timed out. By default, this will never time out.
io_manager_key (Optional[str]): The io_manager to be used to handle each of these assets.
asset_key_prefix (Optional[List[str]]): A prefix for the asset keys inside this asset.
If left blank, assets will have a key of `AssetKey([schema_name, table_name])`.
Examples:
.. code-block:: python
from dagster import AssetKey, build_assets_job
from dagster_fivetran import fivetran_resource
from dagster_fivetran.assets import build_fivetran_assets
my_fivetran_resource = fivetran_resource.configured(
{
"api_key": {"env": "FIVETRAN_API_KEY"},
"api_secret": {"env": "FIVETRAN_API_SECRET"},
}
)
fivetran_assets = build_fivetran_assets(
connector_id="foobar",
        destination_tables=["schema1.table1", "schema2.table2"],
    )
my_fivetran_job = build_assets_job(
"my_fivetran_job",
assets=[fivetran_assets],
resource_defs={"fivetran": my_fivetran_resource}
)
"""
asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)
tracked_asset_keys = {
AssetKey(asset_key_prefix + table.split(".")) for table in destination_tables
}
@multi_asset(
name=f"fivetran_sync_{connector_id}",
outs={
"_".join(key.path): Out(io_manager_key=io_manager_key, asset_key=key)
for key in tracked_asset_keys
},
required_resource_keys={"fivetran"},
compute_kind="fivetran",
)
def _assets(context):
fivetran_output = context.resources.fivetran.sync_and_poll(
connector_id=connector_id,
poll_interval=poll_interval,
poll_timeout=poll_timeout,
)
for materialization in generate_materializations(
fivetran_output, asset_key_prefix=asset_key_prefix
):
# scan through all tables actually created, if it was expected then emit an Output.
# otherwise, emit a runtime AssetMaterialization
if materialization.asset_key in tracked_asset_keys:
yield Output(
value=None,
output_name="_".join(materialization.asset_key.path),
metadata={
entry.label: entry.entry_data for entry in materialization.metadata_entries
},
)
else:
yield materialization
return [_assets]
|
from __future__ import print_function
import os
import numpy as np
import random
import math
from skimage import io
import torch
import torch.utils.data as data
import torchfile
# from utils.utils import *
from utils.imutils import *
from utils.transforms import *
class W300(data.Dataset):
def __init__(self, args, split):
self.nParts = 68
self.pointType = args.pointType
# self.anno = anno
self.img_folder = args.data
self.split = split
self.is_train = True if self.split == 'train' else False
self.anno = self._getDataFaces(self.is_train)
self.total = len(self.anno)
self.scale_factor = args.scale_factor
self.rot_factor = args.rot_factor
self.mean, self.std = self._comput_mean()
def _getDataFaces(self, is_train):
base_dir = self.img_folder
dirs = os.listdir(base_dir)
lines = []
vallines = []
if is_train:
fid = open(os.path.join(base_dir, 'train.txt'), 'r')
for line in fid.readlines():
lines.append(line.strip())
fid.close()
else:
fid = open(os.path.join(base_dir, 'test.txt'), 'r')
for line in fid.readlines():
vallines.append(line.strip())
fid.close()
if is_train:
print('=> loaded train set, {} images were found'.format(len(lines)))
return lines
else:
print('=> loaded validation set, {} images were found'.format(len(vallines)))
return vallines
def __len__(self):
return self.total
def __getitem__(self, index):
inp, out, pts, c, s = self.generateSampleFace(index)
self.pts, self.c, self.s = pts, c, s
if self.is_train:
return inp, out
else:
meta = {'index': index, 'center': c, 'scale': s, 'pts': pts,}
return inp, out, meta
def generateSampleFace(self, idx):
sf = self.scale_factor
rf = self.rot_factor
main_pts = torchfile.load(
os.path.join(self.img_folder, 'landmarks', self.anno[idx].split('_')[0],
self.anno[idx][:-4] + '.t7'))
pts = main_pts[0] if self.pointType == '2D' else main_pts[1]
c = torch.Tensor((450 / 2, 450 / 2 + 50))
s = 1.8
img = load_image(
os.path.join(self.img_folder, self.anno[idx].split('_')[0], self.anno[idx][:-8] +
'.jpg'))
r = 0
if self.is_train:
s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
if random.random() <= 0.5:
img = torch.from_numpy(fliplr(img.numpy())).float()
pts = shufflelr(pts, width=img.size(2), dataset='w300lp')
c[0] = img.size(2) - c[0]
img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
inp = crop(img, c, s, [256, 256], rot=r)
inp = color_normalize(inp, self.mean, self.std)
tpts = pts.clone()
out = torch.zeros(self.nParts, 64, 64)
for i in range(self.nParts):
if tpts[i, 0] > 0:
tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))
out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)
return inp, out, pts, c, s
def _comput_mean(self):
meanstd_file = './data/300W_LP/mean.pth.tar'
if os.path.isfile(meanstd_file):
ms = torch.load(meanstd_file)
else:
print("\tcomputing mean and std for the first time, it may takes a while, drink a cup of coffe...")
mean = torch.zeros(3)
std = torch.zeros(3)
if self.is_train:
for i in range(self.total):
a = self.anno[i]
img_path = os.path.join(self.img_folder, self.anno[i].split('_')[0],
self.anno[i][:-8] + '.jpg')
img = load_image(img_path)
mean += img.view(img.size(0), -1).mean(1)
std += img.view(img.size(0), -1).std(1)
mean /= self.total
std /= self.total
ms = {
'mean': mean,
'std': std,
}
torch.save(ms, meanstd_file)
if self.is_train:
print('\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
print('\tStd: %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))
return ms['mean'], ms['std']
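# Hedged usage note (added for illustration): constructing this dataset requires an
# `args` object exposing pointType, data (the dataset root containing train.txt /
# test.txt and a 'landmarks' folder), scale_factor and rot_factor, e.g.
# W300(args, 'train'); the resulting instance can then be wrapped in a torch DataLoader.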
|
"""
Stolen from https://github.com/django/django/blob/master/tests/utils_tests/test_dateparse.py at
9718fa2e8abe430c3526a9278dd976443d4ae3c6
Changed to:
* use standard pytest layout
* parametrize tests
"""
from datetime import date, datetime, time, timedelta, timezone
import pytest
from pydantic import BaseModel, ValidationError, errors
from pydantic.datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
def create_tz(minutes):
return timezone(timedelta(minutes=minutes))
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
('1494012444.883309', date(2017, 5, 5)),
(b'1494012444.883309', date(2017, 5, 5)),
(1_494_012_444.883_309, date(2017, 5, 5)),
('1494012444', date(2017, 5, 5)),
(1_494_012_444, date(2017, 5, 5)),
(0, date(1970, 1, 1)),
('2012-04-23', date(2012, 4, 23)),
(b'2012-04-23', date(2012, 4, 23)),
('2012-4-9', date(2012, 4, 9)),
(date(2012, 4, 9), date(2012, 4, 9)),
(datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
# Invalid inputs
('x20120423', errors.DateError),
('2012-04-56', errors.DateError),
(19_999_999_999, date(2603, 10, 11)), # just before watershed
(20_000_000_001, date(1970, 8, 20)), # just after watershed
(1_549_316_052, date(2019, 2, 4)), # nowish in s
(1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
(1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
(1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
('infinity', date(9999, 12, 31)),
('inf', date(9999, 12, 31)),
(float('inf'), date(9999, 12, 31)),
('infinity ', date(9999, 12, 31)),
(int('1' + '0' * 100), date(9999, 12, 31)),
(1e1000, date(9999, 12, 31)),
('-infinity', date(1, 1, 1)),
('-inf', date(1, 1, 1)),
('nan', ValueError),
],
)
def test_date_parsing(value, result):
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
parse_date(value)
else:
assert parse_date(value) == result
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
('09:15:00', time(9, 15)),
('10:10', time(10, 10)),
('10:20:30.400', time(10, 20, 30, 400_000)),
(b'10:20:30.400', time(10, 20, 30, 400_000)),
('4:8:16', time(4, 8, 16)),
(time(4, 8, 16), time(4, 8, 16)),
(3610, time(1, 0, 10)),
(3600.5, time(1, 0, 0, 500000)),
(86400 - 1, time(23, 59, 59)),
('11:05:00-05:30', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00-0530', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00Z', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05:00+00', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05-06', time(11, 5, 0, tzinfo=create_tz(-360))),
('11:05+06', time(11, 5, 0, tzinfo=create_tz(360))),
# Invalid inputs
(86400, errors.TimeError),
('xxx', errors.TimeError),
('091500', errors.TimeError),
(b'091500', errors.TimeError),
('09:15:90', errors.TimeError),
('11:05:00Y', errors.TimeError),
('11:05:00-25:00', errors.TimeError),
],
)
def test_time_parsing(value, result):
if result == errors.TimeError:
with pytest.raises(errors.TimeError):
parse_time(value)
else:
assert parse_time(value) == result
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
# values in seconds
('1494012444.883309', datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
(1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
('1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(b'1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
# values in ms
('1494012444000.883309', datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
('-1494012444000.883309', datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
(1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
('2012-04-23T09:15:00Z', datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
('2012-4-9 4:8:16-0320', datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
('2012-04-23T10:20:30.400+02:30', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
('2012-04-23T10:20:30.400+02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
('2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(b'2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(datetime(2017, 5, 5), datetime(2017, 5, 5)),
(0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
# Invalid inputs
('x20120423091500', errors.DateTimeError),
('2012-04-56T09:15:90', errors.DateTimeError),
('2012-04-23T11:05:00-25:00', errors.DateTimeError),
(19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
(20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
(1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
(1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
(1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
(1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
('infinity', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf ', datetime(9999, 12, 31, 23, 59, 59, 999999)),
(1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
(float('inf'), datetime(9999, 12, 31, 23, 59, 59, 999999)),
('-infinity', datetime(1, 1, 1, 0, 0)),
('-inf', datetime(1, 1, 1, 0, 0)),
('nan', ValueError),
],
)
def test_datetime_parsing(value, result):
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
parse_datetime(value)
else:
assert parse_datetime(value) == result
@pytest.mark.parametrize(
'delta',
[
timedelta(days=4, minutes=15, seconds=30, milliseconds=100), # fractions of seconds
timedelta(hours=10, minutes=15, seconds=30), # hours, minutes, seconds
timedelta(days=4, minutes=15, seconds=30), # multiple days
timedelta(days=1, minutes=00, seconds=00), # single day
timedelta(days=-4, minutes=15, seconds=30), # negative durations
timedelta(minutes=15, seconds=30), # minute & seconds
timedelta(seconds=30), # seconds
],
)
def test_parse_python_format(delta):
assert parse_duration(delta) == delta
assert parse_duration(str(delta)) == delta
@pytest.mark.parametrize(
'value,result',
[
# seconds
(timedelta(seconds=30), timedelta(seconds=30)),
('30', timedelta(seconds=30)),
(30, timedelta(seconds=30)),
(30.1, timedelta(seconds=30, milliseconds=100)),
# minutes seconds
('15:30', timedelta(minutes=15, seconds=30)),
('5:30', timedelta(minutes=5, seconds=30)),
# hours minutes seconds
('10:15:30', timedelta(hours=10, minutes=15, seconds=30)),
('1:15:30', timedelta(hours=1, minutes=15, seconds=30)),
('100:200:300', timedelta(hours=100, minutes=200, seconds=300)),
# days
('4 15:30', timedelta(days=4, minutes=15, seconds=30)),
('4 10:15:30', timedelta(days=4, hours=10, minutes=15, seconds=30)),
# fractions of seconds
('15:30.1', timedelta(minutes=15, seconds=30, milliseconds=100)),
('15:30.01', timedelta(minutes=15, seconds=30, milliseconds=10)),
('15:30.001', timedelta(minutes=15, seconds=30, milliseconds=1)),
('15:30.0001', timedelta(minutes=15, seconds=30, microseconds=100)),
('15:30.00001', timedelta(minutes=15, seconds=30, microseconds=10)),
('15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
(b'15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
# negative
('-4 15:30', timedelta(days=-4, minutes=15, seconds=30)),
('-172800', timedelta(days=-2)),
('-15:30', timedelta(minutes=-15, seconds=30)),
('-1:15:30', timedelta(hours=-1, minutes=15, seconds=30)),
('-30.1', timedelta(seconds=-30, milliseconds=-100)),
# iso_8601
('P4Y', errors.DurationError),
('P4M', errors.DurationError),
('P4W', errors.DurationError),
('P4D', timedelta(days=4)),
('P0.5D', timedelta(hours=12)),
('PT5H', timedelta(hours=5)),
('PT5M', timedelta(minutes=5)),
('PT5S', timedelta(seconds=5)),
('PT0.000005S', timedelta(microseconds=5)),
(b'PT0.000005S', timedelta(microseconds=5)),
],
)
def test_parse_durations(value, result):
if result == errors.DurationError:
with pytest.raises(errors.DurationError):
parse_duration(value)
else:
assert parse_duration(value) == result
@pytest.mark.parametrize(
'field, value, error_message',
[
('dt', [], 'invalid type; expected datetime, string, bytes, int or float'),
('dt', {}, 'invalid type; expected datetime, string, bytes, int or float'),
('dt', object, 'invalid type; expected datetime, string, bytes, int or float'),
('d', [], 'invalid type; expected date, string, bytes, int or float'),
('d', {}, 'invalid type; expected date, string, bytes, int or float'),
('d', object, 'invalid type; expected date, string, bytes, int or float'),
('t', [], 'invalid type; expected time, string, bytes, int or float'),
('t', {}, 'invalid type; expected time, string, bytes, int or float'),
('t', object, 'invalid type; expected time, string, bytes, int or float'),
('td', [], 'invalid type; expected timedelta, string, bytes, int or float'),
('td', {}, 'invalid type; expected timedelta, string, bytes, int or float'),
('td', object, 'invalid type; expected timedelta, string, bytes, int or float'),
],
)
def test_model_type_errors(field, value, error_message):
class Model(BaseModel):
dt: datetime = None
d: date = None
t: time = None
td: timedelta = None
with pytest.raises(ValidationError) as exc_info:
Model(**{field: value})
assert len(exc_info.value.errors()) == 1
error = exc_info.value.errors()[0]
assert error == {'loc': (field,), 'type': 'type_error', 'msg': error_message}
@pytest.mark.parametrize('field', ['dt', 'd', 't', 'td'])
def test_unicode_decode_error(field):
class Model(BaseModel):
dt: datetime = None
d: date = None
t: time = None
td: timedelta = None
with pytest.raises(ValidationError) as exc_info:
Model(**{field: b'\x81'})
assert len(exc_info.value.errors()) == 1
error = exc_info.value.errors()[0]
assert error == {
'loc': (field,),
'type': 'value_error.unicodedecode',
'msg': "'utf-8' codec can't decode byte 0x81 in position 0: invalid start byte",
}
def test_nan():
class Model(BaseModel):
dt: datetime
d: date
with pytest.raises(ValidationError) as exc_info:
Model(dt='nan', d='nan')
assert exc_info.value.errors() == [
{
'loc': ('dt',),
'msg': 'cannot convert float NaN to integer',
'type': 'value_error',
},
{
'loc': ('d',),
'msg': 'cannot convert float NaN to integer',
'type': 'value_error',
},
]
|
#!/usr/bin/env python
import time
import signal
from gfxhat import touch, lcd, backlight, fonts
from PIL import Image, ImageFont, ImageDraw
print("""hello-world.py
This basic example prints the text "Hello World" in the middle of the LCD
Press any button to see its corresponding LED toggle on/off.
Press Ctrl+C to exit.
""")
led_states = [False for _ in range(6)]
width, height = lcd.dimensions()
image = Image.new('P', (width, height))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(fonts.AmaticSCBold, 38)
text = "Hello World"
w, h = font.getsize(text)
x = (width - w) // 2
y = (height - h) // 2
draw.text((x, y), text, 1, font)
def handler(ch, event):
if event == 'press':
led_states[ch] = not led_states[ch]
touch.set_led(ch, led_states[ch])
if led_states[ch]:
backlight.set_pixel(ch, 0, 255, 255)
else:
backlight.set_pixel(ch, 0, 255, 0)
backlight.show()
for x in range(6):
touch.set_led(x, 1)
time.sleep(0.1)
touch.set_led(x, 0)
for x in range(6):
backlight.set_pixel(x, 0, 255, 0)
touch.on(x, handler)
backlight.show()
for x in range(128):
for y in range(64):
pixel = image.getpixel((x, y))
lcd.set_pixel(x, y, pixel)
lcd.show()
try:
signal.pause()
except KeyboardInterrupt:
for x in range(6):
backlight.set_pixel(x, 0, 0, 0)
touch.set_led(x, 0)
backlight.show()
lcd.clear()
lcd.show()
|
# Copyright 2018 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.common import v3_common_cert
from vmware_nsx.shell import resources as shell
from neutron_lib.callbacks import registry
from oslo_config import cfg
@admin_utils.output_header
def generate_cert(resource, event, trigger, **kwargs):
"""Generate self signed client certificate and private key
"""
return v3_common_cert.generate_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def delete_cert(resource, event, trigger, **kwargs):
"""Delete client certificate and private key """
return v3_common_cert.delete_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def show_cert(resource, event, trigger, **kwargs):
"""Show client certificate details """
return v3_common_cert.show_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def import_cert(resource, event, trigger, **kwargs):
"""Import client certificate that was generated externally"""
return v3_common_cert.import_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def show_nsx_certs(resource, event, trigger, **kwargs):
"""Show client certificates associated with openstack identity in NSX"""
return v3_common_cert.show_nsx_certs(cfg.CONF.nsx_p, **kwargs)
registry.subscribe(generate_cert,
constants.CERTIFICATE,
shell.Operations.GENERATE.value)
registry.subscribe(show_cert,
constants.CERTIFICATE,
shell.Operations.SHOW.value)
registry.subscribe(delete_cert,
constants.CERTIFICATE,
shell.Operations.CLEAN.value)
registry.subscribe(import_cert,
constants.CERTIFICATE,
shell.Operations.IMPORT.value)
registry.subscribe(show_nsx_certs,
constants.CERTIFICATE,
shell.Operations.NSX_LIST.value)
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-2.4, 0.4, 20)
y = x * x + 2 * x + 1
plt.plot(x, y, 'c', linewidth=2.0)
plt.text(-1.5, 1.8, 'y=x^2 + 2*x + 1',
fontsize=14, style='italic')
plt.annotate('minimum point', xy=(-1, 0),
xytext=(-1, 0.3), horizontalalignment='center',
verticalalignment='top',
arrowprops=dict(arrowstyle='->',
connectionstyle='arc3'))
plt.savefig('annotate.png')
|
def retorno():
    resp = input('Do you want to run the program again? [y/n] ')
    if resp in ('Y', 'y'):
        verificar()
    else:
        print('Process finished successfully!')
def cabecalho(titulo):
    print('-' * 30)
    print(f'{titulo:^30}')
    print('-' * 30)
def mensagem_erro():
    print('The data entered is invalid!')
def verificar():
    try:
        cabecalho('Largest and Smallest Values in a List')
        valores = list()
        cont = 0
        for i in range(0, 5):
            cont += 1
            num = int(input('Enter value #{}: '.format(cont)))
            valores.append(num)
    except ValueError:
        mensagem_erro()
        retorno()
    else:
        maior = max(valores)
        menor = min(valores)
        print('The list values: {}'.format(valores))
        print('The largest value is {}, found at positions: '.format(maior), end='')
        for i, v in enumerate(valores):
            if v == maior:
                print('{} '.format(i), end='')
        print('\nThe smallest value is {}, found at positions: '.format(menor), end='')
        for i, v in enumerate(valores):
            if v == menor:
                print('{} '.format(i), end='')
        print('\n')
        retorno()
verificar()
|
from flask import Flask, redirect, render_template, url_for
import numpy as np
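# Note (added for clarity): OmniscientAgent, instantiated in the __main__ block at the
# bottom of this file, is assumed to be imported or defined elsewhere in this project;
# its import statement is not shown here.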
app = Flask( __name__ )
@app.route( '/home' )
def index():
# retrieve the agent
agent = app.config['AGENT']
print( 'Episode: {}/{}'.format( agent.get_episode(), agent.get_episodes() ) )
print( 'Trial: {}/{}'.format( agent.get_trial(), agent.get_trials() ) )
if agent.get_episode() > agent.get_episodes():
# episodes are over
# compute the final prob
prob_reward_array = agent.get_prob_reward_array()
prob_01 = 100*np.round( prob_reward_array[0] / agent.get_episodes(), 2 )
prob_02 = 100*np.round( prob_reward_array[1] / agent.get_episodes(), 2 )
# avg the accumulated reward
avg_accumulated_reward = agent.get_avg_accumulated_reward_array()
# print the final
print( '\nProb Bandit 01:{}% - Prob Bandit 02:{}%'.format( prob_01, prob_02 ) )
print( '\n Avg accumulated reward: {}\n'.format( np.mean( avg_accumulated_reward ) ) )
# reset the episodes
agent.reset_episode()
elif agent.get_trial() > agent.get_trials():
# trials are over
# increase the episode
agent.set_episode()
# compute the partial results
agent.set_prob_reward_array()
# append the accumualted reward
agent.set_append_accumulated_reward()
# append the avg accumulated reward
agent.set_append_avg_accumulated_reward()
# reset the trial and initial variables
agent.set_trial( reset=1 )
# get the partial results
partial_result = agent.get_prob_reward_array()
prob_01 = partial_result[0] / agent.get_episode()
prob_02 = partial_result[1] / agent.get_episode()
# print the partial results
print( '\n Prob Bandit 01:{} - Prob Bandit 02:{}\n'.format( prob_01, prob_02 ) )
return redirect( url_for( 'index' ) )
else:
# trials are not over
# code the omniscient agent
bandit_machine = np.argmax( agent.get_prob_list() )
# set the current bandit machine
agent.set_current_bandit( bandit_machine )
# pick up the web page
if bandit_machine == 0: # red Yes button
return render_template( 'layout_red.html' )
else:
return render_template( 'layout_blue.html' )
@app.route( '/yes', methods=['POST'] )
def yes_event():
agent = app.config['AGENT']
# set the reward
reward = 1
# get the current bandit machine
bandit_machine = agent.get_current_bandit()
# add a reward to the bandit machine
agent.set_reward_array( bandit_machine, reward )
# increase how many times the bandit machine gets the lever pulled
agent.set_bandit_array( bandit_machine )
# sum the accumulated reward
agent.set_accumulated_reward( reward )
# increase the number of trial
agent.set_trial( reset=0 )
return redirect( url_for( 'index' ) )
@app.route( '/no', methods=['POST'] )
def no_event():
agent = app.config['AGENT']
# set the reward
reward = 0
# get the current bandit machine
bandit_machine = agent.get_current_bandit()
# add a reward to the bandit machine
agent.set_reward_array( bandit_machine, reward )
# increase how many times the bandit machine gets the lever pulled
agent.set_bandit_array( bandit_machine )
# sum the accumulated reward
agent.set_accumulated_reward( reward )
# increase the number of trial
agent.set_trial( reset=0 )
return redirect( url_for( 'index' ) )
if __name__ == "__main__":
trials = 100
episodes = 20
prob_list = [0.3, 0.8]
agent = OmniscientAgent( prob_list, trials, episodes )
app.config['AGENT'] = agent
app.run()
|
"""
An RDFLib ConjunctiveGraph is an (unnamed) aggregation of all the named graphs
within a Store. The :meth:`~rdflib.graph.ConjunctiveGraph.get_context`
method can be used to get a particular named graph for use such as to add
triples to, or the default graph can be used.
This example shows how to create named graphs and work with the
conjunction (union) of all the graphs.
"""
from rdflib import Namespace, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.plugins.memory import IOMemory
if __name__ == "__main__":
ns = Namespace("http://love.com#")
mary = URIRef("http://love.com/lovers/mary")
john = URIRef("http://love.com/lovers/john")
cmary = URIRef("http://love.com/lovers/mary")
cjohn = URIRef("http://love.com/lovers/john")
store = IOMemory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
# add a graph for Mary's facts to the Conjunctive Graph
gmary = Graph(store=store, identifier=cmary)
    # Mary's graph only contains the URI of the person she loves, not his cute name
gmary.add((mary, ns["hasName"], Literal("Mary")))
gmary.add((mary, ns["loves"], john))
    # add a graph for John's facts to the Conjunctive Graph
gjohn = Graph(store=store, identifier=cjohn)
# John's graph contains his cute name
gjohn.add((john, ns["hasCuteName"], Literal("Johnny Boy")))
# enumerate contexts
for c in g.contexts():
print("-- %s " % c)
# separate graphs
print(gjohn.serialize(format="n3").decode("utf-8"))
print("===================")
print(gmary.serialize(format="n3").decode("utf-8"))
print("===================")
# full graph
print(g.serialize(format="n3").decode("utf-8"))
# query the conjunction of all graphs
xx = None
for x in g[mary : ns.loves / ns.hasCuteName]:
xx = x
print("Q: Who does Mary love?")
print("A: Mary loves {}".format(xx))
|
"""
Shared methods for Index subclasses backed by ExtensionArray.
"""
from typing import (
Hashable,
List,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from,
)
from pandas.core.dtypes.common import (
is_dtype_equal,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.base import Index
from pandas.core.ops import get_op_result_name
_T = TypeVar("_T", bound="NDArrayBackedExtensionIndex")
def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
name : str
Name of an attribute the class should inherit from its EA parent.
delegate : class
cache : bool, default False
Whether to convert wrapped properties into cache_readonly
wrap : bool, default False
Whether to wrap the inherited result in an Index.
Returns
-------
attribute, method, property, or cache_readonly
"""
attr = getattr(delegate, name)
if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
# getset_descriptor i.e. property defined in cython class
if cache:
def cached(self):
return getattr(self._data, name)
cached.__name__ = name
cached.__doc__ = attr.__doc__
method = cache_readonly(cached)
else:
def fget(self):
result = getattr(self._data, name)
if wrap:
if isinstance(result, type(self._data)):
return type(self)._simple_new(result, name=self.name)
elif isinstance(result, ABCDataFrame):
return result.set_index(self)
return Index(result, name=self.name)
return result
def fset(self, value):
setattr(self._data, name, value)
fget.__name__ = name
fget.__doc__ = attr.__doc__
method = property(fget, fset)
elif not callable(attr):
# just a normal attribute, no wrapping
method = attr
else:
def method(self, *args, **kwargs):
result = attr(self._data, *args, **kwargs)
if wrap:
if isinstance(result, type(self._data)):
return type(self)._simple_new(result, name=self.name)
elif isinstance(result, ABCDataFrame):
return result.set_index(self)
return Index(result, name=self.name)
return result
method.__name__ = name
method.__doc__ = attr.__doc__
return method
def inherit_names(names: List[str], delegate, cache: bool = False, wrap: bool = False):
"""
Class decorator to pin attributes from an ExtensionArray to a Index subclass.
Parameters
----------
names : List[str]
delegate : class
cache : bool, default False
wrap : bool, default False
Whether to wrap the inherited result in an Index.
"""
def wrapper(cls):
for name in names:
meth = inherit_from_data(name, delegate, cache=cache, wrap=wrap)
setattr(cls, name, meth)
return cls
return wrapper
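# Illustrative (hypothetical) usage of the decorator defined above -- pin a couple of
# ExtensionArray properties onto an Index subclass, wrapping array-like results in an Index:
#
#   @inherit_names(["freq", "tz"], DatetimeArray, wrap=True)
#   class MyDatetimeLikeIndex(ExtensionIndex):
#       ...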
def _make_wrapped_comparison_op(opname: str):
"""
Create a comparison method that dispatches to ``._data``.
"""
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
other = _maybe_unwrap_index(other)
op = getattr(self._data, opname)
return op(other)
wrapper.__name__ = opname
return wrapper
def make_wrapped_arith_op(opname: str):
def method(self, other):
if (
isinstance(other, Index)
and is_object_dtype(other.dtype)
and type(other) is not Index
):
# We return NotImplemented for object-dtype index *subclasses* so they have
# a chance to implement ops before we unwrap them.
# See https://github.com/pandas-dev/pandas/issues/31109
return NotImplemented
meth = getattr(self._data, opname)
result = meth(_maybe_unwrap_index(other))
return _wrap_arithmetic_op(self, other, result)
method.__name__ = opname
return method
def _wrap_arithmetic_op(self, other, result):
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# divmod, rdivmod
assert len(result) == 2
return (
_wrap_arithmetic_op(self, other, result[0]),
_wrap_arithmetic_op(self, other, result[1]),
)
if not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = get_op_result_name(self, other)
result.name = res_name
return result
def _maybe_unwrap_index(obj):
"""
If operating against another Index object, we need to unwrap the underlying
data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
implementation, otherwise we will incorrectly return NotImplemented.
Parameters
----------
obj : object
Returns
-------
unwrapped object
"""
if isinstance(obj, Index):
return obj._data
return obj
class ExtensionIndex(Index):
"""
Index subclass for indexes backed by ExtensionArray.
"""
# The base class already passes through to _data:
# size, __len__, dtype
_data: Union[IntervalArray, NDArrayBackedExtensionArray]
__eq__ = _make_wrapped_comparison_op("__eq__")
__ne__ = _make_wrapped_comparison_op("__ne__")
__lt__ = _make_wrapped_comparison_op("__lt__")
__gt__ = _make_wrapped_comparison_op("__gt__")
__le__ = _make_wrapped_comparison_op("__le__")
__ge__ = _make_wrapped_comparison_op("__ge__")
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
# ---------------------------------------------------------------------
# NDarray-Like Methods
def __getitem__(self, key):
result = self._data[key]
if isinstance(result, type(self._data)):
if result.ndim == 1:
return type(self)(result, name=self.name)
# Unpack to ndarray for MPL compat
result = result._ndarray
# Includes cases where we get a 2D ndarray back for MPL compat
deprecate_ndim_indexing(result)
return result
def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
# overriding IndexOpsMixin improves performance GH#38083
return self._data.searchsorted(value, side=side, sorter=sorter)
# ---------------------------------------------------------------------
def _get_engine_target(self) -> np.ndarray:
return np.asarray(self._data)
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
arr = self._data.delete(loc)
return type(self)._simple_new(arr, name=self.name)
def repeat(self, repeats, axis=None):
nv.validate_repeat((), {"axis": axis})
result = self._data.repeat(repeats, axis=axis)
return type(self)._simple_new(result, name=self.name)
def insert(self, loc: int, item):
# ExtensionIndex subclasses must override Index.insert
raise AbstractMethodError(self)
def _validate_fill_value(self, value):
"""
Convert value to be insertable to underlying array.
"""
return self._data._validate_setitem_value(value)
def _get_unique_index(self):
if self.is_unique:
return self
result = self._data.unique()
return self._shallow_copy(result)
@doc(Index.map)
def map(self, mapper, na_action=None):
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
try:
result = mapper(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError("The map function must return an Index object")
return result
except Exception:
return self.astype(object).map(mapper)
@doc(Index.astype)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_dtype_equal(self.dtype, dtype):
if not copy:
# Ensure that self.astype(self.dtype) is self
return self
return self.copy()
if isinstance(dtype, np.dtype) and dtype.kind == "M" and dtype != "M8[ns]":
# For now Datetime supports this by unwrapping ndarray, but DTI doesn't
raise TypeError(f"Cannot cast {type(self._data).__name__} to dtype")
new_values = self._data.astype(dtype, copy=copy)
# pass copy=False because any copying will be done in the
# _data.astype call above
return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)
@cache_readonly
def _isnan(self) -> np.ndarray:
# error: Incompatible return value type (got "ExtensionArray", expected
# "ndarray")
return self._data.isna() # type: ignore[return-value]
@doc(Index.equals)
def equals(self, other) -> bool:
# Dispatch to the ExtensionArray's .equals method.
if self.is_(other):
return True
if not isinstance(other, type(self)):
return False
return self._data.equals(other._data)
class NDArrayBackedExtensionIndex(ExtensionIndex):
"""
Index subclass for indexes backed by NDArrayBackedExtensionArray.
"""
_data: NDArrayBackedExtensionArray
_data_cls: Union[
Type[Categorical],
Type[DatetimeArray],
Type[TimedeltaArray],
Type[PeriodArray],
]
@classmethod
def _simple_new(
cls,
values: NDArrayBackedExtensionArray,
name: Hashable = None,
):
assert isinstance(values, cls._data_cls), type(values)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._ndarray
result._reset_identity()
return result
def _get_engine_target(self) -> np.ndarray:
return self._data._ndarray
def insert(self: _T, loc: int, item) -> _T:
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
ValueError if the item is not valid for this dtype.
"""
arr = self._data
try:
code = arr._validate_scalar(item)
except (ValueError, TypeError):
# e.g. trying to insert an integer into a DatetimeIndex
# We cannot keep the same dtype, so cast to the (often object)
# minimal shared dtype before doing the insert.
dtype, _ = infer_dtype_from(item, pandas_dtype=True)
dtype = find_common_type([self.dtype, dtype])
return self.astype(dtype).insert(loc, item)
else:
new_vals = np.concatenate(
(
arr._ndarray[:loc],
np.asarray([code], dtype=arr._ndarray.dtype),
arr._ndarray[loc:],
)
)
new_arr = arr._from_backing_data(new_vals)
return type(self)._simple_new(new_arr, name=self.name)
def putmask(self, mask, value) -> Index:
res_values = self._data.copy()
try:
res_values.putmask(mask, value)
except (TypeError, ValueError):
return self.astype(object).putmask(mask, value)
return type(self)._simple_new(res_values, name=self.name)
def _wrap_joined_index(self: _T, joined: np.ndarray, other: _T) -> _T:
name = get_op_result_name(self, other)
arr = self._data._from_backing_data(joined)
return type(self)._simple_new(arr, name=name)
|
def find_single(arr, n):
res = arr[0]
for i in range(1,n):
res = res ^ arr[i]
return res
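# Illustrative usage (added; not in the original): when every value except one appears an
# even number of times, XOR-ing all elements cancels the pairs and leaves the single value.
if __name__ == "__main__":
    sample = [2, 3, 5, 3, 2]                     # 5 appears once, the others twice
    print(find_single(sample, len(sample)))      # prints 5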
|
# -*- coding: utf-8 -*-
"""Sample controller with all its actions protected."""
from tg import expose, flash, redirect, request
from tg.i18n import lazy_ugettext as l_
from molgears.model import DBSession, Tags, LCompound, LPurity, Names
from molgears.model import Compound, User, Projects
from molgears.model.auth import UserLists
from molgears.lib.base import BaseController
import os
from sqlalchemy import desc
from rdkit import Chem
from molgears.widgets.structure import checksmi
from datetime import datetime
#from tg.decorators import paginate
from webhelpers import paginate
from molgears.widgets.rgbTuple import htmlRgb, htmlRgb100, Num2Rgb
from molgears.controllers.ctoxicity import CytotoxicityController
__all__ = ['ResultsController']
class ResultsController(BaseController):
ctoxicity=CytotoxicityController()
@expose('molgears.templates.users.results.index')
def index(self, page=1, *args, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
project = DBSession.query(Projects).filter_by(name=pname).first()
page_url = paginate.PageURL_WebOb(request)
import pickle
try:
cells = pickle.loads([test.cell_line for test in project.tests if test.name == 'CT'][0])
except:
cells = None
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(LCompound.showme==True)
dsc = True
order = LCompound.id
tmpl = ''
alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
selection = None
similarity = None
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
ulist = None
ulists = set([l for l in user.lists if l.table == 'Results'] + [l for l in user.tg_user_lists if l.table == 'Results'])
items = user.items_per_page
try:
if kw['search'] != u'':
search_clicked = kw['search']
else:
search_clicked = None
except Exception:
search_clicked = None
if kw:
if kw.has_key('mylist'):
try:
ulist_id = int(kw['mylist'])
ulist = DBSession.query(UserLists).get(ulist_id)
except Exception:
flash(l_(u'List error'), 'error')
redirect(request.headers['Referer'])
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
if ulist.table == 'Results':
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(LCompound.id.in_(elements))
else:
flash(l_(u'Table error'), 'error')
redirect(request.headers['Referer'])
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
for k, v in kw.iteritems():
if str(k) == 'desc' and str(v) != '1':
dsc = None
elif str(k) == 'order_by':
if v in ('gid', 'create_date', 'box', 'form', 'state', 'entry', 'source', 'MDM2', 'MDM4', 'lcode'):
if v=='lcode':
order = LCompound.lcode
else:
order = getattr(LCompound, v)
else:
if v=='last_point':
lcompound=lcompound.join(LCompound.solubility)
order = v
elif hasattr(LCompound, v):
order = getattr(LCompound, v)
elif 'CTOX_' in v:
v = v.replace('CTOX_', '')
all_lcompounds = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).all()
for l in all_lcompounds:
l.avg_ct = v.replace('pp', '+')
order = '_avg_ct'
else:
order = v
if str(k) != 'select' and str(k) != 'remove' and str(v) != u'':
tmpl += str(k) + '=' + str(v) + '&'
elif str(k) == 'select':
try:
if isinstance(kw['select'], basestring):
selection = [kw['select']]
else:
selection = [id for id in kw['select']]
except Exception:
selection = None
if search_clicked:
try:
smiles = str(kw['smiles'])
if 'pp' in smiles:
smiles = smiles.replace('pp', '+')
method = str(kw['method'])
except Exception:
smiles = None
method = None
if smiles:
if checksmi(smiles):
from razi.functions import functions
from razi.expression import TxtMoleculeElement
if method == 'similarity':
# from razi.postgresql_rdkit import tanimoto_threshold
query_bfp = functions.morgan_b(TxtMoleculeElement(smiles), 2)
constraint = Compound.morgan.tanimoto_similar(query_bfp)
tanimoto_sml = Compound.morgan.tanimoto_similarity(query_bfp).label('tanimoto')
search = DBSession.query(LCompound, tanimoto_sml).join(LCompound.mol).join(LCompound.purity).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
if order != LCompound.id:
if order == 'purity':
order = LPurity.value
if dsc:
search = search.order_by(desc(order).nullslast())
else:
search = search.order_by(order)
else:
search = search.order_by(desc(tanimoto_sml)).all()
lcompound = ()
similarity = ()
for row in search:
lcompound += (row[0], )
similarity += (row[1], )
currentPage = paginate.Page(lcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', pname=pname, alltags=alltags, similarity=similarity,htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, cells=cells, ulists=ulists, ulist=ulist)
elif method == 'substructure':
constraint = Compound.structure.contains(smiles)
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
elif method == 'identity':
lcompound = DBSession.query(LCompound).filter(Compound.project.any(Projects.name==pname)).join(LCompound.mol).filter(Compound.structure.equals(smiles))
else:
if method == 'smarts':
if dsc:
lcompound = lcompound.order_by(desc(order).nullslast())
else:
lcompound = lcompound.order_by(order)
search = lcompound.all()
sub_lcompounds = ()
patt = Chem.MolFromSmarts(smiles)
if not patt:
flash(l_(u'SMARTS error'), 'warning')
redirect(request.headers['Referer'])
for row in search:
m = Chem.MolFromSmiles(str(row.mol.structure))
mol = Chem.AddHs(m)
if mol.HasSubstructMatch(patt):
sub_lcompounds += (row, )
currentPage = paginate.Page(sub_lcompounds, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', pname=pname, alltags=alltags, similarity=similarity,htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, cells=cells, ulists=ulists, ulist=ulist)
else:
flash(l_(u'SMILES error'), 'warning')
redirect(request.headers['Referer'])
if kw.has_key('text_GID') and kw['text_GID'] !=u'':
try:
gid = int(kw['text_GID'])
lcompound = lcompound.filter(LCompound.gid == gid)
except Exception as msg:
flash(l_(u'GID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_ID') and kw['text_ID'] !=u'':
try:
id = int(kw['text_ID'])
lcompound = lcompound.filter(LCompound.id == id)
except Exception as msg:
flash(l_(u'ID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_name') and kw['text_name'] !=u'':
lcompound = lcompound.filter(Compound.names.any(Names.name.like(kw['text_name'].strip().replace('*', '%'))))
if kw.has_key('text_notes') and kw['text_notes'] !=u'':
lcompound = lcompound.filter(LCompound.notes.like(kw['text_notes'].replace('*', '%')))
if kw.has_key('text_lso') and kw['text_lso'] !=u'':
lcompound = lcompound.filter(LCompound.lso.like(kw['text_lso'].replace('*', '%')))
if kw.has_key('text_entry') and kw['text_entry'] !=u'':
lcompound = lcompound.filter(LCompound.entry.like(kw['text_entry'].replace('*', '%')))
if kw.has_key('text_box') and kw['text_box'] !=u'':
lcompound = lcompound.filter(LCompound.box.like(kw['text_box'].replace('*', '%')))
if kw.has_key('date_from') and kw['date_from'] !=u'':
date_from = datetime.strptime(str(kw['date_from']), '%Y-%m-%d')
lcompound = lcompound.filter(LCompound.create_date > date_from)
else:
date_from = None
if kw.has_key('date_to') and kw['date_to'] !=u'':
date_to = datetime.strptime(str(kw['date_to']), '%Y-%m-%d')
if date_from:
if date_to>date_from:
lcompound = lcompound.filter(LCompound.create_date < date_to)
else:
flash(l_(u'The End date must be later than the initial'), 'error')
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.create_date < date_to)
if kw.has_key('text_mdm2_hill_from') and kw['text_mdm2_hill_from'] !=u'':
text_mdm2_hill_from = float(kw['text_mdm2_hill_from'])
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 >= text_mdm2_hill_from)
else:
text_mdm2_hill_from = None
if kw.has_key('text_mdm2_hill_to') and kw['text_mdm2_hill_to'] !=u'':
text_mdm2_hill_to = float(kw['text_mdm2_hill_to'])
if text_mdm2_hill_from:
if text_mdm2_hill_to>=text_mdm2_hill_from:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 <= text_mdm2_hill_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 <= text_mdm2_hill_to)
if kw.has_key('text_mdm2_fluor_from') and kw['text_mdm2_fluor_from'] !=u'':
text_mdm2_fluor_from = float(kw['text_mdm2_fluor_from'])
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 >= text_mdm2_fluor_from)
else:
text_mdm2_fluor_from = None
if kw.has_key('text_mdm2_fluor_to') and kw['text_mdm2_fluor_to'] !=u'':
text_mdm2_fluor_to = float(kw['text_mdm2_fluor_to'])
if text_mdm2_fluor_from:
if text_mdm2_fluor_to>=text_mdm2_fluor_from:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 <= text_mdm2_fluor_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 <= text_mdm2_fluor_to)
if kw.has_key('text_mdm2_ki_from') and kw['text_mdm2_ki_from'] !=u'':
text_mdm2_ki_from = float(kw['text_mdm2_ki_from'])
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 >= text_mdm2_ki_from)
else:
text_mdm2_ki_from = None
if kw.has_key('text_mdm2_ki_to') and kw['text_mdm2_ki_to'] !=u'':
text_mdm2_ki_to = float(kw['text_mdm2_ki_to'])
if text_mdm2_ki_from:
if text_mdm2_ki_to>=text_mdm2_ki_from:
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 <= text_mdm2_ki_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 <= text_mdm2_ki_to)
if kw.has_key('text_mdm4_hill_from') and kw['text_mdm4_hill_from'] !=u'':
text_mdm4_hill_from = float(kw['text_mdm4_hill_from'])
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 >= text_mdm4_hill_from)
else:
text_mdm4_hill_from = None
if kw.has_key('text_mdm4_hill_to') and kw['text_mdm4_hill_to'] !=u'':
text_mdm4_hill_to = float(kw['text_mdm4_hill_to'])
if text_mdm4_hill_from:
if text_mdm4_hill_to>=text_mdm4_hill_from:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 <= text_mdm4_hill_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 <= text_mdm4_hill_to)
if kw.has_key('text_mdm4_fluor_from') and kw['text_mdm4_fluor_from'] !=u'':
text_mdm4_fluor_from = float(kw['text_mdm4_fluor_from'])
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 >= text_mdm4_fluor_from)
else:
text_mdm4_fluor_from = None
if kw.has_key('text_mdm4_fluor_to') and kw['text_mdm4_fluor_to'] !=u'':
text_mdm4_fluor_to = float(kw['text_mdm4_fluor_to'])
if text_mdm4_fluor_from:
if text_mdm4_fluor_to>=text_mdm4_fluor_from:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 <= text_mdm4_fluor_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 <= text_mdm4_fluor_to)
if kw.has_key('text_mdm4_ki_from') and kw['text_mdm4_ki_from'] !=u'':
text_mdm4_ki_from = float(kw['text_mdm4_ki_from'])
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 >= text_mdm4_ki_from)
else:
text_mdm4_ki_from = None
if kw.has_key('text_mdm4_ki_to') and kw['text_mdm4_ki_to'] !=u'':
text_mdm4_ki_to = float(kw['text_mdm4_ki_to'])
if text_mdm4_ki_from:
if text_mdm4_ki_to>=text_mdm4_ki_from:
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 <= text_mdm4_ki_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 <= text_mdm4_ki_to)
try:
tags = kw['text_tags']
except Exception:
tags = None
if tags:
if isinstance(tags, basestring):
tagi = eval(tags)
if type(tagi) != type([]):
tagi = [int(tags)]
else:
tagi = [int(tid) for tid in tags]
lcompound = lcompound.filter(Compound.tags.any(Tags.id.in_(tagi)))
if dsc:
lcompound = lcompound.order_by(desc(order).nullslast())
else:
lcompound = lcompound.order_by(order)
if search_clicked and kw['search'] == "Download":
if kw['file_type'] and kw['file_type'] != u'' and kw['sell_type'] and kw['sell_type'] != u'':
if kw['sell_type'] == u'all':
lcompounds = lcompound.all()
elif kw['sell_type'] == u'selected':
if selection:
lcompounds = ()
for el in selection:
lcompounds += (DBSession.query(LCompound).get(el), )
else:
flash(l_(u'Lack of selected structures for download'), 'error')
redirect(request.headers['Referer'])
elif kw['sell_type'] == u'range':
lcompounds = lcompound.all()
if kw.has_key('select_from') and kw['select_from'] != u'':
try:
select_from = int(kw['select_from']) -1
if select_from<1 or select_from>len(lcompounds):
select_from = 0
except Exception:
select_from = 0
else:
select_from = 0
if kw.has_key('select_to') and kw['select_to'] != u'':
try:
select_to = int(kw['select_to'])
if select_to<2 or select_to>len(lcompounds):
select_to = len(lcompounds)
except Exception:
select_to = len(lcompounds)
else:
select_to = len(lcompounds)
lcompounds_new = ()
for el in range(select_from, select_to):
lcompounds_new += (lcompounds[el], )
lcompounds = lcompounds_new
else:
flash(l_(u'Lack of items to download'), 'error')
redirect(request.headers['Referer'])
try:
if isinstance(kw['options'], basestring):
options = [kw['options']]
else:
options = kw['options']
except Exception:
flash(l_('Choose download options'), 'error')
redirect(request.headers['Referer'])
if 'getsize' in kw:
size = int(kw['getsize']), int(kw['getsize'])
else:
size = 100, 100
if kw['file_type'] == 'pdf':
filename = userid + '_selected.pdf'
from xhtml2pdf.pisa import CreatePDF
from tg.render import render as render_template
import cStringIO
html = render_template({"length":len(lcompounds), "lcompound":lcompounds, "cells":cells, "options":options, "size":size}, "genshi", "molgears.templates.users.results.print2", doctype=None)
dest = './molgears/files/pdf/' + filename
result = file(dest, "wb")
CreatePDF(cStringIO.StringIO(html.encode("UTF-8")), result, encoding="utf-8")
result.close()
import paste.fileapp
f = paste.fileapp.FileApp('./molgears/files/pdf/'+ filename)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'xls':
filename = userid + '_selected.xls'
filepath = os.path.join('./molgears/files/download/', filename)
from PIL import Image
import xlwt
wbk = xlwt.Workbook()
sheet = wbk.add_sheet('sheet1')
j=0
if 'nr' in options:
sheet.write(0,j,u'Nr.')
j+=1
if 'gid' in options:
sheet.write(0,j,u'GID')
j+=1
if 'id' in options:
sheet.write(0,j,u'ID')
j+=1
if 'name' in options:
sheet.write(0,j,u'Name')
j+=1
if 'names' in options:
sheet.write(0,j,u'Names')
j+=1
if 'image' in options:
sheet.write(0,j,u'Image')
j+=1
if 'smiles' in options:
sheet.write(0,j,u'SMILES')
j+=1
if 'inchi' in options:
sheet.write(0,j,u'InChi')
j+=1
if 'lso' in options:
sheet.write(0,j,u'LSO')
j+=1
if 'num_atoms' in options:
sheet.write(0,j,u'Atoms')
j+=1
if 'mw' in options:
sheet.write(0,j,u'MW')
j+=1
if 'hba' in options:
sheet.write(0,j,u'hba')
j+=1
if 'hbd' in options:
sheet.write(0,j,u'hbd')
j+=1
if 'tpsa' in options:
sheet.write(0,j,u'tpsa')
j+=1
if 'logp' in options:
sheet.write(0,j,u'logP')
j+=1
if 'purity' in options:
sheet.write(0,j, u'Purity')
j+=1
if 'create_date' in options:
sheet.write(0,j,u'Date')
j+=1
if 'box' in options:
sheet.write(0,j,u'Box')
j+=1
if 'entry' in options:
sheet.write(0,j,u'Entry')
j+=1
if 'source' in options:
sheet.write(0,j,u'Source')
j+=1
if 'content' in options:
sheet.write(0,j,u'Content')
j+=1
if 'tags' in options:
sheet.write(0,j,u'Tags')
j+=1
if 'notes' in options:
sheet.write(0,j,u'Notes')
j+=1
for cell_line in cells:
if '_CT_%s' % cell_line in options:
sheet.write(0,j,u'CT %s' % cell_line)
j+=1
i = 1
for row in lcompounds:
j=0
if 'nr' in options:
sheet.write(i,j, str(i))
j+=1
if 'gid' in options:
sheet.write(i,j, row.gid)
j+=1
if 'id' in options:
sheet.write(i,j, row.id)
j+=1
if 'name' in options:
sheet.write(i,j, row.mol.name)
j+=1
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
sheet.write(i,j, names)
j+=1
if 'image' in options:
file_in = './molgears/public/img/%s.png' % row.gid
img = Image.open(file_in)
file_out = './molgears/public/img/bitmap/thumb%s.bmp' %row.gid
img.thumbnail(size, Image.ANTIALIAS)
img.save(file_out)
sheet.insert_bitmap(file_out , i,j, 5, 5)
j+=1
if 'smiles' in options:
sheet.write(i,j, str(row.mol.structure))
j+=1
if 'inchi' in options:
sheet.write(i,j, str(row.mol.inchi))
j+=1
if 'lso' in options:
sheet.write(i,j, row.lso)
j+=1
if 'num_atoms' in options:
sheet.write(i,j,str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
j+=1
if 'mw' in options:
sheet.write(i,j, str(row.mol.mw))
j+=1
if 'hba' in options:
sheet.write(i,j, str(row.mol.hba))
j+=1
if 'hbd' in options:
sheet.write(i,j, str(row.mol.hbd))
j+=1
if 'tpsa' in options:
sheet.write(i,j, str(row.mol.tpsa))
j+=1
if 'logp' in options:
sheet.write(i,j, str(row.mol.logp))
j+=1
if 'state' in options:
sheet.write(i,j, str(row.state))
j+=1
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
sheet.write(i,j, pur)
j+=1
if 'create_date' in options:
sheet.write(i,j, str(row.create_date))
j+=1
if 'owner' in options:
sheet.write(i,j, row.owner)
j+=1
if 'box' in options:
sheet.write(i,j, row.box)
j+=1
if 'entry' in options:
sheet.write(i,j, row.entry)
j+=1
if 'source' in options:
sheet.write(i,j, row.source)
j+=1
if 'content' in options:
if row.content:
sheet.write(i,j, str(row.content.value))
else:
sheet.write(i,j, 'None')
j+=1
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
sheet.write(i,j,tagsy)
j+=1
if 'notes' in options:
sheet.write(i,j, row.notes)
j+=1
for cell_line in cells:
if '_CT_%s' % cell_line in options:
res = []
if row.ctoxicity:
for ct in sorted(row.ctoxicity, key=lambda ct: ct.id):
if ct.cell_line==cell_line:
res.append(ct.ic50)
if len(res)>0:
sheet.write(i,j, str(round(sum(res)/len(res), 3)))
else:
sheet.write(i,j, '')
j+=1
i += 1
wbk.save(filepath)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'sdf':
filepath = './molgears/files/download/out.sdf'
ww = Chem.SDWriter(filepath)
from rdkit.Chem import AllChem
for row in lcompounds:
m2 = Chem.MolFromSmiles(str(row.mol.structure))
AllChem.Compute2DCoords(m2)
AllChem.EmbedMolecule(m2)
AllChem.UFFOptimizeMolecule(m2)
if 'smiles' in options:
m2.SetProp("smiles", str(row.mol.structure))
if 'name' in options:
m2.SetProp("_Name", str(row.mol.name.encode('ascii', 'ignore')))
if 'nr' in options:
m2.SetProp("Nr", str(lcompounds.index(row)+1))
if 'gid' in options:
m2.SetProp("GID", str(row.gid))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + ', '
m2.SetProp("names", str(names.encode('ascii', 'ignore')))
if 'inchi' in options:
m2.SetProp("InChi", str(row.mol.inchi))
if 'lso' in options:
m2.SetProp("LSO", str(row.lso))
if 'num_atoms' in options:
m2.SetProp("atoms", str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
if 'mw' in options:
m2.SetProp("mw", str(row.mol.mw))
if 'hba' in options:
m2.SetProp("hba", str(row.mol.hba))
if 'hbd' in options:
m2.SetProp("hbd", str(row.mol.hbd))
if 'tpsa' in options:
m2.SetProp("TPSA", str(row.mol.tpsa))
if 'logp' in options:
m2.SetProp("logP", str(row.mol.tpsa))
if 'create_date' in options:
m2.SetProp("create_date", str(row.create_date))
if 'owner' in options:
m2.SetProp("owner", str(row.owner))
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
m2.SetProp("tagi", str(tagsy.encode('ascii', 'ignore')))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s \n' % (p.value, p.type)
m2.SetProp("purity", str(pur.encode('ascii', 'ignore')))
if 'content' in options:
if row.content:
m2.SetProp("content", str(row.content.value))
else:
m2.SetProp("content", "None")
if 'box' in options:
m2.SetProp("box", str(row.box))
if 'entry' in options:
m2.SetProp("entry", str(row.entry))
if 'notes' in options:
if row.notes:
m2.SetProp("notes", str(row.notes.encode('ascii', 'ignore')))
else:
m2.SetProp("notes", " ")
for cell_line in cells:
if '_CT_%s' % cell_line in options:
res = []
if row.ctoxicity:
for ct in sorted(row.ctoxicity, key=lambda ct: ct.id):
if ct.cell_line==cell_line:
res.append(ct.ic50)
if len(res)>0:
m2.SetProp('CT_%s' % cell_line, str(round(sum(res)/len(res), 3)))
else:
m2.SetProp('CT_%s' % cell_line, ' ')
ww.write(m2)
ww.close()
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] in ('csv', 'txt'):
filename = userid + '_selected.' + kw['file_type']
filepath = os.path.join('./molgears/files/download/', filename)
from molgears.widgets.unicodeCSV import UnicodeWriter
import csv
if kw['file_type'] == u'csv':
delimiter = ';'
else:
delimiter = ' '
with open(filepath, 'wb') as csvfile:
spamwriter = UnicodeWriter(csvfile, delimiter=delimiter,
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in lcompounds:
line =[]
if 'smiles' in options:
line.append(str(row.mol.structure))
if 'name' in options:
line.append(row.mol.name)
if 'nr' in options:
line.append(unicode(lcompounds.index(row)+1))
if 'gid' in options:
line.append(unicode(row.gid))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
line.append(names)
if 'inchi' in options:
line.append(row.mol.inchi)
if 'lso' in options:
line.append(row.lso)
if 'num_atoms' in options:
line.append(unicode(row.mol.num_hvy_atoms)+'/'+unicode(row.mol.num_atoms))
if 'mw' in options:
line.append(unicode(row.mol.mw))
if 'hba' in options:
line.append(unicode(row.mol.hba))
if 'hbd' in options:
line.append(unicode(row.mol.hbd))
if 'tpsa' in options:
line.append(unicode(row.mol.tpsa))
if 'logp' in options:
line.append(unicode(row.mol.logp))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
line.append(pur)
if 'create_date' in options:
line.append(unicode(row.create_date))
if 'owner' in options:
line.append(row.owner)
if 'box' in options:
line.append(row.box)
if 'entry' in options:
line.append(row.entry)
if 'source' in options:
line.append(row.source)
if 'content' in options:
if row.content:
line.append(unicode(row.content.value))
else:
line.append(u'None')
if 'tags' in options:
tagsy= ''
for tag in row.mol.tags:
tagsy += tag.name + ', '
line.append(tagsy)
if 'notes' in options:
line.append(row.notes)
spamwriter.writerow(line)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
if selection and not search_clicked:
argv =''
gids = ''
for arg in selection:
argv += '/' + arg
tmp_result = DBSession.query(LCompound).get(arg)
gids += '/' + str(tmp_result.gid)
if kw['akcja'] == u'edit':
redirect('/%s/molecules/multiedit/index%s' % (pname, gids))
elif kw['akcja'] == u'results':
if len(selection) == 1:
redirect('/%s/results/new_result%s' % (pname, argv))
else:
redirect('/%s/results/multiresults/index%s' % (pname, argv))
elif kw['akcja'] == u'htrf':
if len(selection) == 1:
redirect('/%s/results/htrf/add_result2%s' % (pname, argv))
currentPage = paginate.Page(lcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, pname=pname, alltags=alltags, similarity=similarity, cells=cells, ulists=ulists, ulist=ulist)
@expose()
def deletefromlist(self, ulist_id, *args):
"""
Delete compound from User List.
"""
ulist = DBSession.query(UserLists).get(ulist_id)
# pname = request.environ['PATH_INFO'].split('/')[1]
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
# ulists = [l for l in user.lists if l.table == 'Results']
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
for arg in args:
if int(arg) in elements:
elements.remove(int(arg))
ulist.elements = pickle.dumps(elements)
flash(l_(u'Task completed successfully'))
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
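# ---------------------------------------------------------------------------
# Illustrative sketch (not wired into the controller): the SMARTS branch of
# index() filters compounds with the RDKit calls shown here. The standalone
# helper below mirrors that loop for a plain list of SMILES strings; the
# function name and arguments are placeholders invented for this sketch.
def _smarts_filter_sketch(smiles_list, smarts):
    patt = Chem.MolFromSmarts(smarts)
    if patt is None:
        return []
    hits = []
    for smi in smiles_list:
        m = Chem.MolFromSmiles(smi)
        if m is None:
            continue
        mol = Chem.AddHs(m)  # add explicit hydrogens, as index() does
        if mol.HasSubstructMatch(patt):
            hits.append(smi)
    return hits
# ---------------------------------------------------------------------------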
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
"""DdosProtectionPlansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosProtectionPlan"]
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosProtectionPlan"]
"""Update a DDoS protection plan tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the update DDoS protection plan resource tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
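# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code, never called).
# Assumptions of this sketch: the public ``azure-mgmt-network`` client and
# ``azure-identity`` packages are installed, and the resource-group / plan
# names are placeholders.
def _ddos_plans_usage_sketch(subscription_id):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    plan = client.ddos_protection_plans.get("my-rg", "my-plan")
    # long-running operations return an LROPoller; .result() blocks until done
    client.ddos_protection_plans.begin_delete("my-rg", "my-plan").result()
    return plan.provisioning_state
# ---------------------------------------------------------------------------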
|
import setuptools
setuptools.setup(
name='qspreadsheet',
version='0.1.0',
author='TT-at-GitHub',
author_email='tt3d@start.bg',
license='MIT',
packages=setuptools.find_packages(),
install_requires=[
'numpy>=1.19.0',
'pandas>=1.0.5',
'PySide2>=5.13.0'
],
description='Package used to show and edit pandas DataFrame in GUI',
python_requires='>=3.7.5'
)
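# Illustrative install/build commands (assumption: run from the directory
# containing this setup.py, with a Python >= 3.7.5 interpreter, per
# python_requires above):
#   python -m pip install .   # builds and installs qspreadsheet plus its deps
#   python setup.py sdist     # builds a source distribution into ./dist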
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libgcrypt(AutotoolsPackage):
"""Libgcrypt is a general purpose cryptographic library based on
the code from GnuPG. It provides functions for all cryptographic
building blocks: symmetric ciphers, hash algorithms, MACs, public
key algorithms, large integer functions, random numbers and a lot
of supporting functions. """
homepage = "http://www.gnu.org/software/libgcrypt/"
url = "https://gnupg.org/ftp/gcrypt/libgcrypt/libgcrypt-1.8.1.tar.bz2"
version('1.8.1', 'b21817f9d850064d2177285f1073ec55')
version('1.7.6', '54e180679a7ae4d090f8689ca32b654c')
version('1.6.2', 'b54395a93cb1e57619943c082da09d5f')
depends_on("libgpg-error")
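# Illustrative Spack commands (assumptions: a Spack installation that can see
# this package; the version below is one of those declared above):
#   spack install libgcrypt@1.8.1   # builds libgpg-error first via depends_on
#   spack spec libgcrypt            # shows the concretized dependency DAG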
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 07:52:25 2020
Generate, Plot, and write all data needed for ball drop example 1
@author: granthutchings
"""
#%% Imports
import numpy as np
#import pyDOE # Latin Hypercube
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from invertH import invertHsim, invertHtrue
#%% notes
# x = R
# theta = C
# y = {h, t}, i.e., pairs of h and t that form a trace when plotted
# imagine the field experiments involve say 4 platforms --> 4 values of h.
# Then for each R, one experiment gives output of 4 h-t pairs (a curve).
# Likewise for the simulator, we have a dense grid of say 100 heights h.
# Then for each setting of {x, theta} = {R, C} we get output of 100 h-t
# pairs.
# I'll make python files to:
# 1. generate the h-t pairs and write them into files. (this file and invertH.py)
# 2. a "runmcmc"-type file that first calls...
# 3. ...a file that reads in the data and packages it appropriately
# generate "field" data and "simulator" data, where the simulator model is
# systematically off from reality.
# true: d2h/dt2 = g - C (dh/dt)^2 / R
# sim: d2h/dt2 = g - C (dh/dt) / R
# inputs for field experiments: x = R
# inputs for simulator: x = R, theta = C
# We want to calibrate theta in the simulator to match the field data.
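#%% Illustrative sketch (not used below): the data are generated by
# invertHtrue / invertHsim in invertH.py; the helper here only shows, under a
# simple forward-Euler assumption, how the "true" model above maps a height to
# a drop time. The step size dt and the function name are choices made for
# this sketch, not part of the example's pipeline.
def _drop_time_true_sketch(h, g=9.8, C=0.1 / (4 * np.pi / 3), R=0.2, dt=1e-4):
    """Time for a ball of radius R to fall height h under d2h/dt2 = g - C*v^2/R."""
    fallen, v, t = 0.0, 0.0, 0.0
    while fallen < h:
        a = g - C * v**2 / R   # quadratic-drag acceleration of the "true" model
        v += a * dt
        fallen += v * dt
        t += dt
    return t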
#%% Compute data
def gen_data(et,plot_design=False,R_new=None,R_design=None,C_design=None):
n = 3; m = 25
g = 9.8 # gravity
C_true = .1 / (4 * np.pi / 3); print('generating data with C = ',C_true)
n_field_heights = 4
h_field = np.linspace(5,20,n_field_heights) # platform heights for the field experiments
h_sim = np.arange(1.5,25,1.5) # grid of heights fed to the simulator
h_dense = np.concatenate((np.arange(0,2,.01),np.arange(2,25,.5))) # a denser grid for drawing the curves
# the coefficient of drag for a smooth sphere is 0.1, and we're
# dividing by 4/3 pi to absorb a constant related to the volume of the
# sphere (not including R)
if R_new is None: R = np.array([.1, .2, .4]) # radii of balls to try (in meters)
else: R = R_new
# get a Latin hypercube sim_design of m=25 points over R_sim, C_sim
#sim_design = pyDOE.lhs(2,m)
# Use Kary's design for testing purposes
sim_design = np.array([
[0.1239, 0.8024],
[0.8738, 0.6473],
[0.6140, 0.3337],
[0.8833, 0.4783],
[0.9946, 0.0548],
[0.1178, 0.9382],
[0.1805, 0.2411],
[0.6638, 0.2861],
[0.2939, 0.1208],
[0.2451, 0.2397],
[0.4577, 0.5696],
[0.4377, 0.8874],
[0.0737, 0.7384],
[0.6931, 0.8683],
[0.4901, 0.7070],
[0.5953, 0.9828],
[0.7506, 0.1009],
[0.7783, 0.4225],
[0.8333, 0.5318],
[0.3987, 0.6312],
[0.2021, 0.4990],
[0.3495, 0.3680],
[0.9411, 0.7935],
[0.0198, 0.0218],
[0.5440, 0.1925]])
# scale the first column to [0,.5] and call it R_sim
# (this includes our field values, i.e., R \in [0,.5])
# scale the second column to [0.05,.25] and call it Csim
# (likewise, Ctrue \in [0.05, .25])
sim_design[:,0] = sim_design[:,0] * .4 + .05
sim_design[:,1] = sim_design[:,1] * .2 + .05
if R_design is not None: R_sim = R_design
else: R_sim = sim_design[:,0]
if C_design is not None: C_sim = C_design
else: C_sim = sim_design[:,1]
if plot_design:
plt.scatter(R_sim,C_sim)
plt.xlabel("R design points");plt.ylabel("C design points")
plt.title("Simulator Design")
plt.show()
# Generate field data for each R
y_field = invertHtrue(h_field, g, C_true, R, et) # observed times
y_field_dense = invertHtrue(h_dense, g, C_true, R, et) # dense grid for plots
# imagine that the biggest ball is too big to get to the highest
# platform, so we don't observe data there
#y_field[-1,-1] = np.nan
# Generate simulated data for each (C,R) pair
y_sim = invertHsim(h_sim, g, C_sim, R_sim)
y_sim_dense = invertHsim(h_dense, g, C_sim, R_sim)
data_dict = dict([('R',R),('sim_design',np.column_stack((R_sim,C_sim))),\
('n',n),('m',m),('C_true',C_true),\
('h_field',h_field),('h_sim',h_sim),('h_dense',h_dense),\
('y_field',y_field),('y_field_dense',y_field_dense),\
('y_sim',y_sim),('y_sim_dense',y_sim_dense)])
return(data_dict)
#%% #===================== Plots ===============================#
def plot_data(data_dict,inset=True,near_sim=True):
n = data_dict['n']
m = data_dict['m']
y_sim = data_dict['y_sim']
y_field = data_dict['y_field']
R = data_dict['R']
R_sim = data_dict['sim_design'][:,0]
C_sim = data_dict['sim_design'][:,1]
h_field = data_dict['h_field']
h_sim = data_dict['h_sim']
h_dense = data_dict['h_dense']
y_field = data_dict['y_field']
y_field_dense = data_dict['y_field_dense']
y_sim = data_dict['y_sim']
y_sim_dense = data_dict['y_sim_dense']
if isinstance(y_field, list): ragged = True
else: ragged = False
if ragged:
y_max = max(max(np.array([np.max(k) for k in y_field])),max(y_sim.max(1)))
else:
y_max = max(max(y_field.max(1)),max(y_sim.max(1))) # max of all row maxes for axis limit
# find the closest design points for each R
# ith column of R_nearest_sim_design contains the n_neighbors nearest design points (by index)
# for ith value of R
n_neighbors = 3
R_nearest_sim_design = np.zeros(shape=(n_neighbors,len(R)),dtype=int)
for i in range(len(R)):
dist = np.argsort(np.abs(R_sim-R[i]))
R_nearest_sim_design[:,i] = dist[0:n_neighbors]
# Generate plot for each radius
colors = ('r', 'g', 'b')
fig = plt.figure(figsize=[12,12],constrained_layout=True)
gs = GridSpec(2,2,figure=fig)
axs = np.array([fig.add_subplot(gs[0,0]),\
fig.add_subplot(gs[0,1]),\
fig.add_subplot(gs[1,0])])
for i in range(len(R)):
# axis limits, ticks, and labels
axs[i].set_xlim([0, 25])
axs[i].set_ylim([0, y_max+.5])
axs[i].xaxis.set_ticks(np.arange(0,30,5))
axs[i].yaxis.set_ticks(np.arange(0,y_max+.5,1))
axs[i].set_title("Ball Radius {} m".format(R[i]),fontweight="bold")
axs[i].set_xlabel("Distance (m)")
axs[i].set_ylabel("Time (s)")
# simulations - all
for j in range(m):
axs[i].plot(h_dense, np.transpose(y_sim_dense)[:,j],color='lightgreen',\
label="Simulation runs" if j==0 else "")
if near_sim:
# simulations - nearest neighbors
for j in range(n_neighbors):
axs[i].plot(h_dense,np.transpose(y_sim_dense)[:,R_nearest_sim_design[j,i]],\
linestyle="--",\
color=colors[j],label="Nearest Sim {}".format(j+1))
# true data curve and "real data points"
axs[i].plot(h_dense, y_field_dense[i,:],'k',label="Reality")
if ragged:
axs[i].plot(h_field[i],y_field[i],'ks',label='Field data')
else:
axs[i].plot(h_field, y_field[i,],'ks',label="Field data")
axs[i].legend(loc="lower right")
if inset:
# embed design-point subplot
inset_ax = inset_axes(axs[i],width="30%",height="30%",loc="upper left",\
borderpad=2.5)
inset_ax.set_xlabel("R design values",fontsize=7,labelpad=1)
inset_ax.set_ylabel("C design values",fontsize=7)
inset_ax.xaxis.set_ticks(R)
inset_ax.yaxis.set_ticks(np.arange(0,.251,.05))
inset_ax.tick_params(axis='both', which='major', labelsize=7, pad = -5)
inset_ax.scatter(R_sim,C_sim,s=15, facecolors='none', edgecolors='grey')
inset_ax.scatter(R_sim[R_nearest_sim_design[:,i]],C_sim[R_nearest_sim_design[:,i]],s=15,\
color=colors)
inset_ax.axvline(x=R[i], ymin=0, ymax=1,color='k',linewidth=.5)
plt.savefig('data/plotAll.png', dpi=300)
plt.show()
#%% #==================== Write data ===========================#
# write the h-t pairs into files
def write_data(data_dict, datadir = '/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):
# datadir == directory where data files should be written to or read from
# sim.dat, should be length(hsim) x length(Csim)
y_sim = data_dict['y_sim']
with open(datadir+'sim.dat',"w+") as f:
for line in np.array(np.transpose(y_sim)):
np.savetxt(f, line)
# sim.height, a file with just the heights (same for all sim runs)
h_sim = data_dict['h_sim']
with open(datadir+'sim.height',"w+") as f:
for line in np.array(np.transpose(h_sim)):
np.savetxt(f, line)
    # sim.design, length(Csim) x (num X's + num thetas)
    R_sim = data_dict['sim_design'][:,0]; C_sim = data_dict['sim_design'][:,1]
sim_design = np.transpose(np.array([R_sim, C_sim]))
with open(datadir+'sim.design',"w+") as f:
for line in sim_design:
np.savetxt(f, line)
# field.dat, one row per experiment (radius)
y_field = data_dict['y_field']
with open(datadir+'field.dat',"w+") as f:
for line in np.array(y_field):
np.savetxt(f, line)
# field.height
h_field = data_dict['h_field']
with open(datadir+'field.height',"w+") as f:
for line in np.array(h_field):
np.savetxt(f, line)
# field radii
R = data_dict['R']
with open(datadir+'field.radii',"w+") as f:
for line in np.array(R):
np.savetxt(f, line)
#%%
def read_data(datadir = '/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):
with open(datadir+'sim.dat','r') as f:
y_sim = np.loadtxt(f)
with open(datadir+'sim.height',"r") as f:
h_sim = np.loadtxt(f)
with open(datadir+'sim.design','r') as f:
sim_design = np.loadtxt(f)
with open(datadir+'field.dat','r') as f:
y_field = np.loadtxt(f)
with open(datadir+'field.height','r') as f:
h_field = np.loadtxt(f)
with open(datadir+'field.radii','r') as f:
R = np.loadtxt(f)
data_dict = dict([('R',R),('sim_design',sim_design),\
('h_field',h_field),('h_sim',h_sim),\
('y_field',y_field),('y_sim',y_sim)])
return(data_dict)
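#%% #==================== Usage sketch =========================#
# Illustrative only (not part of the original example): round-trip the files
# written by write_data() through read_data(). The directory below is a
# placeholder; point it at wherever write_data() put the files. read_data()
# restores only the keys it writes ('R', 'sim_design', 'h_field', 'h_sim',
# 'y_field', 'y_sim'), so plot_data(), which also needs the dense grids,
# should be called with the dictionary from the data-generation step. Also
# note that arrays written row-by-row with np.savetxt come back flattened by
# np.loadtxt and may need reshaping before use.
if __name__ == "__main__":
    example_dir = 'data/ball_drop_1/'  # placeholder path, adjust as needed
    loaded = read_data(datadir=example_dir)
    print(sorted(loaded.keys()))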
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for IO functions
defined in :py:class:`monai.transforms.io.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
from pathlib import Path
from typing import Optional, Union
import numpy as np
from monai.config import DtypeLike, KeysCollection
from monai.data.image_reader import ImageReader
from monai.transforms.io.array import LoadImage, SaveImage
from monai.transforms.transform import MapTransform
from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, ensure_tuple, ensure_tuple_rep
__all__ = ["LoadImaged", "LoadImageD", "LoadImageDict", "SaveImaged", "SaveImageD", "SaveImageDict"]
class LoadImaged(MapTransform):
"""
    Dictionary-based wrapper of :py:class:`monai.transforms.LoadImage`.
    It can load both image data and metadata. When loading a list of files in one key,
    the arrays will be stacked and a new dimension will be added as the first dimension.
    In this case, the meta data of the first image will be used to represent the stacked result.
    The affine transforms of all the stacked images should be the same.
The output metadata field will be created as ``meta_keys`` or ``key_{meta_key_postfix}``.
If reader is not specified, this class automatically chooses readers
based on the supported suffixes and in the following order:
- User-specified reader at runtime when calling this loader.
- User-specified reader in the constructor of `LoadImage`.
- Readers from the last to the first in the registered list.
- Current default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),
(npz, npy -> NumpyReader), (others -> ITKReader).
Note:
- If `reader` is specified, the loader will attempt to use the specified readers and the default supported
readers. This might introduce overheads when handling the exceptions of trying the incompatible loaders.
In this case, it is therefore recommended to set the most appropriate reader as
the last item of the `reader` parameter.
See also:
- tutorial: https://github.com/Project-MONAI/tutorials/blob/master/modules/load_medical_images.ipynb
"""
def __init__(
self,
keys: KeysCollection,
reader: Optional[Union[ImageReader, str]] = None,
dtype: DtypeLike = np.float32,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
overwriting: bool = False,
image_only: bool = False,
allow_missing_keys: bool = False,
*args,
**kwargs,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
reader: register reader to load image file and meta data, if None, still can register readers
at runtime or use the default readers. If a string of reader name provided, will construct
a reader object with the `*args` and `**kwargs` parameters, supported reader name: "NibabelReader",
"PILReader", "ITKReader", "NumpyReader".
dtype: if not None convert the loaded image data to this data type.
meta_keys: explicitly indicate the key to store the corresponding meta data dictionary.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None, use `key_{postfix}` to store the metadata of the nifti image,
default is `meta_dict`. The meta data is a dictionary object.
For example, load nifti file for `image`, store the metadata into `image_meta_dict`.
overwriting: whether allow to overwrite existing meta data of same key.
default is False, which will raise exception if encountering existing key.
image_only: if True return dictionary containing just only the image volumes, otherwise return
dictionary containing image data array and header dict per input key.
allow_missing_keys: don't raise exception if key is missing.
args: additional parameters for reader if providing a reader name.
kwargs: additional parameters for reader if providing a reader name.
"""
super().__init__(keys, allow_missing_keys)
self._loader = LoadImage(reader, image_only, dtype, *args, **kwargs)
if not isinstance(meta_key_postfix, str):
raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self.overwriting = overwriting
def register(self, reader: ImageReader):
self._loader.register(reader)
def __call__(self, data, reader: Optional[ImageReader] = None):
"""
Raises:
KeyError: When not ``self.overwriting`` and key already exists in ``data``.
"""
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
data = self._loader(d[key], reader)
if self._loader.image_only:
if not isinstance(data, np.ndarray):
raise ValueError("loader must return a numpy array (because image_only=True was used).")
d[key] = data
else:
if not isinstance(data, (tuple, list)):
raise ValueError("loader must return a tuple or list (because image_only=False was used).")
d[key] = data[0]
if not isinstance(data[1], dict):
raise ValueError("metadata must be a dict.")
meta_key = meta_key or f"{key}_{meta_key_postfix}"
if meta_key in d and not self.overwriting:
raise KeyError(f"Meta data with key {meta_key} already exists and overwriting=False.")
d[meta_key] = data[1]
return d
class SaveImaged(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SaveImage`.
Note:
Image should be channel-first shape: [C,H,W,[D]].
If the data is a patch of big image, will append the patch index to filename.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None and `key_{postfix}` was used to store the metadata in `LoadImaged`.
need the key to extract metadata to save images, default is `meta_dict`.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, affine, original_shape, etc.
if no corresponding metadata, set to `None`.
output_dir: output image directory.
output_postfix: a string appended to all output file names, default to `trans`.
output_ext: output file extension name, available extensions: `.nii.gz`, `.nii`, `.png`.
resample: whether to resample before saving the data array.
if saving PNG format image, based on the `spatial_shape` from metadata.
if saving NIfTI format image, based on the `original_affine` from metadata.
mode: This option is used when ``resample = True``. Defaults to ``"nearest"``.
- NIfTI files {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
- PNG files {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode.
See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
padding_mode: This option is used when ``resample = True``. Defaults to ``"border"``.
- NIfTI files {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
- PNG files
This option is ignored.
scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling
[0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.
it's used for PNG format only.
dtype: data type during resampling computation. Defaults to ``np.float64`` for best precision.
if None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
it's used for NIfTI format only.
output_dtype: data type for saving data. Defaults to ``np.float32``.
it's used for NIfTI format only.
allow_missing_keys: don't raise exception if key is missing.
squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel
has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and
then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false,
image will always be saved as (H,W,D,C).
it's used for NIfTI format only.
data_root_dir: if not empty, it specifies the beginning parts of the input file's
absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from
`data_root_dir` to preserve folder structure when saving in case there are files in different
folders with the same file names. for example:
input_file_name: /foo/bar/test1/image.nii,
output_postfix: seg
output_ext: nii.gz
output_dir: /output,
data_root_dir: /foo/bar,
output will be: /output/test1/image/image_seg.nii.gz
separate_folder: whether to save every file in a separate folder, for example: if input filename is
`image.nii`, postfix is `seg` and folder_path is `output`, if `True`, save as:
`output/image/image_seg.nii`, if `False`, save as `output/image_seg.nii`. default to `True`.
print_log: whether to print log about the saved file path, etc. default to `True`.
"""
def __init__(
self,
keys: KeysCollection,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
output_dir: Union[Path, str] = "./",
output_postfix: str = "trans",
output_ext: str = ".nii.gz",
resample: bool = True,
mode: Union[GridSampleMode, InterpolateMode, str] = "nearest",
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
scale: Optional[int] = None,
dtype: DtypeLike = np.float64,
output_dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
squeeze_end_dims: bool = True,
data_root_dir: str = "",
separate_folder: bool = True,
print_log: bool = True,
) -> None:
super().__init__(keys, allow_missing_keys)
self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self._saver = SaveImage(
output_dir=output_dir,
output_postfix=output_postfix,
output_ext=output_ext,
resample=resample,
mode=mode,
padding_mode=padding_mode,
scale=scale,
dtype=dtype,
output_dtype=output_dtype,
squeeze_end_dims=squeeze_end_dims,
data_root_dir=data_root_dir,
separate_folder=separate_folder,
print_log=print_log,
)
def __call__(self, data):
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
if meta_key is None and meta_key_postfix is not None:
meta_key = f"{key}_{meta_key_postfix}"
meta_data = d[meta_key] if meta_key is not None else None
self._saver(img=d[key], meta_data=meta_data)
return d
LoadImageD = LoadImageDict = LoadImaged
SaveImageD = SaveImageDict = SaveImaged
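# Usage sketch (illustrative, not taken from the MONAI docs): chain the two
# dictionary transforms on a dict of file paths. "image.nii.gz" and "./out"
# are placeholder values. Guarded so importing this module stays side-effect free.
if __name__ == "__main__":
    data = {"image": "image.nii.gz"}  # placeholder path to any supported image
    loaded = LoadImaged(keys="image")(data)
    print(loaded["image"].shape)      # image array; metadata lands in loaded["image_meta_dict"]
    # SaveImaged expects channel-first arrays [C,H,W,[D]], so a channel transform
    # (e.g. monai.transforms.AddChanneld) normally sits between loading and saving:
    saver = SaveImaged(keys="image", output_dir="./out", output_postfix="copy")
    # saver(channel_first_data)       # would write ./out/image/image_copy.nii.gz by default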
|
from datetime import timedelta
from pathlib import Path
import click
from overhave.base_settings import LoggingSettings
from overhave.cli.group import overhave
from overhave.transport import OverhaveS3Bucket, OverhaveS3ManagerSettings, S3Manager
from overhave.utils import get_current_time
@overhave.group(short_help="Run s3 cloud interaction commands")
def s3() -> None:
pass
@s3.group(short_help="S3 cloud bucket's interaction commands")
def bucket() -> None:
pass
def _check_bucket_registered(name: str) -> None:
if name in (item.value for item in list(OverhaveS3Bucket)):
return
click.secho(f"Note: specified s3 bucket name '{name}' not presented in OverhaveS3Bucket enum!", fg="yellow")
def _get_s3_manager() -> S3Manager:
LoggingSettings().setup_logging()
manager = S3Manager(OverhaveS3ManagerSettings(autocreate_buckets=False))
manager.initialize()
return manager
@bucket.command(short_help="Create s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
def create(name: str) -> None:
""" Create s3 bucket. """
_check_bucket_registered(name)
_get_s3_manager().create_bucket(name)
@bucket.command(short_help="Delete s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--force", is_flag=True, help="Delete all files in bucket, then delete bucket",
)
def delete(name: str, force: bool) -> None:
""" Delete s3 bucket. """
_check_bucket_registered(name)
_get_s3_manager().delete_bucket(name, force=force)
@bucket.command(short_help="Remove old s3 cloud bucket files")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-d", "--days", type=int, help="Remove all files in bucket older then specified days value",
)
def remove_files(name: str, days: int) -> None:
""" Remove s3 bucket files older . """
_check_bucket_registered(name)
manager = _get_s3_manager()
target_date = get_current_time() - timedelta(days=days)
objects = manager.get_bucket_objects(name)
objects_to_delete = []
for obj in objects:
if not obj.modified_at < target_date:
continue
objects_to_delete.append(obj)
if not objects_to_delete:
click.secho(f"No one object older than {days} days.")
return
click.secho(f"Objects older then {days} days: {[x.name for x in objects_to_delete]}")
manager.delete_bucket_objects(bucket=bucket, objects=objects_to_delete)
@s3.command(short_help="Download file from s3 bucket")
@click.option(
"-b", "--bucket", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--filename", type=str, help="Filename for downloading",
)
@click.option("-d", "--dir-to-save", type=str, help="Directory for saving file", default=".")
def download_file(bucket: str, filename: str, dir_to_save: str) -> None:
""" Create s3 bucket. """
_check_bucket_registered(bucket)
_get_s3_manager().download_file(filename=filename, bucket=bucket, dir_to_save=Path(dir_to_save))
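# Example invocations (illustrative; the entrypoint name and the dash/underscore
# spelling of multi-word commands depend on how the `overhave` click group is
# exposed and on the click version in use):
#
#   overhave s3 bucket create -n my-bucket
#   overhave s3 bucket delete -n my-bucket --force
#   overhave s3 bucket remove-files -n my-bucket -d 30
#   overhave s3 download-file -b my-bucket -f report.zip -d ./downloads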
|
from coffea.lookup_tools.lookup_base import lookup_base
import numpy
from copy import deepcopy
class dense_lookup(lookup_base):
def __init__(self, values, dims, feval_dim=None):
super(dense_lookup, self).__init__()
self._dimension = 0
whattype = type(dims)
if whattype == numpy.ndarray:
self._dimension = 1
else:
self._dimension = len(dims)
if self._dimension == 0:
raise Exception("Could not define dimension for {}".format(whattype))
self._axes = deepcopy(dims)
self._feval_dim = None
        if not isinstance(values, numpy.ndarray):
            raise TypeError("values is not a numpy array, but %r" % type(values))
        # string-valued lookups are not supported
        vals_are_strings = (
            "string" in values.dtype.name
            or "str" in values.dtype.name
            or "unicode" in values.dtype.name
            or "bytes" in values.dtype.name
        )
        if vals_are_strings:
            raise Exception("dense_lookup cannot handle string values!")
        self._values = deepcopy(values)
def _evaluate(self, *args):
indices = []
if self._dimension == 1:
indices.append(
numpy.clip(
numpy.searchsorted(self._axes, args[0], side="right") - 1,
0,
self._values.shape[0] - 1,
)
)
else:
for dim in range(self._dimension):
indices.append(
numpy.clip(
numpy.searchsorted(self._axes[dim], args[dim], side="right")
- 1,
0,
self._values.shape[dim] - 1,
)
)
return self._values[tuple(indices)]
def __repr__(self):
myrepr = "{} dimensional histogram with axes:\n".format(self._dimension)
temp = ""
if self._dimension == 1:
temp = "\t1: {}\n".format(self._axes)
else:
temp = "\t1: {}\n".format(self._axes[0])
for idim in range(1, self._dimension):
temp += "\t{}: {}\n".format(idim + 1, self._axes[idim])
myrepr += temp
return myrepr
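# Usage sketch (illustrative, not part of coffea): build a 1-D lookup from bin
# edges and per-bin values and evaluate it, assuming lookup_base.__call__
# forwards plain numpy inputs to _evaluate. Out-of-range inputs are clipped
# into the first/last bin by the searchsorted/clip logic above.
if __name__ == "__main__":
    edges = numpy.array([0.0, 1.0, 2.0, 3.0])    # bin edges along the single axis
    weights = numpy.array([0.9, 1.0, 1.1])       # one value per bin
    corr = dense_lookup(weights, edges)
    print(corr(numpy.array([0.5, 1.5, 10.0])))   # -> [0.9 1.0 1.1]; 10.0 is clipped into the last bin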
|
import axp192
import kv
try:
# for m5stack-core2 only
axp = axp192.Axp192()
axp.powerAll()
    axp.setLCDBrightness(80) # set backlight brightness, 0~100
except OSError:
print("make sure axp192.py is in libs folder")
def _on_get_url(url):
kv.set('_amp_pyapp_url', url)
execfile('/lib/appOta.py')
def _connect_wifi(ssid, passwd):
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
sta_if.active(True)
sta_if.scan()
sta_if.connect(ssid, passwd)
channel = kv.get('app_upgrade_channel')
if channel == "disable":
pass
else:
ssid = kv.get('_amp_wifi_ssid')
passwd = kv.get('_amp_wifi_passwd')
if isinstance(ssid, str) and isinstance(passwd, str):
_connect_wifi(ssid, passwd)
import online_upgrade
online_upgrade.on(_on_get_url)
|
"""
sphinx.ext.duration
~~~~~~~~~~~~~~~~~~~
Measure durations of Sphinx processing.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
from itertools import islice
from operator import itemgetter
from typing import Any, Dict, List, cast
from docutils import nodes
from sphinx.application import Sphinx
from sphinx.domains import Domain
from sphinx.locale import __
from sphinx.util import logging
logger = logging.getLogger(__name__)
class DurationDomain(Domain):
"""A domain for durations of Sphinx processing."""
name = 'duration'
@property
def reading_durations(self) -> Dict[str, timedelta]:
return self.data.setdefault('reading_durations', {})
def note_reading_duration(self, duration: timedelta) -> None:
self.reading_durations[self.env.docname] = duration
def clear(self) -> None:
self.reading_durations.clear()
def clear_doc(self, docname: str) -> None:
self.reading_durations.pop(docname, None)
def merge_domaindata(self, docnames: List[str], otherdata: Dict[str, timedelta]) -> None:
for docname, duration in otherdata.items():
if docname in docnames:
self.reading_durations[docname] = duration
def on_builder_inited(app: Sphinx) -> None:
"""Initialize DurationDomain on bootstrap.
This clears results of last build.
"""
domain = cast(DurationDomain, app.env.get_domain('duration'))
domain.clear()
def on_source_read(app: Sphinx, docname: str, content: List[str]) -> None:
"""Start to measure reading duration."""
app.env.temp_data['started_at'] = datetime.now()
def on_doctree_read(app: Sphinx, doctree: nodes.document) -> None:
"""Record a reading duration."""
started_at = app.env.temp_data.get('started_at')
duration = datetime.now() - started_at
domain = cast(DurationDomain, app.env.get_domain('duration'))
domain.note_reading_duration(duration)
def on_build_finished(app: Sphinx, error: Exception) -> None:
"""Display duration ranking on current build."""
domain = cast(DurationDomain, app.env.get_domain('duration'))
durations = sorted(domain.reading_durations.items(), key=itemgetter(1), reverse=True)
if not durations:
return
logger.info('')
logger.info(__('====================== slowest reading durations ======================='))
for docname, d in islice(durations, 5):
logger.info('%d.%03d %s', d.seconds, d.microseconds / 1000, docname)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(DurationDomain)
app.connect('builder-inited', on_builder_inited)
app.connect('source-read', on_source_read)
app.connect('doctree-read', on_doctree_read)
app.connect('build-finished', on_build_finished)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
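# Usage note (illustrative): enable the extension from a project's conf.py and
# the five slowest documents to read are reported at the end of the build.
#
#     # conf.py
#     extensions = ['sphinx.ext.duration']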
|
# Generated by Django 3.1.7 on 2021-05-13 03:02
from django.db import migrations, models
import django.utils.timezone
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='store',
name='city',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='city_area',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='store',
name='company_name',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='country',
field=django_countries.fields.CountryField(default="VN", max_length=2),
preserve_default=False,
),
migrations.AddField(
model_name='store',
name='country_area',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='store',
name='postal_code',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='store',
name='street_address_1',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='street_address_2',
field=models.CharField(blank=True, max_length=256),
),
]
|
import numpy as np
import pandas as pd
import pytest
from dku_timeseries.timeseries_helpers import generate_date_range, get_date_offset
from recipe_config_loading import get_resampling_params
@pytest.fixture
def config():
config = {u'clip_end': 0, u'constant_value': 0, u'extrapolation_method': u'none', u'shift': 0, u'time_unit_end_of_week': u'SUN',
u'datetime_column': u'Date', u'advanced_activated': False, u'time_unit': u'quarters', u'clip_start': 0, u'time_step': 2,
u'interpolation_method': u'linear'}
return config
class TestResamplerHelpers:
def test_date_offset(self):
time_unit = "business_days"
offset_value = 0
sunday = pd.Timestamp('2021-01-31 10:00:00')
offset = get_date_offset(time_unit, offset_value)
assert sunday + offset == sunday
sunday = pd.Timestamp('2021-01-31 00:00:00')
offset = get_date_offset(time_unit, 1)
assert sunday + offset == pd.Timestamp('2021-02-01 00:00:00')
assert sunday - offset == pd.Timestamp('2021-01-29 00:00:00')
assert sunday + offset + offset == pd.Timestamp('2021-02-02 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 1)
assert friday + offset == pd.Timestamp('2021-02-01 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 2)
assert friday + offset == pd.Timestamp('2021-02-02 00:00:00')
saturday = pd.Timestamp('2021-01-30 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-01 00:00:00')
saturday = pd.Timestamp('2021-02-04 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-05 00:00:00')
def test_generate_date_range_month(self, config):
config["time_unit"] = "months"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
end_time = pd.Timestamp('2021-06-20 00:00:00')
start_time = pd.Timestamp('2021-01-31 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-23 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00').tz_localize("CET")
end_time = pd.Timestamp('2021-06-20 00:00:00').tz_localize("CET")
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(
['2021-01-31 00:00:00+01:00', '2021-03-31 00:00:00+02:00', '2021-05-31 00:00:00+02:00', '2021-07-31 00:00:00+02:00']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
end_time = pd.Timestamp('2021-06-20 00:00:00')
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-03-31', '2021-05-31', '2021-07-31']))
def test_generate_date_range_week(self, config):
config["time_unit"] = "weeks"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
start_time = pd.Timestamp('2020-12-23 00:00:00')
end_time = pd.Timestamp('2021-01-18 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24']))
end_time = pd.Timestamp('2021-01-24 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24', '2021-02-07']))
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-10', '2021-01-24', '2021-02-07']))
config["time_unit"] = "weeks"
config["time_unit_end_of_week"] = "WED"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-23', '2021-01-6', '2021-01-20', '2021-02-03']))
def test_generate_date_range_quarters(self, config):
config["time_step"] = 1
config["time_unit"] = "quarters"
start_time = pd.Timestamp('2020-01-23 00:00:00')
end_time = pd.Timestamp('2021-01-18 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-04-30', '2020-07-31', '2020-10-31', '2021-01-31']))
def test_generate_date_range_half_year(self, config):
config["time_step"] = 1
config["time_unit"] = "semi_annual"
start_time = pd.Timestamp('2020-01-01 00:00:00')
end_time = pd.Timestamp('2021-06-18 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-07-31', '2021-01-31', '2021-07-31']))
def test_generate_date_range_b_days(self, config):
config["time_unit"] = "business_days"
config["time_step"] = 1
start_time = pd.Timestamp('2021-01-02 00:00:00')
end_time = pd.Timestamp('2021-01-10 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))
clip_start = 1
clip_end = 1
shift = 0
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))
clip_start = 2
clip_end = 2
shift = 0
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08']))
def test_generate_date_range_days(self, config):
config["time_unit"] = "days"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190214 01:59:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-02-07 00:00:00+01:00', '2019-02-08 00:00:00+01:00',
'2019-02-09 00:00:00+01:00', '2019-02-10 00:00:00+01:00',
'2019-02-11 00:00:00+01:00', '2019-02-12 00:00:00+01:00',
'2019-02-13 00:00:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_hours(self, config):
config["time_unit"] = "hours"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 11:59:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 09:00:00+01:00', '2019-01-31 10:00:00+01:00',
'2019-01-31 11:00:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_minutes(self, config):
config["time_unit"] = "minutes"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 02:15:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 02:06:00+01:00', '2019-01-31 02:07:00+01:00',
'2019-01-31 02:08:00+01:00', '2019-01-31 02:09:00+01:00',
'2019-01-31 02:10:00+01:00', '2019-01-31 02:11:00+01:00',
'2019-01-31 02:12:00+01:00', '2019-01-31 02:13:00+01:00',
'2019-01-31 02:14:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_seconds(self, config):
config["time_unit"] = "seconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 01:59:12').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:07+01:00', '2019-01-31 01:59:08+01:00',
'2019-01-31 01:59:09+01:00', '2019-01-31 01:59:10+01:00',
'2019-01-31 01:59:11+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_milliseconds(self, config):
config["time_unit"] = "milliseconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('2019-01-31 01:59:00.015000').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.007000+01:00',
'2019-01-31 01:59:00.008000+01:00',
'2019-01-31 01:59:00.009000+01:00',
'2019-01-31 01:59:00.010000+01:00',
'2019-01-31 01:59:00.011000+01:00',
'2019-01-31 01:59:00.012000+01:00',
'2019-01-31 01:59:00.013000+01:00',
'2019-01-31 01:59:00.014000+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_microseconds(self, config):
config["time_unit"] = "microseconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('2019-01-31 01:59:00.000016').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.000007+01:00',
'2019-01-31 01:59:00.000008+01:00',
'2019-01-31 01:59:00.000009+01:00',
'2019-01-31 01:59:00.000010+01:00',
'2019-01-31 01:59:00.000011+01:00',
'2019-01-31 01:59:00.000012+01:00',
'2019-01-31 01:59:00.000013+01:00',
'2019-01-31 01:59:00.000014+01:00',
'2019-01-31 01:59:00.000015+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_nanoseconds(self, config):
config["time_unit"] = "nanoseconds"
config["time_step"] = 1
start_time = pd.Timestamp('2019-01-31T00:59:00.000000000')
end_time = pd.Timestamp('2019-01-31T00:59:00.000000009')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2019-01-31 00:59:00.000000007',
'2019-01-31 00:59:00.000000008']))
|
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, img_size=32):
super(Generator, self).__init__()
# TODO: update to proper image size
self.init_size = img_size // 4
self.l1 = nn.Sequential(nn.Linear(10, 128 * self.init_size ** 2))
self.conv_blocks = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.BatchNorm2d(128, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64, 0.8),
nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 1, 3, stride=1, padding=1),  # 1 output channel; use 3 for RGB images
nn.Tanh(),
)
def forward(self, z):
out = self.l1(z)
out = out.view(out.shape[0], 128, self.init_size, self.init_size)
img = self.conv_blocks(out)
return img
class Discriminator(nn.Module):
def __init__(self, img_size=32):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, 0.8))
return block
self.model = nn.Sequential(
            *discriminator_block(1, 16, bn=False),  # 1 input channel; use 3 for RGB images
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
# TODO: update to proper image size
ds_size = img_size // 2 ** 4
self.adv_layer = nn.Linear(128 * ds_size ** 2, 1)
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
return validity
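# Shape-check sketch (illustrative): with the default img_size=32 and the
# 10-dimensional input expected by Generator.l1, the generator maps (batch, 10)
# vectors to (batch, 1, 32, 32) images and the discriminator maps those images
# to (batch, 1) validity scores.
if __name__ == "__main__":
    import torch
    G, D = Generator(img_size=32), Discriminator(img_size=32)
    z = torch.randn(4, 10)            # input size matches nn.Linear(10, ...)
    fake = G(z)                       # -> torch.Size([4, 1, 32, 32])
    print(fake.shape, D(fake).shape)  # -> torch.Size([4, 1, 32, 32]) torch.Size([4, 1])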
|
# voom_mode_org.py
# Last Modified: 2013-10-31
# VOoM -- Vim two-pane outliner, plugin for Python-enabled Vim 7.x
# Website: http://www.vim.org/scripts/script.php?script_id=2657
# Author: Vlad Irnov (vlad DOT irnov AT gmail DOT com)
# License: CC0, see http://creativecommons.org/publicdomain/zero/1.0/
"""
VOoM markup mode for Emacs Org-mode headline format.
See |voom-mode-org|, ../../doc/voom.txt#*voom-mode-org*
"""
import re
headline_match = re.compile(r'^(\*+)\s').match
def hook_makeOutline(VO, blines):
"""Return (tlines, bnodes, levels) for Body lines blines.
blines is either Vim buffer object (Body) or list of buffer lines.
"""
Z = len(blines)
tlines, bnodes, levels = [], [], []
tlines_add, bnodes_add, levels_add = tlines.append, bnodes.append, levels.append
for i in xrange(Z):
if not blines[i].startswith('*'):
continue
bline = blines[i]
m = headline_match(bline)
if not m:
continue
lev = len(m.group(1))
head = bline[lev:].strip()
tline = ' %s|%s' %('. '*(lev-1), head)
tlines_add(tline)
bnodes_add(i+1)
levels_add(lev)
return (tlines, bnodes, levels)
def hook_newHeadline(VO, level, blnum, tlnum):
"""Return (tree_head, bodyLines).
tree_head is new headline string in Tree buffer (text after |).
bodyLines is list of lines to insert in Body buffer.
"""
tree_head = 'NewHeadline'
bodyLines = ['%s %s' %('*'*level, tree_head), '']
return (tree_head, bodyLines)
def hook_changeLevBodyHead(VO, h, levDelta):
"""Increase of decrease level number of Body headline by levDelta."""
if levDelta==0: return h
m = headline_match(h)
level = len(m.group(1))
return '%s%s' %('*'*(level+levDelta), h[m.end(1):])
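# Example (illustrative): for Body lines
#     ['* Section', 'some text', '** Subsection']
# hook_makeOutline() returns
#     tlines == [' |Section', ' . |Subsection']
#     bnodes == [1, 3]
#     levels == [1, 2]
# i.e. one Tree line per Org headline, the 1-based Body line number of each
# headline, and its level (the number of leading asterisks).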
|
Experiment(description='SE extrapolation experiment',
data_dir='../data/tsdlr_9010/',
max_depth=1,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=1000,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2014-01-28-extrap-SE/',
iters=250,
base_kernels='SE',
random_seed=1,
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'})])
|
from os import listdir, getcwd
from os.path import isfile, join
from math import sin, cos
from setting_utils import timeLimit, heightLimit, input_stream
files = [f for f in listdir(join(getcwd(), 'uploads')) if isfile(join(getcwd(), 'uploads', f))]
files = [f for f in files if f.endswith(".txt")]
czml =(
'var height_time_data = {\n'
'data: [\n'
)
fileIndex = 0
for file in files:
    czml += ('[')
FILE_PATH = join(getcwd(), 'uploads', str(file))
data = []
    with open(FILE_PATH, 'r') as input_stream:
lines = input_stream.readlines()
for i in range( 4, len(lines)) : #avoid head text
words = lines[i].split(' ')
words = [x for x in words if len(x) > 0]
#---Setting---
minutes = float(words[0]) + float(words[1])/60
height = float(words[3])
if(minutes > timeLimit):
break
if(height > heightLimit):
break
#-------------
if (len(words)>15) : #avoid crash data
minutes = float(words[0]) + float(words[1])/60
data.append([ minutes, float(words[3])])
for j in range(0, len(data)) :
czml += ('[ %f, %f], ' %(data[j][0],data[j][1]))
fileIndex += 1
czml += ('], \n')
czml += (
'],\n'
'filename: ['
)
for file in files:
czml += ('"%s",' %(file))
czml += (
'],\n'
'xAxisName: "minute(s)",\n'
"yAxisName: 'meter(s)',\n"
'xMax: 0,\n'
'yMax: 0,\n'
'xMin: 1000,\n'
'yMin: 1000,\n'
'target: "height_time",\n'
'W: 800,\n'
'H: 400\n'
'}\n'
)
fout = open(join(getcwd(), 'balloon', 'data', 'height_time_data.js'), 'w')
fout.write(czml)
fout.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple Python module for validating BagIt profiles. See
https://github.com/bagit-profiles/bagit-profiles
for more information.
This module is intended for use with https://github.com/edsu/bagit but does not extend it.
Usage:
import bagit
import bagit_profile
# Instantiate an existing Bag using https://github.com/edsu/bagit.
bag = bagit.Bag('mydir')
# Instantiate a profile, supplying its URI.
my_profile = bagit_profile.Profile('http://example.com/bagitprofile.json')
# Validate 'Serialization' and 'Accept-Serialization'. This must be done
# before .validate(bag) is called. 'mydir' is the path to the Bag.
if my_profile.validate_serialization('mydir'):
print "Serialization validates"
else:
print "Serialization does not validate"
# Validate the rest of the profile.
if my_profile.validate(bag):
print "Validates"
else:
print "Does not validate"
"""
import json
import logging
import mimetypes
import sys
from fnmatch import fnmatch
from os import listdir, walk
from os.path import basename, exists, isdir, isfile, join, relpath, split
if sys.version_info > (3,):
basestring = str
    from urllib.request import urlopen  # pylint: disable=no-name-in-module
else:
basestring = basestring
from urllib import urlopen # pylint: disable=no-name-in-module
# Define an exception class for use within this module.
class ProfileValidationError(Exception):
# TODO: or just 'pass' instead of __init__ and __str__
def __init__(self, value):
super(ProfileValidationError, self).__init__(value)
self.value = value
def __str__(self):
return repr(self.value)
class ProfileValidationReport(object): # pylint: disable=useless-object-inheritance
def __init__(self):
self.errors = []
@property
def is_valid(self):
return not self.errors
def __str__(self):
if self.is_valid:
return "VALID"
return "INVALID: %s" % "\n ".join(["%s" % e for e in self.errors])
# Define the Profile class.
class Profile(object): # pylint: disable=useless-object-inheritance
_baginfo_profile_id_tag = "BagIt-Profile-Identifier"
def __init__(self, url, profile=None, ignore_baginfo_tag_case=False):
self.url = url
if profile is None:
profile = self.get_profile()
else:
if isinstance(profile, dict):
profile = profile
else:
profile = json.loads(profile)
self.validate_bagit_profile(profile)
# Report of the errors in the last run of validate
self.report = None
self.profile = profile
self.ignore_baginfo_tag_case = ignore_baginfo_tag_case
def _fail(self, msg):
logging.error(msg)
raise ProfileValidationError(msg)
def _warn(self, msg):
logging.error(msg)
def get_profile(self):
try:
f = urlopen(self.url)
profile = f.read()
if sys.version_info > (3,):
profile = profile.decode("utf-8")
profile = json.loads(profile)
except Exception as e: # pylint: disable=broad-except
print("Cannot retrieve profile from %s: %s", self.url, e)
logging.error("Cannot retrieve profile from %s: %s", self.url, e)
# This is a fatal error.
sys.exit(1)
return profile
# Call all the validate functions other than validate_bagit_profile(),
# which we've already called. 'Serialization' and 'Accept-Serialization'
# are validated in validate_serialization().
def validate(self, bag):
self.report = ProfileValidationReport()
for (fn, msg, min_version) in [
(self.validate_bag_info, "Error in bag-info.txt", None),
(self.validate_manifests_required, "Required manifests not found", None),
(
self.validate_tag_manifests_required,
"Required tag manifests not found",
None,
),
(self.validate_payload_manifests_allowed, "Disallowed payload manifests present", (1, 3, 0)),
(self.validate_tag_manifests_allowed, "Disallowed tag manifests present", (1, 3, 0)),
(self.validate_tag_files_required, "Required tag files not found", None),
(
self.validate_allow_fetch,
"fetch.txt is present but is not allowed",
None,
),
(
self.validate_accept_bagit_version,
"Required BagIt version not found",
None,
),
(self.validate_tag_files_allowed, "Tag files not allowed", (1, 2, 0)),
]:
try:
if min_version and self.profile_version_info < min_version:
logging.info(
"Skipping %s introduced in version %s (version validated: %s)",
fn,
min_version,
self.profile_version_info,
)
continue
fn(bag)
except ProfileValidationError as e:
# self._warn("%s: %s" % (msg, e))
self.report.errors.append(e)
return self.report.is_valid
def validate_bagit_profile(self, profile):
"""
Set default values for unspecified tags and validate the profile itself.
"""
if "Serialization" not in profile:
profile["Serialization"] = "optional"
if "Allow-Fetch.txt" not in profile:
profile["Allow-Fetch.txt"] = True
if (
"BagIt-Profile-Info" in profile
and "BagIt-Profile-Version" in profile["BagIt-Profile-Info"]
):
profile_version = profile["BagIt-Profile-Info"]["BagIt-Profile-Version"]
else:
profile_version = "1.1.0"
self.profile_version_info = tuple(int(i) for i in profile_version.split("."))
self.validate_bagit_profile_info(profile)
self.validate_bagit_profile_accept_bagit_versions(profile)
self.validate_bagit_profile_bag_info(profile)
# Check self.profile['bag-profile-info'] to see if "Source-Organization",
# "External-Description", "Version" and "BagIt-Profile-Identifier" are present.
def validate_bagit_profile_info(self, profile):
if "BagIt-Profile-Info" not in profile:
self._fail("%s: Required 'BagIt-Profile-Info' dict is missing." % profile)
if "Source-Organization" not in profile["BagIt-Profile-Info"]:
self._fail(
"%s: Required 'Source-Organization' tag is not in 'BagIt-Profile-Info'."
% profile
)
if "Version" not in profile["BagIt-Profile-Info"]:
self._warn(
"%s: Required 'Version' tag is not in 'BagIt-Profile-Info'." % profile
)
return False
if "BagIt-Profile-Identifier" not in profile["BagIt-Profile-Info"]:
self._fail(
"%s: Required 'BagIt-Profile-Identifier' tag is not in 'BagIt-Profile-Info'."
% profile
)
return True
def validate_bagit_profile_accept_bagit_versions(self, profile):
"""
Ensure all versions in 'Accept-BagIt-Version' are strings
"""
if "Accept-BagIt-Version" in profile:
for version_number in profile["Accept-BagIt-Version"]:
# pylint: disable=undefined-variable
if not isinstance(version_number, basestring):
raise ProfileValidationError(
'Version number "%s" in "Accept-BagIt-Version" is not a string!'
% version_number
)
return True
def validate_bagit_profile_bag_info(self, profile):
if 'Bag-Info' in profile:
for tag in profile['Bag-Info']:
config = profile['Bag-Info'][tag]
if self.profile_version_info >= (1, 3, 0) and \
'description' in config and not isinstance(config['description'], basestring):
self._fail("%s: Profile Bag-Info '%s' tag 'description' property, when present, must be a string." %
(profile, tag))
return True
# Validate tags in self.profile['Bag-Info'].
def validate_bag_info(self, bag):
# First, check to see if bag-info.txt exists.
path_to_baginfotxt = join(bag.path, "bag-info.txt")
if not exists(path_to_baginfotxt):
self._fail("%s: bag-info.txt is not present." % bag)
# Then check for the required 'BagIt-Profile-Identifier' tag and ensure it has the same value
# as self.url.
if self.ignore_baginfo_tag_case:
bag_info = {self.normalize_tag(k): v for k, v in bag.info.items()}
ignore_tag_case_help = ""
else:
bag_info = bag.info
ignore_tag_case_help = " Set 'ignore_baginfo_tag_case' to True if you wish to ignore tag case."
profile_id_tag = self.normalize_tag(self._baginfo_profile_id_tag)
if profile_id_tag not in bag_info:
self._fail(
("%s: Required '%s' tag is not in bag-info.txt." + ignore_tag_case_help)
% (bag, self._baginfo_profile_id_tag)
)
else:
if bag_info[profile_id_tag] != self.url:
self._fail(
"%s: '%s' tag does not contain this profile's URI: <%s> != <%s>"
% (bag, profile_id_tag, bag_info[profile_id_tag], self.url)
)
# Then, iterate through self.profile['Bag-Info'] and if a key has a dict containing a 'required' key that is
# True, check to see if that key exists in bag.info.
for tag in self.profile["Bag-Info"]:
normalized_tag = self.normalize_tag(tag)
config = self.profile["Bag-Info"][tag]
if "required" in config and config["required"] is True:
if normalized_tag not in bag_info:
self._fail(
("%s: Required tag '%s' is not present in bag-info.txt." + ignore_tag_case_help)
% (bag, tag)
)
# If the tag is in bag-info.txt, check to see if the value is constrained.
if "values" in config and normalized_tag in bag_info:
if bag_info[normalized_tag] not in config["values"]:
self._fail(
"%s: Required tag '%s' is present in bag-info.txt but does not have an allowed value ('%s')."
% (bag, tag, bag_info[normalized_tag])
)
# If the tag is nonrepeatable, make sure it only exists once. We do this by checking to see if the value for the key is a list.
if "repeatable" in config and config["repeatable"] is False:
value = bag_info.get(normalized_tag)
if isinstance(value, list):
self._fail(
"%s: Nonrepeatable tag '%s' occurs %s times in bag-info.txt."
% (bag, tag, len(value))
)
return True
# Normalize to canonical lowercase, if profile is ignoring bag-info.txt tag case.
def normalize_tag(self, tag):
return tag if not self.ignore_baginfo_tag_case else tag.lower()
# For each member of self.profile['manifests_required'], throw an exception if
# the manifest file is not present.
def validate_manifests_required(self, bag):
for manifest_type in self.profile["Manifests-Required"]:
path_to_manifest = join(bag.path, "manifest-" + manifest_type + ".txt")
if not exists(path_to_manifest):
self._fail(
"%s: Required manifest type '%s' is not present in Bag."
% (bag, manifest_type)
)
return True
# For each member of self.profile['tag_manifests_required'], throw an exception if
# the tag manifest file is not present.
def validate_tag_manifests_required(self, bag):
# Tag manifests are optional, so we return True if none are defined in the profile.
if "Tag-Manifests-Required" not in self.profile:
return True
for tag_manifest_type in self.profile["Tag-Manifests-Required"]:
path_to_tag_manifest = join(
bag.path, "tagmanifest-" + tag_manifest_type + ".txt"
)
if not exists(path_to_tag_manifest):
self._fail(
"%s: Required tag manifest type '%s' is not present in Bag."
% (bag, tag_manifest_type)
)
return True
@staticmethod
def manifest_algorithms(manifest_files):
for filepath in manifest_files:
filename = basename(filepath)
if filename.startswith("tagmanifest-"):
prefix = "tagmanifest-"
else:
prefix = "manifest-"
algorithm = filename.replace(prefix, "").replace(".txt", "")
yield algorithm
def validate_tag_manifests_allowed(self, bag):
return self._validate_allowed_manifests(bag, manifest_type="tag",
manifests_present=self.manifest_algorithms(bag.tagmanifest_files()),
allowed_attribute="Tag-Manifests-Allowed",
required_attribute="Tag-Manifests-Required")
def validate_payload_manifests_allowed(self, bag):
return self._validate_allowed_manifests(bag, manifest_type="payload",
manifests_present=self.manifest_algorithms(bag.manifest_files()),
allowed_attribute="Manifests-Allowed",
required_attribute="Manifests-Required")
def _validate_allowed_manifests(self, bag, manifest_type=None, manifests_present=None,
allowed_attribute=None, required_attribute=None):
if allowed_attribute not in self.profile:
return True
allowed = self.profile[allowed_attribute]
required = self.profile[required_attribute] if required_attribute in self.profile else []
required_but_not_allowed = [alg for alg in required if alg not in allowed]
if required_but_not_allowed:
self._fail("%s: Required %s manifest type(s) %s not allowed by %s" %
(bag, manifest_type, [str(a) for a in required_but_not_allowed], allowed_attribute))
present_but_not_allowed = [alg for alg in manifests_present if alg not in allowed]
if present_but_not_allowed:
self._fail("%s: Unexpected %s manifest type(s) '%s' present, but not allowed by %s" %
(bag, manifest_type, [str(a) for a in present_but_not_allowed], allowed_attribute))
return True
def validate_tag_files_allowed(self, bag):
"""
Validate the ``Tag-Files-Allowed`` tag.
"""
allowed = (
self.profile["Tag-Files-Allowed"]
if "Tag-Files-Allowed" in self.profile
else ["*"]
)
required = (
self.profile["Tag-Files-Required"]
if "Tag-Files-Required" in self.profile
else []
)
# For each member of 'Tag-Files-Required' ensure it is also in 'Tag-Files-Allowed'.
required_but_not_allowed = [f for f in required if not fnmatch_any(f, allowed)]
if required_but_not_allowed:
self._fail(
"%s: Required tag files '%s' not listed in Tag-Files-Allowed"
% (bag, required_but_not_allowed)
)
# For each tag file in the bag base directory, ensure it is also in 'Tag-Files-Allowed'.
for tag_file in find_tag_files(bag.path):
tag_file = relpath(tag_file, bag.path)
if not fnmatch_any(tag_file, allowed):
self._fail(
"%s: Existing tag file '%s' is not listed in Tag-Files-Allowed."
% (bag, tag_file)
)
# For each member of self.profile['Tag-Files-Required'], throw an exception if
# the path does not exist.
def validate_tag_files_required(self, bag):
# Tag files are optional, so we return True if none are defined in the profile.
if "Tag-Files-Required" not in self.profile:
return True
for tag_file in self.profile["Tag-Files-Required"]:
path_to_tag_file = join(bag.path, tag_file)
if not exists(path_to_tag_file):
self._fail(
"%s: Required tag file '%s' is not present in Bag."
% (bag, path_to_tag_file)
)
return True
# Check to see if this constraint is False, and if it is, then check to see
# if the fetch.txt file exists. If it does, throw an exception.
def validate_allow_fetch(self, bag):
if self.profile["Allow-Fetch.txt"] is False:
path_to_fetchtxt = join(bag.path, "fetch.txt")
if exists(path_to_fetchtxt):
self._fail("%s: Fetch.txt is present but is not allowed." % bag)
return True
# Check the Bag's version, and if it's not in the list of allowed versions,
# throw an exception.
def validate_accept_bagit_version(self, bag):
actual = bag.tags["BagIt-Version"]
allowed = self.profile["Accept-BagIt-Version"]
if actual not in allowed:
self._fail(
"%s: Bag version '%s' is not in list of allowed values: %s"
% (bag, actual, allowed)
)
return True
# Perform tests on 'Serialization' and 'Accept-Serialization', in one function.
# Since https://github.com/edsu/bagit can't tell us if a Bag is serialized or
# not, we need to pass this function the path to the Bag, not the object. Also,
# this method needs to be called before .validate().
def validate_serialization(self, path_to_bag):
# First, perform the two negative tests.
if not exists(path_to_bag):
raise IOError("Can't find file %s" % path_to_bag)
if self.profile["Serialization"] == "required" and isdir(path_to_bag):
self._fail(
"%s: Bag serialization is required but Bag is a directory."
% path_to_bag
)
if self.profile["Serialization"] == "forbidden" and isfile(path_to_bag):
self._fail(
"%s: Bag serialization is forbidden but Bag appears is a file."
% path_to_bag
)
# Then test to see whether the Bag is serialized (is a file) and whether the mimetype is one
# of the allowed types.
if (
self.profile["Serialization"] == "required"
or self.profile["Serialization"] == "optional"
and isfile(path_to_bag)
):
_, bag_file = split(path_to_bag)
mtype = mimetypes.guess_type(bag_file)
if mtype[0] not in self.profile["Accept-Serialization"]:
self._fail(
"%s: Bag serialization is forbidden but Bag appears is a file."
% path_to_bag
)
# If we have passed the serialization tests, return True.
return True
# Return True if any of the patterns fnmatch the file path.
def fnmatch_any(f, pats):
for pat in pats:
if fnmatch(f, pat):
return True
return False
# Find tag files
def find_tag_files(bag_dir):
for root, _, basenames in walk(bag_dir):
reldir = relpath(root, bag_dir)
for basename in basenames:
if fnmatch(reldir, "data*") or (
reldir == "."
and fnmatch_any(
basename,
[
"manifest-*.txt",
"bag-info.txt",
"tagmanifest-*.txt",
"bagit.txt",
"fetch.txt",
],
)
):
continue
fpath = join(root, basename)
if isfile(fpath):
yield fpath
def _configure_logging(args):
import time
log_format = "%(asctime)s - %(levelname)s - %(message)s"
if args.quiet:
args.loglevel = "ERROR"
level = logging.getLevelName(args.loglevel)
if args.no_logfile:
logging.basicConfig(level=level, format=log_format)
else:
if args.logdir:
            filename = join(
                args.logdir, "BagitProfile_" + time.strftime("%y_%m_%d") + ".log"
            )
else:
filename = "BagitProfile%s.log" % time.strftime("%y_%m_%d")
logging.basicConfig(filename=filename, level=level, format=log_format)
def _main():
# Command-line version.
import bagit
from argparse import ArgumentParser
from pkg_resources import get_distribution
parser = ArgumentParser(description="Validate BagIt bags against BagIt profiles")
parser.add_argument(
"--version",
action="version",
version="%(prog)s, v" + get_distribution("bagit_profile").version,
)
parser.add_argument(
"--quiet",
action="store_true",
help="Suppress all output except errors. Default: %(default)s",
)
parser.add_argument(
"-i", "--ignore-baginfo-tag-case",
dest="ignore_baginfo_tag_case",
action="store_true",
help="Ignore capitalization for Bag-Info tag names. Default: %(default)s",
)
parser.add_argument(
"--log", dest="logdir", help="Log directory. Default: %(default)s"
)
parser.add_argument(
"--no-logfile",
action="store_true",
help="Do not log to a log file. Default: %(default)s",
)
parser.add_argument(
"--loglevel",
default="INFO",
choices=("DEBUG", "INFO", "ERROR"),
help="Log level. Default: %(default)s",
)
parser.add_argument(
"--file", help="Load profile from FILE, not by URL. Default: %(default)s."
)
parser.add_argument(
"--report",
action="store_true",
help="Print validation report. Default: %(default)s",
)
parser.add_argument(
"--skip",
action="append",
default=[],
help="Skip validation steps. Default: %(default)s",
choices=("serialization", "profile"),
)
parser.add_argument("profile_url", nargs=1)
parser.add_argument("bagit_path", nargs=1)
args = parser.parse_args()
profile_url = args.profile_url[0]
bagit_path = args.bagit_path[0]
_configure_logging(args)
# Instantiate a profile, supplying its URI.
if args.file:
with open(args.file, "r") as local_file:
profile = Profile(profile_url, profile=local_file.read(),
ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
else:
profile = Profile(profile_url, ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
# Instantiate an existing Bag.
bag = bagit.Bag(bagit_path) # pylint: disable=no-member
# Validate 'Serialization' and 'Accept-Serialization', then perform general validation.
if "serialization" not in args.skip:
if profile.validate_serialization(bagit_path):
print(u"✓ Serialization validates")
else:
print(u"✗ Serialization does not validate")
sys.exit(1)
# Validate the rest of the profile.
if "profile" not in args.skip:
if profile.validate(bag):
print(u"✓ Validates against %s" % profile_url)
else:
print(u"✗ Does not validate against %s" % profile_url)
if args.report:
print(profile.report)
sys.exit(2)
if __name__ == "__main__":
_main()
|
# -*- coding: utf-8 -*-
from asyncy.Exceptions import StoryscriptError
from asyncy.Sentry import Sentry
from raven import Client
def test_init(patch):
# noinspection PyTypeChecker
Sentry.init(None, None) # No-op.
patch.init(Client)
Sentry.init('sentry_dsn', 'release_ver')
Client.__init__.assert_called_with(
dsn='sentry_dsn',
enable_breadcrumbs=False,
install_logging_hook=False,
hook_libraries=[],
release='release_ver')
# noinspection PyProtectedMember
assert Sentry._sentry_client is not None
def test_capture_exc(patch, magic):
patch.many(Client, ['captureException', 'user_context'])
Sentry.init('https://foo:foo@sentry.io/123', 'release_ver')
story = magic()
story.app.app_id = 'app_id'
story.app.version = 'app_version'
story.name = 'story_name'
line = magic()
line['ln'] = '28'
try:
raise StoryscriptError(message='foo', story=story, line=line)
except StoryscriptError as e:
Sentry.capture_exc(e, story, line, {'foo': 'bar'})
Client.user_context.assert_called_with({
'app_uuid': 'app_id',
'app_version': 'app_version'
})
Client.captureException.assert_called_with(extra={
'story_line': line['ln'],
'story_name': 'story_name',
'foo': 'bar'
})
|
from constants import *
from gateway_protocol import Gateway
from api import DiscordAPI
import bot_config as config
import logging as log
log.basicConfig(encoding='utf-8', level=log.DEBUG)
class Bot(object):
def __init__(self, token):
self.g = Gateway(token)
self.api = DiscordAPI(token)
def run_gateway(self):
self.g.run()
def event(self, f):
return self.g.event(f)
if __name__ == "__main__":
print("=== bot startup ===")
cfg = config.from_file("config.json")
    # apply the configured log level (basicConfig above defaults to DEBUG)
    log.getLogger().setLevel(log.getLevelName(cfg.log_level))
bot = Bot(cfg.token)
@bot.event
async def ready(x):
log.info("gateway connection ready")
@bot.event
async def message_reaction_add(msg):
emoji = msg.data.emoji["name"]
if msg.data.message_id != cfg.message_id:
# wrong message, do nothing
log.debug(f"wrong message id, skipping")
return
if emoji not in cfg.emoji:
# unknown emoji, do nothing
log.debug(f"unknown emoji, skipping")
return
event_type = cfg.emoji[emoji]
if event_type == "announcement":
user_id = msg.data.user_id
log.info(f"adding announce role to {user_id}")
bot.api.run(f"/guilds/{GUILD_ID}/members/{user_id}/roles/{ANNOUNCEMENT_ROLE}", "PUT")
bot.run_gateway()
|
# script to upload a file to Zenodo via the API (sandbox variants are left commented out below)
# separate sandbox and real-Zenodo accounts, each with its own ACCESS_TOKEN, need to be created
# to move this script between real Zenodo and the sandbox:
# switch the URLs between zenodo.org and sandbox.zenodo.org
# use an ACCESS_TOKEN generated for the matching account
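# As an illustration only (not part of the original script): keeping the API base URL in a
# single variable would make the sandbox/production switch a one-line change. BASE_URL is a
# hypothetical name, not something this script defines:
#   BASE_URL = 'https://sandbox.zenodo.org/api'  # or 'https://zenodo.org/api'
#   r = requests.post(BASE_URL + '/deposit/depositions',
#                     params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)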
import sys, json, requests
import pandas as pd
studyid = sys.argv[1]
file_dir = sys.argv[2]
access_token = sys.argv[3]
data_dir = file_dir+'/ewas-sum-stats/to-add/'+studyid
zfile=data_dir+'/zenodo.csv'
try:
zdata = pd.read_csv(zfile)
except FileNotFoundError:
print("Can't find the file "+zfile)
sys.exit()
print('Starting Zenodo upload process')
# specify ACCESS_TOKEN
# this needs to be generated for each sandbox/real account
ACCESS_TOKEN = access_token
# create empty upload
headers = {"Content-Type": "application/json"}
r = requests.post('https://zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
print(r.status_code)
print(r.json())
# Get the deposition id from the previous response
# Upload the file to be deposited to Zenodo
deposition_id = r.json()['id']
data = {'name': 'results.csv'}
files = {'file': open(data_dir + '/results.csv', 'rb')}
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)
print(r.status_code)
print(r.json())
# specify and attach the metadata for the upload
title = zdata.loc[0, 'title']
authors = zdata.loc[0, 'authors']
desc = zdata.loc[0, 'desc']
desc = desc + '\n\n' + 'Upload of this dataset was completed by The EWAS Catalog team. The data can be queried along with hundreds of other EWAS at ewascatalog.org. To upload your EWAS summary statistics and have a zenodo DOI generated for you go to ewascatalog.org/upload'
data = {'metadata':
{'title': title,
'upload_type': 'dataset',
'description': desc,
'creators': [{'name': authors}]}}
r = requests.put('https://zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
# r = requests.put('https://sandbox.zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
print(r.status_code)
print(r.json())
# publish
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )
status_code = r.status_code
if status_code != 202:
raise ValueError("Status code was" + str(status_code) + " and it should be 202. Check zenodo")
else:
print("Status code is 202. Happy days!")
# should be: 202
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from functools import partial
import gevent
from mars.tests.core import patch_method
from mars.utils import get_next_port
from mars.actors import create_actor_pool
from mars.promise import PromiseActor
from mars.worker import *
from mars.worker.tests.base import WorkerCase
class TaskActor(PromiseActor):
def __init__(self, queue_name, call_records):
super(TaskActor, self).__init__()
self._queue_name = queue_name
self._call_records = call_records
self._dispatch_ref = None
def post_create(self):
self._dispatch_ref = self.promise_ref(DispatchActor.default_name())
self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
def queued_call(self, key, delay):
try:
self._call_records[key] = time.time()
gevent.sleep(delay)
finally:
self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
class Test(WorkerCase):
@patch_method(DispatchActor._init_chunk_store)
def testDispatch(self, *_):
call_records = dict()
group_size = 4
mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
with create_actor_pool(n_process=1, backend='gevent',
address=mock_scheduler_addr) as pool:
dispatch_ref = pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
# actors of g1
[pool.create_actor(TaskActor, 'g1', call_records) for _ in range(group_size)]
[pool.create_actor(TaskActor, 'g2', call_records) for _ in range(group_size)]
self.assertEqual(len(dispatch_ref.get_slots('g1')), group_size)
self.assertEqual(len(dispatch_ref.get_slots('g2')), group_size)
self.assertEqual(len(dispatch_ref.get_slots('g3')), 0)
self.assertEqual(dispatch_ref.get_hash_slot('g1', 'hash_str'),
dispatch_ref.get_hash_slot('g1', 'hash_str'))
dispatch_ref.get_free_slot('g1', callback=(('NonExist', mock_scheduler_addr), '_non_exist', {}))
self.assertEqual(dispatch_ref.get_free_slots_num().get('g1'), group_size)
# tasks within [0, group_size - 1] will run almost simultaneously,
# while the last one will be delayed due to lack of slots
with self.run_actor_test(pool) as test_actor:
from mars.promise import Promise
p = Promise(done=True)
_dispatch_ref = test_actor.promise_ref(DispatchActor.default_name())
def _call_on_dispatched(uid, key=None):
if uid is None:
call_records[key] = 'NoneUID'
else:
test_actor.promise_ref(uid).queued_call(key, 2, _tell=True)
for idx in range(group_size + 1):
p = p.then(lambda *_: _dispatch_ref.get_free_slot('g1', _promise=True)) \
.then(partial(_call_on_dispatched, key='%d_1' % idx)) \
.then(lambda *_: _dispatch_ref.get_free_slot('g2', _promise=True)) \
.then(partial(_call_on_dispatched, key='%d_2' % idx))
p.then(lambda *_: _dispatch_ref.get_free_slot('g3', _promise=True)) \
.then(partial(_call_on_dispatched, key='N_1')) \
.then(lambda *_: test_actor.set_result(None))
self.get_result(20)
self.assertEqual(call_records['N_1'], 'NoneUID')
self.assertLess(sum(abs(call_records['%d_1' % idx] - call_records['0_1'])
for idx in range(group_size)), 1)
self.assertGreater(call_records['%d_1' % group_size] - call_records['0_1'], 1)
self.assertLess(call_records['%d_1' % group_size] - call_records['0_1'], 3)
dispatch_ref.destroy()
|