Dataset schema (one row per source file; "⌀" in the original header marks nullable columns):

| Column | Type | Range / values | Nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 1 to 1.03M | no |
| ext | string | 10 distinct values | no |
| lang | string | 1 distinct value | no |
| max_stars_repo_path | string | length 3 to 239 | no |
| max_stars_repo_name | string | length 5 to 130 | no |
| max_stars_repo_head_hexsha | string | length 40 to 78 | no |
| max_stars_repo_licenses | list | length 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 3 to 239 | no |
| max_issues_repo_name | string | length 5 to 130 | no |
| max_issues_repo_head_hexsha | string | length 40 to 78 | no |
| max_issues_repo_licenses | list | length 1 to 10 | no |
| max_issues_count | int64 | 1 to 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 3 to 239 | no |
| max_forks_repo_name | string | length 5 to 130 | no |
| max_forks_repo_head_hexsha | string | length 40 to 78 | no |
| max_forks_repo_licenses | list | length 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 1 to 1.03M | no |
| avg_line_length | float64 | 1 to 958k | no |
| max_line_length | int64 | 1 to 1.03M | no |
| alphanum_fraction | float64 | 0 to 1 | no |
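The records below follow this schema, with each source file stored verbatim in the `content` column. As a minimal illustrative sketch (the dataset identifier is not given in this dump, so the name below is a placeholder, and a Hugging Face-style `datasets` layout is assumed), a row could be inspected like this:

    from datasets import load_dataset  # assumes a Hugging Face-style dataset; the name below is a placeholder

    ds = load_dataset("org/dataset-name", split="train")
    row = ds[0]
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])  # first 200 characters of the stored source file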
Row 1
hexsha: 4a1953ed8b25ceb332b90b74dd4c85037513668d | size: 42,654 | ext: py | lang: Python
max_stars: path=youtube_dl/options.py, repo=wesson09/youtube-dl, head=9463b52a600662fe3b67c90f7fbf68ca709c5705, licenses=["Unlicense"], count=null, events=null/null
max_issues: path=youtube_dl/options.py, repo=wesson09/youtube-dl, head=9463b52a600662fe3b67c90f7fbf68ca709c5705, licenses=["Unlicense"], count=null, events=null/null
max_forks: path=youtube_dl/options.py, repo=wesson09/youtube-dl, head=9463b52a600662fe3b67c90f7fbf68ca709c5705, licenses=["Unlicense"], count=1, events=2021-09-23T11:03:21.000Z / 2021-09-23T11:03:21.000Z
content:
from __future__ import unicode_literals
import os.path
import optparse
import re
import sys
from .downloader.external import list_external_downloaders
from .compat import (
compat_expanduser,
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
compat_shlex_split,
)
from .utils import (
preferredencoding,
write_string,
)
from .version import __version__
def _hide_login_info(opts):
PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'])
eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
def _scrub_eq(o):
m = eqre.match(o)
if m:
return m.group('key') + '=PRIVATE'
else:
return o
opts = list(map(_scrub_eq, opts))
for idx, opt in enumerate(opts):
if opt in PRIVATE_OPTS and idx + 1 < len(opts):
opts[idx + 1] = 'PRIVATE'
return opts
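# Editor's note (illustrative sketch, not part of the original file): _hide_login_info is
# applied to argv before it is echoed in the verbose/debug output further below, so that
# credentials are not leaked. For a hypothetical input:
#   _hide_login_info(['-u', 'alice', '--video-password=hunter2', '-f', 'best'])
#   -> ['-u', 'PRIVATE', '--video-password=PRIVATE', '-f', 'best']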
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
# FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
contents = optionf.read()
if sys.version_info < (3,):
contents = contents.decode(preferredencoding())
res = compat_shlex_split(contents, comments=True)
finally:
optionf.close()
return res
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --option METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return ''.join(opts)
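    # Editor's note (illustrative, not part of the original file): for an option defined as
    # ('-f', '--format') with metavar FORMAT, this returns the string "-f, --format FORMAT".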
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [OPTIONS] URL [URL...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='Print this help text and exit')
general.add_option(
'--version',
action='version',
help='Print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='Continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='Display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--force-generic-extractor',
action='store_true', dest='force_generic_extractor', default=False,
help='Force extraction to use the generic extractor')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. '
'When given in the global configuration file /etc/youtube-dl.conf: '
'Do not read the user configuration in ~/.config/youtube-dl/config '
'(%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--config-location',
dest='config_location', metavar='PATH',
help='Location of the configuration file; either the path to the config or its containing directory.')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
general.add_option(
'--mark-watched',
action='store_true', dest='mark_watched', default=False,
help='Mark videos watched (YouTube only)')
general.add_option(
'--no-mark-watched',
action='store_false', dest='mark_watched', default=False,
help='Do not mark videos watched (YouTube only)')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',
default=False,
help='Do not emit color codes in output')
network = optparse.OptionGroup(parser, 'Network Options')
network.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable '
'SOCKS proxy, specify a proper scheme. For example '
'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
'for direct connection')
network.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None, metavar='SECONDS',
help='Time to wait before giving up, in seconds')
network.add_option(
'--source-address',
metavar='IP', dest='source_address', default=None,
help='Client-side IP address to bind to',
)
network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4',
)
network.add_option(
'-6', '--force-ipv6',
action='store_const', const='::', dest='source_address',
help='Make all connections via IPv6',
)
geo = optparse.OptionGroup(parser, 'Geo Restriction')
geo.add_option(
'--geo-verification-proxy',
dest='geo_verification_proxy', default=None, metavar='URL',
help='Use this proxy to verify the IP address for some geo-restricted sites. '
'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.')
geo.add_option(
'--cn-verification-proxy',
dest='cn_verification_proxy', default=None, metavar='URL',
help=optparse.SUPPRESS_HELP)
geo.add_option(
'--geo-bypass',
action='store_true', dest='geo_bypass', default=True,
help='Bypass geographic restriction via faking X-Forwarded-For HTTP header')
geo.add_option(
'--no-geo-bypass',
action='store_false', dest='geo_bypass', default=True,
help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header')
geo.add_option(
'--geo-bypass-country', metavar='CODE',
dest='geo_bypass_country', default=None,
        help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-1 alpha-2 country code')
geo.add_option(
'--geo-bypass-ip-block', metavar='IP_BLOCK',
dest='geo_bypass_ip_block', default=None,
help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation')
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='Playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='Playlist video to end at (default is last)')
selection.add_option(
'--playlist-items',
dest='playlist_items', metavar='ITEM_SPEC', default=None,
help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='Download only matching titles (regex or caseless sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='Skip download for matching titles (regex or caseless sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
        help='Download only videos uploaded on this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='Download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='Download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views')
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--match-filter',
metavar='FILTER', dest='match_filter', default=None,
help=(
'Generic video filter. '
'Specify any key (see the "OUTPUT TEMPLATE" for a list of available keys) to '
'match if the key is present, '
'!key to check if the key is not present, '
'key > NUMBER (like "comment_count > 12", also works with '
'>=, <, <=, !=, =) to compare against a number, '
'key = \'LITERAL\' (like "uploader = \'Mike Smith\'", also works with !=) '
'to match against a string literal '
'and & to require multiple matches. '
'Values which are not known are excluded unless you '
'put a question mark (?) after the operator. '
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='Download only the video, if the URL refers to a video and a playlist.')
selection.add_option(
'--yes-playlist',
action='store_false', dest='noplaylist', default=False,
help='Download the playlist, if the URL refers to a video and a playlist.')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='Download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='Login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='Account password. If this option is left out, youtube-dl will ask interactively.')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='Two-factor authentication code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='Use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='Video password (vimeo, youku)')
adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options')
adobe_pass.add_option(
'--ap-mso',
dest='ap_mso', metavar='MSO',
help='Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs')
adobe_pass.add_option(
'--ap-username',
dest='ap_username', metavar='USERNAME',
help='Multiple-system operator account login')
adobe_pass.add_option(
'--ap-password',
dest='ap_password', metavar='PASSWORD',
help='Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.')
adobe_pass.add_option(
'--ap-list-mso',
action='store_true', dest='ap_list_mso', default=False,
help='List all supported multiple-system operators')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='Download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='Prefer free video formats unless a specific one is requested')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='List all available formats of requested videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
'If a merge is required (e.g. bestvideo+bestaudio), '
'output to given container format. One of mkv, mp4, ogg, webm, flv. '
'Ignored if no merge is required'))
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='Write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='Write automatically generated subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='Download all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='List all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--limit-rate', '--rate-limit',
dest='ratelimit', metavar='RATE',
help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='Number of retries (default is %default), or "infinite".')
downloader.add_option(
'--fragment-retries',
dest='fragment_retries', metavar='RETRIES', default=10,
help='Number of retries for a fragment (default is %default), or "infinite" (DASH, hlsnative and ISM)')
downloader.add_option(
'--skip-unavailable-fragments',
action='store_true', dest='skip_unavailable_fragments', default=True,
help='Skip unavailable fragments (DASH, hlsnative and ISM)')
downloader.add_option(
'--abort-on-unavailable-fragment',
action='store_false', dest='skip_unavailable_fragments',
help='Abort downloading when some fragment is not available')
downloader.add_option(
'--keep-fragments',
action='store_true', dest='keep_fragments', default=False,
help='Keep downloaded fragments on disk after downloading is finished; fragments are erased by default')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--http-chunk-size',
dest='http_chunk_size', metavar='SIZE', default=None,
help='Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). '
'May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
downloader.add_option(
'--playlist-reverse',
action='store_true',
help='Download playlist videos in reverse order')
downloader.add_option(
'--playlist-random',
action='store_true',
help='Download playlist videos in random order')
downloader.add_option(
'--xattr-set-filesize',
dest='xattr_set_filesize', action='store_true',
help='Set file xattribute ytdl.filesize with expected file size')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true', default=None,
help='Use the native HLS downloader instead of ffmpeg')
downloader.add_option(
'--hls-prefer-ffmpeg',
dest='hls_prefer_native', action='store_false', default=None,
help='Use ffmpeg instead of the native HLS downloader')
downloader.add_option(
'--hls-use-mpegts',
dest='hls_use_mpegts', action='store_true',
help='Use the mpegts container for HLS videos, allowing to play the '
'video while downloading (some players may not be able to play it)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
help='Use the specified external downloader. '
'Currently supports %s' % ','.join(list_external_downloaders()))
downloader.add_option(
'--external-downloader-args',
dest='external_downloader_args', metavar='ARGS',
help='Give these arguments to the external downloader')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='Specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='Specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
workarounds.add_option(
'--sleep-interval', '--min-sleep-interval', metavar='SECONDS',
dest='sleep_interval', type=float,
help=(
'Number of seconds to sleep before each download when used alone '
'or a lower bound of a range for randomized sleep before each download '
'(minimum possible number of seconds to sleep) when used along with '
'--max-sleep-interval.'))
workarounds.add_option(
'--max-sleep-interval', metavar='SECONDS',
dest='max_sleep_interval', type=float,
help=(
'Upper bound of a range for randomized sleep before each download '
'(maximum possible number of seconds to sleep). Must only be used '
'along with --min-sleep-interval.'))
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='Activate quiet mode')
verbosity.add_option(
'--dumpregex',
action='store_true', dest='dumpregex', default=False,
help='Dump extractors regular expressions')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='Do not download the video and do not write anything to disk')
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='Do not download the video')
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='Simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='Simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='Simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='Simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='Simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='Simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='Simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='Simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='Simulate, quiet but print JSON information. See the "OUTPUT TEMPLATE" for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--print-json',
action='store_true', dest='print_json', default=False,
help='Be quiet and print the video information as JSON (video is still being downloaded).',
)
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='Output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='Do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='Display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
verbosity.add_option(
'--dump-pages', '--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic', '--dump-headers',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
verbosity.add_option(
'-C', '--call-home',
dest='call_home', action='store_true', default=False,
help='Contact the youtube-dl server for debugging')
verbosity.add_option(
'--no-call-home',
dest='call_home', action='store_false', default=False,
help='Do NOT contact the youtube-dl server for debugging')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help="File containing URLs to download ('-' for stdin), one URL per line. "
"Lines starting with '#', ';' or ']' are considered as comments and ignored.")
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='Use only video ID in file name')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info'))
filesystem.add_option(
'--output-na-placeholder',
dest='outtmpl_na_placeholder', metavar='PLACEHOLDER', default='NA',
help=('Placeholder value for unavailable meta fields in output filename template (default is "%default")'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER', type=int,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'--autonumber-start',
dest='autonumber_start', metavar='NUMBER', default=1, type=int,
help='Specify the start value for %(autonumber)s (default is %default)')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='Do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='Do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='Do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='Do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='Write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='Write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='Write video annotations to a .annotations.xml file')
filesystem.add_option(
'--load-info-json', '--load-info',
dest='load_info_filename', metavar='FILE',
help='JSON file containing the video information (created with the "--write-info-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='File to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
thumbnail = optparse.OptionGroup(parser, 'Thumbnail Options')
thumbnail.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='Write thumbnail image to disk')
thumbnail.add_option(
'--write-all-thumbnails',
action='store_true', dest='write_all_thumbnails', default=False,
help='Write all thumbnail image formats to disk')
thumbnail.add_option(
'--list-thumbnails',
action='store_true', dest='list_thumbnails', default=False,
help='Simulate and list all available thumbnail formats')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='Convert video files to audio-only files (requires ffmpeg/avconv and ffprobe/avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='Specify audio format: "best", "aac", "flac", "mp3", "m4a", "opus", "vorbis", or "wav"; "%default" by default; No effect without -x')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
postproc.add_option(
'--postprocessor-args',
dest='postprocessor_args', metavar='ARGS',
help='Give these arguments to the postprocessor')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='Keep the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='Embed subtitles in the video (only for mp4, webm and mkv videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='Embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='Write metadata to the video file')
postproc.add_option(
'--metadata-from-title',
metavar='FORMAT', dest='metafromtitle',
help='Parse additional metadata like song title / artist from the video title. '
'The format syntax is the same as --output. Regular expression with '
'named capture groups may also be used. '
'The parsed parameters replace existing values. '
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise". '
'Example (regex): --metadata-from-title "(?P<artist>.+?) - (?P<title>.+)"')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--fixup',
metavar='POLICY', dest='fixup', default='detect_or_warn',
help='Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors (default)')
postproc.add_option(
'--ffmpeg-location', '--avconv-location', metavar='PATH',
dest='ffmpeg_location',
help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading and post-processing, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subs', '--convert-subtitles',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt|lrc)')
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(geo)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(adobe_pass)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), 'replace') for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
opts, args = parser.parse_args(command_line_conf)
system_conf = user_conf = custom_conf = []
if '--config-location' in command_line_conf:
location = compat_expanduser(opts.config_location)
if os.path.isdir(location):
location = os.path.join(location, 'youtube-dl.conf')
if not os.path.exists(location):
parser.error('config-location %s does not exist.' % location)
custom_conf = _readOptions(location)
elif '--ignore-config' in command_line_conf:
pass
else:
system_conf = _readOptions('/etc/youtube-dl.conf')
if '--ignore-config' not in system_conf:
user_conf = _readUserConf()
argv = system_conf + user_conf + custom_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
for conf_label, conf in (
('System config', system_conf),
('User config', user_conf),
('Custom config', custom_conf),
('Command-line args', command_line_conf)):
write_string('[debug] %s: %s\n' % (conf_label, repr(_hide_login_info(conf))))
return parser, opts, args
avg_line_length: 46.112432 | max_line_length: 391 | alphanum_fraction: 0.640385

Row 2
hexsha: 4a1954cf1a35f006af77d64f46502cd5537b880d | size: 8,506 | ext: py | lang: Python
max_stars: path=utilities/file_scripts.py, repo=imgVOID/autograding-api, head=7c2f5491607d5d76880827c73565f9f5be5a33ad, licenses=["Apache-2.0"], count=5, events=2021-11-08T18:55:09.000Z / 2022-02-27T19:14:35.000Z
max_issues: path=utilities/file_scripts.py, repo=imgVOID/autograde-py, head=7c2f5491607d5d76880827c73565f9f5be5a33ad, licenses=["Apache-2.0"], count=null, events=null/null
max_forks: path=utilities/file_scripts.py, repo=imgVOID/autograde-py, head=7c2f5491607d5d76880827c73565f9f5be5a33ad, licenses=["Apache-2.0"], count=2, events=2021-12-23T05:11:31.000Z / 2021-12-26T13:42:21.000Z
content:
"""
`file_scripts` module stores tasks I/O utilities.
"""
import aiofiles
from aiofiles.os import remove, mkdir
from os.path import abspath, join, normpath, isfile
from json import loads, dumps
from typing import List, Iterable
class FileUtils:
"""
`FileUtils` class stores utilities for saving user input files and file paths.
"""
@classmethod
async def _get_filepath(
cls: 'FileUtils', title: str, topic_id: int = None, task_id: int = None
) -> str or None:
"""
`FileUtils._get_filepath` private class method returns the path to a file by path name.
It takes three parameters (excluding cls):
1. `title` has four variants: task_info, task_input, task_output, task_code.
2. `topic_id` means an id of the topic and the directory name.
        3. `task_id` means an id of the task in a topic and a part of the file name.
"""
topic_path = None
if topic_id is not None:
topic_index = await cls.open_file('topic_index')
topic_path = topic_index[topic_id].get("path")
filesystem = {
"task_info": normpath(abspath(
join('materials', f'{topic_path}', 'description', f'task_{task_id}.json')
)),
"task_input": normpath(abspath(
join('materials', f'{topic_path}', 'input', f'task_{task_id}.txt')
)),
"task_output": normpath(abspath(
join('materials', f'{topic_path}', 'output', f'task_{task_id}.txt')
)),
"task_code": normpath(abspath(
join('materials', f'{topic_path}', 'code', f'task_{task_id}.txt')
)),
"topic_index": normpath(abspath(
join('materials', f'topics.json')
))
}
try:
return filesystem[title]
except KeyError as e:
raise ValueError(f'No such get_filepath() mode like "{title}"') from e
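    # Editor's note (illustrative, not part of the original file): assuming a hypothetical topic
    # whose "path" entry in topics.json is "01_basics", a call such as
    #   await FileUtils._get_filepath('task_info', topic_id=0, task_id=3)
    # resolves to an absolute, normalized path ending in
    #   materials/01_basics/description/task_3.json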
@staticmethod
async def _write_user_answer_temp(code: bytes) -> str:
"""
`FileUtils._write_user_answer_temp` private static method
returns the user input's temp file name.
It takes one parameter: code, type: bytes.
"""
async with aiofiles.tempfile.NamedTemporaryFile(
'wb', delete=False, dir='./temp/'
) as f:
await f.write(code)
return f.name.split('temp')[1]
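    # Editor's note (illustrative, not part of the original file): NamedTemporaryFile creates a
    # file such as ./temp/tmpk3j2x9 (the random name varies), so splitting on 'temp' returns the
    # trailing part of the path, e.g. '/tmpk3j2x9'.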
@classmethod
async def open_file(
cls: 'FileUtils', title: str, topic_id: int = None, task_id: int = None
) -> dict or str:
"""
`FileUtils.open_file` public class method accesses
topic index, task description or task code.
It returns the content of the file read.
It takes three parameters (excluding cls):
1. `title` has 3 variants - topic_index, task_info, task_code.
2. `topic_id` means an id of the topic and the directory name.
3. `task_id` means an id of the task in a topic and a part of the file name.
"""
path = await cls._get_filepath(title, topic_id, task_id)
try:
async with aiofiles.open(path, encoding='utf-8', mode='r') as f:
content = await f.read()
content = content.encode('utf-8')
if '.json' in f.name:
return loads(content)
elif '.txt' in f.name:
return content
else:
raise ValueError('Wrong file extension.')
except FileNotFoundError as e:
raise FileNotFoundError(
f'File not found: title={title}, topic_id={topic_id}, task_id={task_id}'
) from e
@classmethod
async def open_file_values(
cls: 'FileUtils', title: str, topic_id: int = None, task_id: int = None
) -> List[bytes]:
"""
`FileUtils.open_file_values` public class method accesses
task input and task output values.
It returns the content of the file read, separated by a newline.
It takes three parameters (excluding cls):
1. `title` has 2 variants - task_input, task_output.
2. `topic_id` means an id of the topic and the directory name.
3. `task_id` means an id of the task in a topic and a part of the file name.
"""
path = await cls._get_filepath(title, topic_id, task_id)
async with aiofiles.open(path, encoding='utf-8', mode='r') as f:
if f.name.endswith('.txt'):
content = await f.read()
return content.encode('utf-8').split(b'\n')
else:
raise ValueError('Wrong file extension.')
@classmethod
async def save_file(
cls: 'FileUtils', title: str, content: bytes or dict,
topic_id: int = None, task_id: int = None
) -> None:
"""
`FileUtils.save_file` public class method writes
topic index, task description or task code to file.
It takes four parameters (excluding cls):
1. `title` has 3 variants - topic_index, task_info, task_code.
2. `content` is the text that will be written to a file.
3. `topic_id` means an id of the topic and the directory name.
4. `task_id` means an id of the task in a topic and a part of the file name.
"""
path = await cls._get_filepath(title, topic_id, task_id)
async with aiofiles.open(path, encoding='utf-8', mode='w') as f:
if f.name.endswith('.json'):
content = dumps(content, ensure_ascii=False)
elif f.name.endswith('.txt'):
content = content.decode('utf-8')
else:
raise ValueError('Wrong file extension.')
await f.write(content)
@classmethod
async def save_file_values(
cls: 'FileUtils', title: str, content: Iterable[str],
topic_id: int = None, task_id: int = None
) -> None:
"""
`FileUtils.save_file_values` public class method writes
task input and task output values to file.
It takes four parameters:
1. `title` has 2 variants - task_input, task_output.
2. `content` is the text that will be written to a file.
3. `topic_id` means an id of the topic and the directory name.
4. `task_id` means an id of the task in a topic and a part of the file name.
"""
path = await cls._get_filepath(title, topic_id, task_id)
async with aiofiles.open(path, mode='w', encoding='utf-8') as f:
if not f.name.endswith('.txt'):
raise ValueError('Wrong file extension.')
else:
for value in content:
await f.writelines(f'{value}\n')
@classmethod
async def remove_file(
cls: 'FileUtils', title: str, topic_id: int, task_id: int
) -> None:
"""
`FileUtils.remove_file` public class method removes
any file related to a task.
It takes three parameters:
1. `title` has 5 variants - topic_index, task_info, task_code, task_input, task_output.
        2. `topic_id` means an id of the topic and the directory name.
        3. `task_id` means an id of the task in a topic and a part of the file name.
"""
path = await cls._get_filepath(title, topic_id, task_id)
try:
await remove(path)
except OSError as e:
raise FileNotFoundError(f'File path can not be removed: {path}') from e
@classmethod
async def get_user_answer_temp(
cls: 'FileUtils', code: bytes,
) -> str:
"""
        `FileUtils.get_user_answer_temp` public class method saves user input on disk.
        It returns the randomly generated name of the temporary file that stores the user's code.
It takes one parameter (excluding cls): code, type: bytes.
"""
try:
return await cls._write_user_answer_temp(code)
except FileNotFoundError:
try:
await mkdir("./temp")
except Exception as e:
raise FileNotFoundError("Something went wrong until the input saving") from e
else:
return await cls._write_user_answer_temp(code)
@staticmethod
async def remove_user_answer_file(temp_name: str) -> None:
user_input_path = f"./temp/{temp_name}"
        if isfile(user_input_path):
            await remove(user_input_path)
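# Editor's note (illustrative sketch, not part of the original file): minimal usage of FileUtils
# from an async context, assuming the materials/ tree and topics.json exist:
#   info = await FileUtils.open_file('task_info', topic_id=0, task_id=1)            # parsed JSON
#   cases = await FileUtils.open_file_values('task_input', topic_id=0, task_id=1)   # list of bytes lines
#   temp_name = await FileUtils.get_user_answer_temp(b'print("hello")')
#   await FileUtils.remove_user_answer_file(temp_name)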
avg_line_length: 40.894231 | max_line_length: 95 | alphanum_fraction: 0.588878

Row 3
hexsha: 4a19550275795b3874380f3f348d8cad1699e08c | size: 12,239 | ext: py | lang: Python
max_stars: path=rwe/analysis.py, repo=som-shahlab/ehr-rwe, head=9653a6abc837dee7759ed245939716b7d50525cc, licenses=["Apache-2.0"], count=25, events=2020-02-12T00:07:03.000Z / 2021-12-01T22:50:24.000Z
max_issues: path=rwe/analysis.py, repo=som-shahlab/ehr-rwe, head=9653a6abc837dee7759ed245939716b7d50525cc, licenses=["Apache-2.0"], count=1, events=2021-01-28T22:49:23.000Z / 2021-01-28T22:49:23.000Z
max_forks: path=rwe/analysis.py, repo=som-shahlab/ehr-rwe, head=9653a6abc837dee7759ed245939716b7d50525cc, licenses=["Apache-2.0"], count=3, events=2021-03-09T02:47:19.000Z / 2021-05-21T14:51:02.000Z
content:
from collections import Counter, defaultdict
import torch
import numpy as np
import scipy.sparse as sparse
from scipy.sparse import issparse
from pandas import DataFrame, Series
#from metal.utils import arraylike_to_numpy
def arraylike_to_numpy(array_like):
"""Convert a 1d array-like (e.g,. list, tensor, etc.) to an np.ndarray"""
orig_type = type(array_like)
# Convert to np.ndarray
if isinstance(array_like, np.ndarray):
pass
elif isinstance(array_like, list):
array_like = np.array(array_like)
elif issparse(array_like):
array_like = array_like.toarray()
elif isinstance(array_like, torch.Tensor):
array_like = array_like.numpy()
elif not isinstance(array_like, np.ndarray):
array_like = np.array(array_like)
else:
msg = f"Input of type {orig_type} could not be converted to 1d " "np.ndarray"
raise ValueError(msg)
# Correct shape
if (array_like.ndim > 1) and (1 in array_like.shape):
array_like = array_like.flatten()
if array_like.ndim != 1:
raise ValueError("Input could not be converted to 1d np.array")
# Convert to ints
if any(array_like % 1):
raise ValueError("Input contains at least one non-integer value.")
array_like = array_like.astype(np.dtype(int))
return array_like
############################################################
# Label Matrix Diagnostics
############################################################
def _covered_data_points(L):
"""Returns an indicator vector where ith element = 1 if x_i is labeled by at
least one LF."""
return np.ravel(np.where(L.sum(axis=1) != 0, 1, 0))
def _overlapped_data_points(L):
"""Returns an indicator vector where ith element = 1 if x_i is labeled by
more than one LF."""
return np.where(np.ravel((L != 0).sum(axis=1)) > 1, 1, 0)
def _conflicted_data_points(L):
"""Returns an indicator vector where ith element = 1 if x_i is labeled by
at least two LFs that give it disagreeing labels."""
m = sparse.diags(np.ravel(L.max(axis=1).todense()))
return np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense())
def label_coverage(L):
"""Returns the **fraction of data points with > 0 (non-zero) labels**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith item
"""
return _covered_data_points(L).sum() / L.shape[0]
def label_overlap(L):
"""Returns the **fraction of data points with > 1 (non-zero) labels**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith item
"""
return _overlapped_data_points(L).sum() / L.shape[0]
def label_conflict(L):
"""Returns the **fraction of data points with conflicting (disagreeing)
lablels.**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith item
"""
return _conflicted_data_points(L).sum() / L.shape[0]
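# Editor's note (illustrative, not part of the original file): a tiny worked example of the three
# statistics above, using a hypothetical 4 x 2 label matrix (0 = abstain):
#   L = sparse.csr_matrix([[1, 0],
#                          [1, 2],
#                          [2, 2],
#                          [0, 0]])
#   label_coverage(L)  -> 0.75  (3 of 4 rows carry at least one non-zero label)
#   label_overlap(L)   -> 0.50  (two rows are labeled by both LFs)
#   label_conflict(L)  -> 0.25  (only the [1, 2] row receives disagreeing labels)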
def lf_polarities(L):
"""Return the polarities of each LF based on evidence in a label matrix.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
"""
polarities = [sorted(list(set(L[:, i].data))) for i in range(L.shape[1])]
return [p[0] if len(p) == 1 else p for p in polarities]
def lf_coverages(L):
"""Return the **fraction of data points that each LF labels.**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
"""
return np.ravel((L != 0).sum(axis=0)) / L.shape[0]
def lf_overlaps(L, normalize_by_coverage=False):
"""Return the **fraction of items each LF labels that are also labeled by at
least one other LF.**
Note that the maximum possible overlap fraction for an LF is the LF's
coverage, unless `normalize_by_coverage=True`, in which case it is 1.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
normalize_by_coverage: Normalize by coverage of the LF, so that it
returns the percent of LF labels that have overlaps.
"""
overlaps = (L != 0).T @ _overlapped_data_points(L) / L.shape[0]
if normalize_by_coverage:
overlaps /= lf_coverages(L)
return np.nan_to_num(overlaps)
def lf_conflicts(L, normalize_by_overlaps=False):
"""Return the **fraction of items each LF labels that are also given a
different (non-abstain) label by at least one other LF.**
Note that the maximum possible conflict fraction for an LF is the LF's
overlaps fraction, unless `normalize_by_overlaps=True`, in which case it
is 1.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
normalize_by_overlaps: Normalize by overlaps of the LF, so that it
returns the percent of LF overlaps that have conflicts.
"""
conflicts = (L != 0).T @ _conflicted_data_points(L) / L.shape[0]
if normalize_by_overlaps:
conflicts /= lf_overlaps(L)
return np.nan_to_num(conflicts)
def lf_empirical_accuracies(L, Y):
"""Return the **empirical accuracy** against a set of labels Y (e.g. dev
set) for each LF.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
Y: an [n] or [n, 1] np.ndarray of gold labels
"""
# Assume labeled set is small, work with dense matrices
Y = arraylike_to_numpy(Y)
L = L.toarray()
X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1))
return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
def lf_summary(L, Y=None, lf_names=None, est_accs=None):
"""Returns a pandas DataFrame with the various per-LF statistics.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
Y: an [n] or [n, 1] np.ndarray of gold labels.
If provided, the empirical accuracy for each LF will be calculated
"""
n, m = L.shape
if lf_names is not None:
col_names = ["j"]
d = {"j": list(range(m))}
else:
lf_names = list(range(m))
col_names = []
d = {}
# Default LF stats
col_names.extend(["Polarity", "Coverage", "Overlaps", "Conflicts"])
d["Polarity"] = Series(data=lf_polarities(L), index=lf_names)
d["Coverage"] = Series(data=lf_coverages(L), index=lf_names)
d["Overlaps"] = Series(data=lf_overlaps(L), index=lf_names)
d["Conflicts"] = Series(data=lf_conflicts(L), index=lf_names)
if Y is not None:
col_names.extend(["Correct", "Incorrect", "Emp. Acc."])
confusions = [
confusion_matrix(Y, L[:, i], pretty_print=False) for i in range(m)
]
corrects = [np.diagonal(conf).sum() for conf in confusions]
incorrects = [
conf.sum() - correct for conf, correct in zip(confusions, corrects)
]
accs = lf_empirical_accuracies(L, Y)
d["Correct"] = Series(data=corrects, index=lf_names)
d["Incorrect"] = Series(data=incorrects, index=lf_names)
d["Emp. Acc."] = Series(data=accs, index=lf_names)
if est_accs is not None:
col_names.append("Learned Acc.")
d["Learned Acc."] = Series(est_accs, index=lf_names)
return DataFrame(data=d, index=lf_names)[col_names]
def single_lf_summary(Y_p, Y=None):
"""Calculates coverage, overlap, conflicts, and accuracy for a single LF
Args:
Y_p: a np.array or torch.Tensor of predicted labels
Y: a np.array or torch.Tensor of true labels (if known)
"""
L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape(-1, 1))
return lf_summary(L, Y)
def error_buckets(gold, pred, X=None):
"""Group items by error buckets
Args:
gold: an array-like of gold labels (ints)
pred: an array-like of predictions (ints)
X: an iterable of items
Returns:
buckets: A dict of items where buckets[i,j] is a list of items with
predicted label i and true label j. If X is None, return indices
instead.
For a binary problem with (1=positive, 2=negative):
buckets[1,1] = true positives
buckets[1,2] = false positives
buckets[2,1] = false negatives
buckets[2,2] = true negatives
"""
buckets = defaultdict(list)
gold = arraylike_to_numpy(gold)
pred = arraylike_to_numpy(pred)
for i, (y, l) in enumerate(zip(pred, gold)):
buckets[y, l].append(X[i] if X is not None else i)
return buckets
def confusion_matrix(
gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True
):
"""A shortcut method for building a confusion matrix all at once.
Args:
gold: an array-like of gold labels (ints)
pred: an array-like of predictions (ints)
null_pred: If True, include the row corresponding to null predictions
null_gold: If True, include the col corresponding to null gold labels
normalize: if True, divide counts by the total number of items
pretty_print: if True, pretty-print the matrix before returning
"""
conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold)
gold = arraylike_to_numpy(gold)
pred = arraylike_to_numpy(pred)
conf.add(gold, pred)
mat = conf.compile()
if normalize:
mat = mat / len(gold)
if pretty_print:
conf.display(normalize=normalize)
return mat
class ConfusionMatrix(object):
"""
An iteratively built abstention-aware confusion matrix with pretty printing
Assumed axes are true label on top, predictions on the side.
"""
def __init__(self, null_pred=False, null_gold=False):
"""
Args:
null_pred: If True, include the row corresponding to null
predictions
null_gold: If True, include the col corresponding to null gold
labels
"""
self.counter = Counter()
self.mat = None
self.null_pred = null_pred
self.null_gold = null_gold
def __repr__(self):
if self.mat is None:
self.compile()
return str(self.mat)
def add(self, gold, pred):
"""
Args:
gold: a np.ndarray of gold labels (ints)
pred: a np.ndarray of predictions (ints)
"""
self.counter.update(zip(gold, pred))
def compile(self, trim=True):
k = max([max(tup) for tup in self.counter.keys()]) + 1 # include 0
mat = np.zeros((k, k), dtype=int)
for (y, l), v in self.counter.items():
mat[l, y] = v
if trim and not self.null_pred:
mat = mat[1:, :]
if trim and not self.null_gold:
mat = mat[:, 1:]
self.mat = mat
return mat
def display(self, normalize=False, indent=0, spacing=2, decimals=3, mark_diag=True):
mat = self.compile(trim=False)
m, n = mat.shape
tab = " " * spacing
margin = " " * indent
# Print headers
s = margin + " " * (5 + spacing)
for j in range(n):
if j == 0 and not self.null_gold:
continue
s += f" y={j} " + tab
print(s)
# Print data
for i in range(m):
# Skip null predictions row if necessary
if i == 0 and not self.null_pred:
continue
s = margin + f" l={i} " + tab
for j in range(n):
# Skip null gold if necessary
if j == 0 and not self.null_gold:
continue
else:
if i == j and mark_diag and normalize:
s = s[:-1] + "*"
if normalize:
s += f"{mat[i,j]/sum(mat[i,1:]):>5.3f}" + tab
else:
s += f"{mat[i,j]:^5d}" + tab
print(s)
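# Editor's note (illustrative sketch, not part of the original file): minimal usage of the helpers
# above, with 0 reserved for abstains and classes numbered from 1:
#   gold = [1, 1, 2, 2]
#   pred = [1, 2, 2, 2]
#   mat = confusion_matrix(gold, pred, pretty_print=False)
#   # mat[i, j] counts items predicted as class i+1 whose gold label is j+1,
#   # so here mat == [[1, 0], [1, 2]]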
avg_line_length: 33.809392 | max_line_length: 88 | alphanum_fraction: 0.605523

Row 4
hexsha: 4a195515e9992755854332385ee261a5282a1fe9 | size: 7,085 | ext: py | lang: Python
max_stars: path=pgopttune/workload/sampled_workload.py, repo=ssl-oyamata/postgres_opttune, head=d31088c575097ec6d88f2aa22d4acc47593d3566, licenses=["Apache-2.0"], count=28, events=2020-02-01T11:29:38.000Z / 2022-03-11T15:02:27.000Z
max_issues: path=pgopttune/workload/sampled_workload.py, repo=ssl-oyamata/postgres_opttune, head=d31088c575097ec6d88f2aa22d4acc47593d3566, licenses=["Apache-2.0"], count=null, events=null/null
max_forks: path=pgopttune/workload/sampled_workload.py, repo=ssl-oyamata/postgres_opttune, head=d31088c575097ec6d88f2aa22d4acc47593d3566, licenses=["Apache-2.0"], count=2, events=2020-02-03T10:59:41.000Z / 2021-12-17T03:11:08.000Z
content:
import os
from logging import getLogger
import datetime
import pickle
import multiprocessing
from psycopg2.extras import DictCursor
from pgopttune.workload.workload import Workload
from pgopttune.utils.pg_connect import get_pg_connection
from pgopttune.config.postgres_server_config import PostgresServerConfig
from pgopttune.config.workload_sampling_config import WorkloadSamplingConfig
from pgopttune.workload.sampled_transaction import SampledTransaction
logger = getLogger(__name__)
class SampledWorkload(Workload):
def __init__(self,
postgres_server_config: PostgresServerConfig,
workload_sampling_config: WorkloadSamplingConfig,
start_unix_time, end_unix_time, my_transactions: list = None):
super().__init__(postgres_server_config)
self.workload_sampling_config = workload_sampling_config
self.start_unix_time = start_unix_time
self.end_unix_time = end_unix_time
if my_transactions is None:
self.my_transactions = []
self.extract_workload()
else:
self.my_transactions = my_transactions
def extract_workload(self):
extract_workload_sql = '''
SELECT
-- log_time,
-- query_stat_time = log_time - duration - start_unix_time
(log_time::timestamp(3) with time zone - substring(message from '(?<=duration: ).*ms')::interval
- to_timestamp(%s)) AS query_stat_time,
-- database_name,
session_id,
-- substring(message from '(?<=duration: ).*(?= ms)') AS duration,
substring(message from '(?<=statement: ).*') AS statement
FROM
csv_log
WHERE
log_time > to_timestamp(%s) AND
log_time <= to_timestamp(%s) AND
database_name = %s AND
message LIKE '%%duration%%'
ORDER BY session_id,
session_line_num;
-- log_time;
'''
with get_pg_connection(dsn=self.workload_sampling_config.dsn) as conn:
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(extract_workload_sql, (self.start_unix_time, self.start_unix_time, self.end_unix_time,
self.postgres_server_config.database))
workload_rows = cur.fetchall()
# logger.debug("workload_rows : {}".format(workload_rows))
self._create_transactions(workload_rows)
def _create_transactions(self, workload_rows):
query_stat_time = []
session_id = ''
statement = []
        for index, row in enumerate(workload_rows):
            if session_id == row[1] or index == 0:  # statement belongs to the same session
                query_stat_time.append(row[0])
                session_id = row[1]
                statement.append(row[2])
            else:  # a new session begins: store the previous session as a transaction
                my_transaction = SampledTransaction(session_id, query_stat_time, statement)
                self.my_transactions.append(my_transaction)
                query_stat_time = [row[0]]
                session_id = row[1]
                statement = [row[2]]
        if statement:  # store the statements accumulated for the last session
            self.my_transactions.append(SampledTransaction(session_id, query_stat_time, statement))
def save_sampled_workload(self):
save_file_name = datetime.datetime.fromtimestamp(self.start_unix_time).strftime("%Y-%m-%d_%H%M%S.%f") + \
"-" \
+ datetime.datetime.fromtimestamp(self.end_unix_time).strftime("%Y-%m-%d_%H%M%S.%f") + ".pkl"
save_file_path = os.path.join("workload_data", save_file_name)
with open(save_file_path, 'wb') as f:
pickle.dump(self, f)
return save_file_path
def run(self):
session_num = len(self.my_transactions) # number of session
logger.debug("Number of session : {} ".format(session_num))
with multiprocessing.Pool(session_num) as p:
args = range(session_num)
elapsed_times = p.map(self._run_transaction, args)
logger.debug("Transactions elapsed times : {} ".format(elapsed_times))
elapsed_time = sum(elapsed_times)
# single process execute #
# for index, my_transaction in enumerate(self.my_transactions):
# logger.debug("Transaction's statement : {}".format(my_transaction.statement))
# transaction_elapsed_time = my_transaction.run(self._postgres_server_config)
# logger.debug("elapsed time : {0:.4f} s".format(transaction_elapsed_time))
# elapsed_time += transaction_elapsed_time
# logger.debug("Transactions elapsed time(sum) : {0:.4f} s".format(elapsed_time))
logger.debug("Transactions elapsed time(sum) : {0:.4f} s".format(elapsed_time))
return elapsed_time
@classmethod
def load_sampled_workload(cls, load_file_path, postgres_server_config: PostgresServerConfig = None):
with open(load_file_path, 'rb') as f:
workload = pickle.load(f)
if postgres_server_config is not None:
workload.postgres_server_config = postgres_server_config
return workload
def data_load(self):
# TODO:
logger.warning("At the moment, in the sampled workload, The data reload function is not implemented.")
def warm_up(self):
# TODO:
logger.warning("At the moment, in the sampled workload, The warm up function is not implemented.")
def _run_transaction(self, transaction_index=0):
# logger.debug("Transaction's statement : {}".format(self.my_transactions[transaction_index].statement))
transaction_elapsed_time = self.my_transactions[transaction_index].run(self.postgres_server_config)
# logger.debug("elapsed time : {0:.4f} s".format(transaction_elapsed_time))
return transaction_elapsed_time
if __name__ == "__main__":
from pgopttune.config.postgres_server_config import PostgresServerConfig
from logging import basicConfig, DEBUG
basicConfig(level=DEBUG)
conf_path = './conf/postgres_opttune.conf'
postgres_server_config_test = PostgresServerConfig(conf_path) # PostgreSQL Server config
workload_sampling_config_test = WorkloadSamplingConfig(conf_path)
sampled_workload = SampledWorkload(start_unix_time=1593093506.9530554, end_unix_time=1593093567.088895,
workload_sampling_config=workload_sampling_config_test,
postgres_server_config=postgres_server_config_test)
save_file = sampled_workload.save_sampled_workload()
logger.debug("run transactions ")
workload_elapsed_time = sampled_workload.run()
logger.debug(workload_elapsed_time)
load_workload = SampledWorkload.load_sampled_workload(save_file, postgres_server_config=postgres_server_config_test)
logger.debug("run transactions using saved file")
load_workload_elapsed_time = load_workload.run()
logger.debug(load_workload_elapsed_time)
logger.debug("finised...")
logger.debug(workload_elapsed_time)
logger.debug(load_workload_elapsed_time)
# my_workload.extract_workload()
| 46.611842
| 120
| 0.666902
|
4a19552a40e985fb28da65c7c236f1b33261f67f
| 1,366
|
py
|
Python
|
tests/test_api_public.py
|
snowdensb/braindump
|
815ae0afebcf867f02143f3ab9cf88b1d4dacdec
|
[
"MIT"
] | 631
|
2015-01-20T17:32:54.000Z
|
2022-01-27T04:34:59.000Z
|
tests/test_api_public.py
|
iknownothing/braindump
|
9640dd03f99851dbd34dd6cac98a747a4a591b01
|
[
"MIT"
] | 241
|
2015-01-20T16:37:53.000Z
|
2017-01-10T00:28:04.000Z
|
tests/test_api_public.py
|
iknownothing/braindump
|
9640dd03f99851dbd34dd6cac98a747a4a591b01
|
[
"MIT"
] | 92
|
2015-11-27T18:33:18.000Z
|
2022-02-19T18:55:44.000Z
|
import json
from flask import url_for
from api_base import ApiBaseTestCase
class PublicApiTestCase(ApiBaseTestCase):
def test_public_stats_empty(self):
res = self.client.get('/api/v1/public/stats')
json_res = json.loads(res.data.decode('utf-8'))
self.assertEqual(0, json_res['users'])
self.assertEqual(0, json_res['notes'])
def test_public_stats_with_user(self):
self.add_user()
self.add_other_user()
res = self.client.get('/api/v1/public/stats')
json_res = json.loads(res.data.decode('utf-8'))
self.assertEqual(2, json_res['users'])
    def test_public_stats_with_notes(self):
u = self.add_user()
nb = self.add_notebook(u)
note = self.add_note(nb, u)
res = self.client.get('/api/v1/public/stats')
json_res = json.loads(res.data.decode('utf-8'))
self.assertEqual(1, json_res['users'])
self.assertEqual(1, json_res['notes'])
u1n2 = self.add_note(nb, u)
u1n3 = self.add_note(nb, u)
u2 = self.add_other_user()
nb2 = self.add_notebook(u2)
note = self.add_note(nb2, u2)
res = self.client.get('/api/v1/public/stats')
json_res = json.loads(res.data.decode('utf-8'))
self.assertEqual(2, json_res['users'])
self.assertEqual(4, json_res['notes'])
| 25.773585
| 55
| 0.622255
|
4a19552d840e37d59b1286e2c48303d68479298c
| 4,226
|
py
|
Python
|
07_train/privacy/tensorflow_privacy/privacy/membership_inference_attack/keras_evaluation_example.py
|
dpai/workshop
|
d4936da77dac759ba2bac95a9584fde8e86c6b2b
|
[
"Apache-2.0"
] | 2,327
|
2020-03-01T09:47:34.000Z
|
2021-11-25T12:38:42.000Z
|
07_train/privacy/tensorflow_privacy/privacy/membership_inference_attack/keras_evaluation_example.py
|
trideau/Data-Science-with-AWS-Workshop
|
7dbe7989fa99e88544da8bf262beec907c536093
|
[
"Apache-2.0"
] | 209
|
2020-03-01T17:14:12.000Z
|
2021-11-08T20:35:42.000Z
|
07_train/privacy/tensorflow_privacy/privacy/membership_inference_attack/keras_evaluation_example.py
|
trideau/Data-Science-with-AWS-Workshop
|
7dbe7989fa99e88544da8bf262beec907c536093
|
[
"Apache-2.0"
] | 686
|
2020-03-03T17:24:51.000Z
|
2021-11-25T23:39:12.000Z
|
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""An example for using keras_evaluation."""
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import get_flattened_attack_metrics
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import SlicingSpec
from tensorflow_privacy.privacy.membership_inference_attack.keras_evaluation import MembershipInferenceCallback
from tensorflow_privacy.privacy.membership_inference_attack.keras_evaluation import run_attack_on_keras_model
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.02, 'Learning rate for training')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('epochs', 100, 'Number of epochs')
flags.DEFINE_string('model_dir', None, 'Model directory.')
flags.DEFINE_bool('tensorboard_merge_classifiers', False, 'If true, plot '
'different classifiers with the same slicing_spec and metric '
'in the same figure.')
def small_cnn():
"""Setup a small CNN for image classification."""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=(32, 32, 3)))
for _ in range(3):
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(10))
return model
def load_cifar10():
"""Loads CIFAR10 data."""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = np.array(x_train, dtype=np.float32) / 255
x_test = np.array(x_test, dtype=np.float32) / 255
y_train = np.array(y_train, dtype=np.int32).squeeze()
y_test = np.array(y_test, dtype=np.int32).squeeze()
return x_train, y_train, x_test, y_test
def main(unused_argv):
# Load training and test data.
x_train, y_train, x_test, y_test = load_cifar10()
# Get model, optimizer and specify loss.
model = small_cnn()
optimizer = tf.keras.optimizers.SGD(lr=FLAGS.learning_rate, momentum=0.9)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
# Get callback for membership inference attack.
mia_callback = MembershipInferenceCallback(
(x_train, y_train),
(x_test, y_test),
slicing_spec=SlicingSpec(entire_dataset=True, by_class=True),
attack_types=[AttackType.THRESHOLD_ATTACK,
AttackType.K_NEAREST_NEIGHBORS],
tensorboard_dir=FLAGS.model_dir,
tensorboard_merge_classifiers=FLAGS.tensorboard_merge_classifiers)
# Train model with Keras
model.fit(
x_train,
y_train,
epochs=FLAGS.epochs,
validation_data=(x_test, y_test),
batch_size=FLAGS.batch_size,
callbacks=[mia_callback],
verbose=2)
print('End of training attack:')
attack_results = run_attack_on_keras_model(
model, (x_train, y_train), (x_test, y_test),
slicing_spec=SlicingSpec(entire_dataset=True, by_class=True),
attack_types=[
AttackType.THRESHOLD_ATTACK, AttackType.K_NEAREST_NEIGHBORS
])
att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
attack_results)
print('\n'.join([' %s: %.4f' % (', '.join([s, t, m]), v) for t, s, m, v in
zip(att_types, att_slices, att_metrics, att_values)]))
if __name__ == '__main__':
app.run(main)
| 37.39823
| 111
| 0.738287
|
4a1955aa74dac2fcaea39c399d527ab177084934
| 1,126
|
py
|
Python
|
lagom/envs/spaces/base.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/envs/spaces/base.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/envs/spaces/base.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
import numpy as np
class Space(object):
"""
Base class for observation and action space e.g. applied to Env.
"""
def __init__(self, shape=None, dtype=None):
if shape is None:
self.shape = None
else:
self.shape = tuple(shape)
if dtype is None:
self.dtype = None
else:
self.dtype = np.dtype(dtype) # create a dtype object
def sample(self):
"""
Uniformly sample an element from this space.
"""
raise NotImplementedError
@property
def flat_dim(self):
"""
Return a flattened dimension
"""
raise NotImplementedError
def flatten(self, x):
"""
Return the flattened x.
"""
raise NotImplementedError
def unflatten(self, x):
"""
Return the unflattened x according to defined shape
"""
raise NotImplementedError
def contains(self, x):
"""
Return True if x is contained in this space.
"""
raise NotImplementedError
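# A minimal sketch of a concrete subclass, for illustration only; lagom's real
# space implementations live in their own modules and may differ in detail.
class _ExampleDiscrete(Space):
    """Toy discrete space with n categorical values, flattened to a one-hot vector."""
    def __init__(self, n):
        super().__init__(shape=(), dtype=np.int32)
        self.n = n
    def sample(self):
        return int(np.random.randint(self.n))
    @property
    def flat_dim(self):
        return self.n
    def flatten(self, x):
        onehot = np.zeros(self.n, dtype=np.float32)
        onehot[x] = 1.0
        return onehot
    def unflatten(self, x):
        return int(np.argmax(x))
    def contains(self, x):
        return isinstance(x, int) and 0 <= x < self.n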
| 23.458333
| 68
| 0.530195
|
4a1955f613008c299430b3b79294f9c780e6d4b7
| 354
|
py
|
Python
|
multiprocessing_test.py
|
TheSriram/SwiftParallelizer
|
85d70b537852a4b7b940ba19ff9e6c6db3daebaf
|
[
"Apache-2.0"
] | 1
|
2015-05-21T14:32:37.000Z
|
2015-05-21T14:32:37.000Z
|
multiprocessing_test.py
|
TheSriram/SwiftParallelizer
|
85d70b537852a4b7b940ba19ff9e6c6db3daebaf
|
[
"Apache-2.0"
] | null | null | null |
multiprocessing_test.py
|
TheSriram/SwiftParallelizer
|
85d70b537852a4b7b940ba19ff9e6c6db3daebaf
|
[
"Apache-2.0"
] | null | null | null |
import multiprocessing
import multiprocessing_import_worker
# def worker(num):
# """thread worker function"""
# print 'Worker:'
# return
if __name__ == '__main__':
jobs = []
for i in range(3):
p = multiprocessing.Process(target=multiprocessing_import_worker.worker,args=('sriram_1',))
jobs.append(p)
        p.start()
    for job in jobs:
        # Wait for each worker process to finish before the parent exits.
        job.join()
| 27.230769
| 99
| 0.655367
|
4a19565ca4f6bb2c6b621e92b991e28df0dab1c9
| 5,542
|
py
|
Python
|
datumaro/datumaro/plugins/datumaro_format/extractor.py
|
javoweb/cvat
|
684544d2a06c192e7155f655897e6360b4a3be37
|
[
"MIT"
] | 2
|
2020-07-24T18:12:18.000Z
|
2020-08-12T09:14:07.000Z
|
datumaro/datumaro/plugins/datumaro_format/extractor.py
|
javoweb/cvat
|
684544d2a06c192e7155f655897e6360b4a3be37
|
[
"MIT"
] | 24
|
2020-11-13T18:43:15.000Z
|
2022-03-12T00:21:52.000Z
|
datumaro/datumaro/plugins/datumaro_format/extractor.py
|
javoweb/cvat
|
684544d2a06c192e7155f655897e6360b4a3be37
|
[
"MIT"
] | 5
|
2020-07-01T18:02:48.000Z
|
2021-01-22T02:21:48.000Z
|
# Copyright (C) 2019 Intel Corporation
#
# SPDX-License-Identifier: MIT
import json
import os.path as osp
from datumaro.components.extractor import (SourceExtractor, DatasetItem,
AnnotationType, Label, RleMask, Points, Polygon, PolyLine, Bbox, Caption,
LabelCategories, MaskCategories, PointsCategories
)
from datumaro.util.image import Image
from .format import DatumaroPath
class DatumaroExtractor(SourceExtractor):
def __init__(self, path):
assert osp.isfile(path), path
rootpath = ''
if path.endswith(osp.join(DatumaroPath.ANNOTATIONS_DIR, osp.basename(path))):
rootpath = path.rsplit(DatumaroPath.ANNOTATIONS_DIR, maxsplit=1)[0]
images_dir = ''
if rootpath and osp.isdir(osp.join(rootpath, DatumaroPath.IMAGES_DIR)):
images_dir = osp.join(rootpath, DatumaroPath.IMAGES_DIR)
self._images_dir = images_dir
super().__init__(subset=osp.splitext(osp.basename(path))[0])
with open(path, 'r') as f:
parsed_anns = json.load(f)
self._categories = self._load_categories(parsed_anns)
self._items = self._load_items(parsed_anns)
def categories(self):
return self._categories
def __iter__(self):
for item in self._items:
yield item
def __len__(self):
return len(self._items)
@staticmethod
def _load_categories(parsed):
categories = {}
parsed_label_cat = parsed['categories'].get(AnnotationType.label.name)
if parsed_label_cat:
label_categories = LabelCategories()
for item in parsed_label_cat['labels']:
label_categories.add(item['name'], parent=item['parent'])
categories[AnnotationType.label] = label_categories
parsed_mask_cat = parsed['categories'].get(AnnotationType.mask.name)
if parsed_mask_cat:
colormap = {}
for item in parsed_mask_cat['colormap']:
colormap[int(item['label_id'])] = \
(item['r'], item['g'], item['b'])
mask_categories = MaskCategories(colormap=colormap)
categories[AnnotationType.mask] = mask_categories
parsed_points_cat = parsed['categories'].get(AnnotationType.points.name)
if parsed_points_cat:
point_categories = PointsCategories()
for item in parsed_points_cat['items']:
point_categories.add(int(item['label_id']),
item['labels'], adjacent=item['adjacent'])
categories[AnnotationType.points] = point_categories
return categories
def _load_items(self, parsed):
items = []
for item_desc in parsed['items']:
item_id = item_desc['id']
image = None
image_info = item_desc.get('image', {})
if image_info:
image_path = osp.join(self._images_dir,
                    image_info.get('path', '')) # relative or absolute paths both work
image = Image(path=image_path, size=image_info.get('size'))
annotations = self._load_annotations(item_desc)
item = DatasetItem(id=item_id, subset=self._subset,
annotations=annotations, image=image)
items.append(item)
return items
def _load_annotations(self, item):
parsed = item['annotations']
loaded = []
for ann in parsed:
ann_id = ann.get('id')
ann_type = AnnotationType[ann['type']]
attributes = ann.get('attributes')
group = ann.get('group')
if ann_type == AnnotationType.label:
label_id = ann.get('label_id')
loaded.append(Label(label=label_id,
id=ann_id, attributes=attributes, group=group))
elif ann_type == AnnotationType.mask:
label_id = ann.get('label_id')
rle = ann['rle']
rle['counts'] = rle['counts'].encode('ascii')
loaded.append(RleMask(rle=rle, label=label_id,
id=ann_id, attributes=attributes, group=group))
elif ann_type == AnnotationType.polyline:
label_id = ann.get('label_id')
points = ann.get('points')
loaded.append(PolyLine(points, label=label_id,
id=ann_id, attributes=attributes, group=group))
elif ann_type == AnnotationType.polygon:
label_id = ann.get('label_id')
points = ann.get('points')
loaded.append(Polygon(points, label=label_id,
id=ann_id, attributes=attributes, group=group))
elif ann_type == AnnotationType.bbox:
label_id = ann.get('label_id')
x, y, w, h = ann.get('bbox')
loaded.append(Bbox(x, y, w, h, label=label_id,
id=ann_id, attributes=attributes, group=group))
elif ann_type == AnnotationType.points:
label_id = ann.get('label_id')
points = ann.get('points')
loaded.append(Points(points, label=label_id,
id=ann_id, attributes=attributes, group=group))
elif ann_type == AnnotationType.caption:
caption = ann.get('caption')
loaded.append(Caption(caption,
id=ann_id, attributes=attributes, group=group))
else:
raise NotImplementedError()
return loaded
| 35.754839
| 85
| 0.589318
|
4a1956b1e8d7b899f7e57c0c32063ff20e6a9840
| 808
|
py
|
Python
|
src/explorer.py
|
floraxue/active-rl
|
db90c24dd70c3bbaa704e354f63ffaa6c2d7d851
|
[
"MIT"
] | null | null | null |
src/explorer.py
|
floraxue/active-rl
|
db90c24dd70c3bbaa704e354f63ffaa6c2d7d851
|
[
"MIT"
] | null | null | null |
src/explorer.py
|
floraxue/active-rl
|
db90c24dd70c3bbaa704e354f63ffaa6c2d7d851
|
[
"MIT"
] | null | null | null |
class Explorer:
"""
    Epsilon-greedy exploration schedule with a linearly decayed epsilon.
    Args:
        start_eps: initial (maximum) value of epsilon
        end_eps: final (minimum) value of epsilon
        decay_steps: number of steps over which epsilon decays from start_eps to end_eps
"""
def __init__(self, start_eps, end_eps, decay_steps=100000):
assert 0 <= start_eps <= 1, 'invalid start_eps'
assert 0 <= end_eps <= 1, 'invalid end_eps'
assert decay_steps >= 0
self.start_eps = start_eps
self.end_eps = end_eps
self.decay_steps = decay_steps
self.eps = start_eps
def value(self, t):
if t >= self.decay_steps:
return self.end_eps
else:
eps_diff = self.end_eps - self.start_eps
return self.start_eps + eps_diff * (t / self.decay_steps)
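# A minimal usage sketch; the schedule values below are illustrative only.
if __name__ == "__main__":
    explorer = Explorer(start_eps=1.0, end_eps=0.1, decay_steps=100)
    for step in (0, 50, 100, 200):
        # epsilon decays linearly from 1.0 down to 0.1 over the first 100 steps
        print(step, explorer.value(step))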
| 28.857143
| 69
| 0.617574
|
4a19571fefb194c476560375bc58f92935b6e59f
| 9,349
|
py
|
Python
|
Help/gen-sphinx/make_rest.py
|
constellation-app/miscellaneous
|
2c80e3472d076afb3bf7a944088b3fa93437b238
|
[
"Apache-2.0"
] | 1
|
2019-12-16T02:50:11.000Z
|
2019-12-16T02:50:11.000Z
|
Help/gen-sphinx/make_rest.py
|
constellation-app/miscellaneous
|
2c80e3472d076afb3bf7a944088b3fa93437b238
|
[
"Apache-2.0"
] | null | null | null |
Help/gen-sphinx/make_rest.py
|
constellation-app/miscellaneous
|
2c80e3472d076afb3bf7a944088b3fa93437b238
|
[
"Apache-2.0"
] | 1
|
2019-12-18T09:55:33.000Z
|
2019-12-18T09:55:33.000Z
|
import argparse
from pathlib import Path
import xml.etree.ElementTree as ET
import shutil
import datetime
import pprint
from parsehelp import parse_html
# Convert NetBeans HelpSet files to ReStructuredText suitable for Sphinx.
#
# Find all the package-info.java files that contain '@HelpSetRegistration'.
# Get the name of the helpset xml and parse that to get the map and toc values.
# Merge the tocs into a single toc.
# Add the helpId as a comment to each file.
ITEMS = '__items__'
INDEX_RST = '''.. Constellation documentation master file, created by
{} on {}.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
{}
{}
.. toctree::
:maxdepth: 2
:caption: Contents:
{}
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`
'''
def helpsets(dir):
"""Yield NetBeans HelpSet marker files."""
for pinfo in dir.rglob('package-info.java'):
with pinfo.open() as f:
for line in f.readlines():
if line.startswith('@HelpSetRegistration'):
q1 = line.index('"')
q2 = line.index('"', q1+1)
name = line[q1+1:q2]
hs = pinfo.with_name(name)
yield hs
def parse_helpset(hs):
"""Parse a -hs.xml helpset file."""
hs_xml = ET.parse(str(hs))
root = hs_xml.getroot()
# print(root)
refs = {}
for child in root:
if child.tag=='maps':
mapref = child.find('mapref')
location = mapref.attrib['location']
# print(location)
refs['location'] = location
elif child.tag=='view':
type = child.find('type').text
data = child.find('data').text
refs[type] = data
return refs
def parse_map(hs, m):
"""Parse a -map.html helpset mapping file."""
m = hs.with_name(m)
m_xml = ET.parse(str(m))
root = m_xml.getroot()
maps = {}
for child in root:
assert child.tag=='mapID'
target = child.attrib['target']
url = child.attrib['url']
maps[target] = hs.with_name(url)
return maps
def parse_toc(hs, toc):
"""Parse a -toc.xml helpset table-of-contents file.
Slightly trickier, because there are levels of <tocitem> tags.
Each level has a 'text' attrib, but only the leaves have
    a 'target' attrib.
Just do it recursively.
"""
# Leaf items are collected in a list.
#
def toc_level(tocs, root):
for item in root.findall('tocitem'):
text = item.attrib['text']
if 'target' in item.attrib:
# This is a leaf referencing a help target.
#
tocs[ITEMS].append((text, item.attrib['target']))
else:
if text not in tocs:
tocs[text] = {ITEMS:[]}
toc_level(tocs[text], item)
# If there are no leaves at this level, remove the empty list.
#
if not tocs[text][ITEMS]:
del tocs[text][ITEMS]
tocs = {}
toc = hs.with_name(toc)
toc_xml = ET.parse(str(toc))
root = toc_xml.getroot()
toc_level(tocs, root)
return tocs
def merge_tocs(toc_list):
"""Merge a list of tocs into a single toc.
Each level of toc is a dict with two optional keys:
* name - the name of the level, contains a dict of the next level
* '__items__' - a list of (name,target) tuples.
Recursive, obviously.
"""
def merge_level(merged, level):
for k,v in level.items():
if k==ITEMS:
if ITEMS not in merged:
merged[ITEMS] = []
merged[ITEMS].extend(v)
else:
if k not in merged:
merged[k] = {}
merge_level(merged[k], v)
toc1 = {}
for toc in toc_list:
merge_level(toc1, toc)
return toc1
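# Illustrative sketch of the merge (the toc data below is made up, not from a real helpset):
#   toc_a = {'Views': {'__items__': [('Histogram', 'hist-id')]}}
#   toc_b = {'Views': {'__items__': [('Table', 'table-id')]}}
#   merge_tocs([toc_a, toc_b])
#   => {'Views': {'__items__': [('Histogram', 'hist-id'), ('Table', 'table-id')]}}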
def generate_pages(outdir, merged_tocs, merged_maps):
"""Generate documentation in a proper directory hierarchy.
    This means an index.rst file at each level.
"""
def simple_name(name):
return ''.join(c if '0'<=c<='9' or 'a'<=c<='z' else '_' for c in name.lower())
def ensure_dir(dir, category):
d = dir / category
if not d.is_dir():
d.mkdir()
def tree(category, toc, levels):
level = '/'.join(levels)
ensure_dir(outdir, level)
if '__items__' in toc:
for doc in toc['__items__']:
help_id = doc[1]
in_html = merged_maps[help_id]
out_rst = outdir / level / Path(in_html).with_suffix('.rst').name
yield level, category, in_html, out_rst, help_id
for sub_category in toc:
cat = simple_name(sub_category)
if sub_category!='__items__':
sublevel = levels[:]
sublevel.append(cat)
# Yield the index of the next level down.
# index files don't have matching HTML files or NetBeans helpIds.
#
sl = '/'.join(sublevel)
yield level, category, None, outdir / sl / 'index.rst', None
# Recursively yield the next level down.
#
yield from tree(sub_category, toc[sub_category], sublevel)
yield from tree('CONSTELLATION', merged_tocs, [])
if __name__=='__main__':
def dir_req(s):
"""Require this parameter to be a directory, and convert it to a Path instance."""
p = Path(s)
if not p.is_dir():
raise argparse.ArgumentTypeError('Must be a directory')
return p
parser = argparse.ArgumentParser(description='Process existing HTML to ReST.')
parser.add_argument('--indir', type=dir_req, required=True, help='Directory containing NetBeans help')
parser.add_argument('--outdir', type=dir_req, required=True, help='Output directory tree')
args = parser.parse_args()
print(args.indir, args.outdir)
merged_maps = {}
toc_list = []
for hs in helpsets(args.indir):
# print(hs)
refs = parse_helpset(hs)
# print(refs)
maps = parse_map(hs, refs['location'])
# print(maps)
for target, url in maps.items():
if target in merged_maps:
raise ValueError(f'Target {target} already found')
merged_maps[target] = url
toc = parse_toc(hs, refs['javax.help.TOCView'])
# pprint.pprint(toc)
toc_list.append(toc)
# break
# pprint.pprint(toc_list)
merged_tocs = merge_tocs(toc_list)
# pprint.pprint(merged_tocs)
print()
# print(merged_tocs.keys())
print()
# print(merged_maps)
print()
# We need an index.rst in each directory.
# Keep track of the levels so we can generate them at the end.
#
levels = {}
# # We also need a mapping of helpId to help page.
# # NetBeans code runs on helpIds and we don't want to change that,
# # so the help service needs to accept a helpId and map it to the correct page.
# #
# help_map = {}
for level, category, in_html, out_rst, help_id in generate_pages(args.outdir, merged_tocs, merged_maps):
lc = level,category
if lc not in levels:
levels[lc] = []
levels[lc].append(out_rst)
if in_html:
# This is a help .rst file (not a category / index.rst file).
#
print(in_html)
rest, resources = parse_html(in_html)
with open(out_rst, 'w', encoding='utf8') as f:
f.write(rest)
# Include the helpId in a comment directive that can be
# detected at documentation build time to create the help_map.txt file.
#
f.write(f'\n.. help-id: {help_id}\n')
for res_source, res_target in resources:
s = in_html.parent / res_source
t = out_rst.parent / res_target
# print(f'Copying resource {s} to {t} ...')
shutil.copy(s, t)
# help_map[help_id] = out_rst
# Create an index.rst at each level.
# Each index.rst must have a reference to the index files below it.
#
now = datetime.datetime.now().isoformat(' ')[:19]
for (level, category), rst_files in levels.items():
pages = []
for page in rst_files:
p = Path(page)
if p.name=='index.rst':
entry = f'{p.parent.name}/index'
else:
entry = p.stem
pages.append(f' {entry}')
mup = '=' * len(category)
contents = INDEX_RST.format(__file__, now, category, mup, '\n'.join(pages))
with open(args.outdir / level / 'index.rst', 'w') as f:
f.write(contents)
# # Save the mapping from helpId to page, so NetBeans help knows where to find stuff.
# #
# # pprint.pprint(help_map)
# with open(args.outdir / 'help_map.txt', 'w') as f:
# for help_id, rst in help_map.items():
# rst = rst.with_suffix('')
# relative_rst = str(rst.relative_to(args.outdir)).replace('\\', '/')
# print(f'{help_id},{relative_rst}', file=f)
| 29.773885
| 108
| 0.565087
|
4a1957bb1adde82551f589c10b3525032638db04
| 400
|
py
|
Python
|
tests/test_cli.py
|
myeggs/ward
|
52fdf1a2768e5de26081e2990f6f3dc44cb9558c
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
myeggs/ward
|
52fdf1a2768e5de26081e2990f6f3dc44cb9558c
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
myeggs/ward
|
52fdf1a2768e5de26081e2990f6f3dc44cb9558c
|
[
"MIT"
] | null | null | null |
from click.testing import CliRunner
from ward import each, test
from ward._run import run
@test("Cannot use bar progress style with {output_style} output style")
def _(output_style=each("dots-global", "dots-module")):
runner = CliRunner()
result = runner.invoke(
run, ["test", "--progress-style", "bar", "--test-output-style", output_style]
)
assert result.exit_code == 2
| 26.666667
| 85
| 0.6875
|
4a1958d0420fd39f767a987c4e6e8a49a7c0bf37
| 6,748
|
py
|
Python
|
selecting_OOD_detector/utils/hyperparameter_search.py
|
the-mama-ai/selecting_OOD_detector
|
1708dd2e46826c6c7a641e5a2259c7003fd24584
|
[
"MIT"
] | null | null | null |
selecting_OOD_detector/utils/hyperparameter_search.py
|
the-mama-ai/selecting_OOD_detector
|
1708dd2e46826c6c7a641e5a2259c7003fd24584
|
[
"MIT"
] | null | null | null |
selecting_OOD_detector/utils/hyperparameter_search.py
|
the-mama-ai/selecting_OOD_detector
|
1708dd2e46826c6c7a641e5a2259c7003fd24584
|
[
"MIT"
] | 1
|
2022-03-07T15:39:30.000Z
|
2022-03-07T15:39:30.000Z
|
"""
A module with helper functions for running a hyperparameter search.
Code adapted from https://github.com/Pacmed/ehr_ood_detection/blob/master/src/experiments/hyperparameter_search.py
"""
from typing import Optional
import os
from sklearn.model_selection import ParameterSampler
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from selecting_OOD_detector.utils.general import check_and_convert_dfs_to_numpy, save_dictionary_as_json
from selecting_OOD_detector.models.novelty_estimator import NoveltyEstimator
def sample_hyperparameters(
model_name: str,
hyperparameter_grid: dict,
hyperparameters_names: dict,
n_evals: int = 20,
):
"""
    Sample the hyperparameters for different runs of the same model. The distributions that parameters are
    sampled from are defined in the provided hyperparameter grid.
Parameters
----------
model_name: str
Name of the model.
hyperparameter_grid: dict
Dictionary of all possible values to be tested.
hyperparameters_names: dict
        Dictionary containing model names and the names of hyperparameters that they use.
n_evals: int
Number of evaluations to run for the model.
Returns
-------
sampled_params: list
List of dictionaries containing hyperparameters and their sampled values.
"""
sampled_params = list(
ParameterSampler(
param_distributions={
hyperparam: hyperparameter_grid[hyperparam]
for hyperparam in hyperparameters_names[model_name]
if hyperparam in hyperparameter_grid
},
n_iter=n_evals,
)
)
return sampled_params
def evaluate_set_of_parameters(model: NoveltyEstimator,
X_train: pd.DataFrame,
X_val: pd.DataFrame,
train_params: dict,
y_train: Optional[pd.DataFrame] = None,
y_val: Optional[pd.DataFrame] = None):
"""
    Runs a single round of training and evaluation for a set of parameters.
Parameters
----------
model: NoveltyEstimator
Model to be trained and evaluated.
X_train: pd.DataFrame
Training data.
X_val: pd.DataFrame
Validation data to calculate scores on.
train_params: dict
Parameters to be added to ``train`` function of the model.
    y_train: Optional[pd.DataFrame]
        Labels corresponding to the training data. Only used for discriminator models.
    y_val: Optional[pd.DataFrame]
Labels corresponding to the validation data. Only used for discriminator models.
Returns
-------
score: float
Score corresponding to the performance of the model. Either AUC-ROC score of predicting the correct
labels for discriminators or likelihood of data for density estimators.
"""
X_train, X_val, y_train, y_val = check_and_convert_dfs_to_numpy([X_train, X_val, y_train, y_val])
model.train(X_train, y_train=y_train, **train_params)
# For density estimators, evaluate according to the highest likelihood on data (same as the lowest novelty score)
if model.model_type == "density_estimator":
preds = -model.get_novelty_score(X_val)
score = float(preds.mean())
# For discriminators, evaluate according to the lowest prediction error using AUC-ROC score
elif model.model_type == "discriminator":
preds = model.predict_proba(X_val)
if np.isnan(preds).all():
score = 0
else:
preds = preds[:, 1]
score = roc_auc_score(
y_true=y_val[~np.isnan(preds)],
y_score=preds[~np.isnan(preds)],
)
print(f"\tscore: {score}")
else:
raise NotImplementedError("Only density estimators and discriminators are implemented at the moment.")
return score
def evaluate_hyperparameters(model_name: str,
model_class: NoveltyEstimator,
X_train: pd.DataFrame,
X_val: pd.DataFrame,
hyperparameter_grid: dict,
hyperparameters_names: dict,
train_params: dict,
y_train: pd.DataFrame = None,
y_val: pd.DataFrame = None,
num_evals: int = 20,
save_intermediate_scores: bool = True,
save_dir: Optional[str] = None,
):
scores, sorted_scores = {}, {}
sampled_params = sample_hyperparameters(model_name,
hyperparameter_grid=hyperparameter_grid,
hyperparameters_names=hyperparameters_names,
n_evals=num_evals)
for run, param_set in enumerate(sampled_params):
print(f"\t{run + 1}/{len(sampled_params)}", end=" ")
param_set.update(input_size=X_train.shape[1])
model = model_class(**param_set)
# Run a single evaluation on the set of parameters
try:
score = evaluate_set_of_parameters(model=model, train_params=train_params,
X_train=X_train, X_val=X_val,
y_train=y_train, y_val=y_val)
        # In case of NaNs due to bad training parameters
except (ValueError, RuntimeError) as e:
print(f"\tskipped the current run due to an error: {str(e)}", end=" ")
score = -np.inf
if np.isnan(score):
score = -np.inf
# Save results of the single run
print(f"\tscore = {round(score, 2)}")
scores[run] = {"score": score, "hyperparameters": param_set}
        # Sort the scores so that the best performing parameters are displayed first
sorted_scores = dict(
list(sorted(scores.items(), key=lambda run: run[1]["score"], reverse=True))
)
# Save results for each run in case of an unexpected interruption
if save_intermediate_scores:
_save_hyperparameter_scores(scores=sorted_scores, model_name=model_name, save_dir=save_dir)
return sorted_scores
def _save_hyperparameter_scores(scores, model_name, save_dir=None):
"""
Saves scores and parameters for a model to a json file.
"""
if save_dir is None:
save_dir = "../data/hyperparameters/scores/"
save_dictionary_as_json(dictn=scores, save_name=f"scores_{model_name}", save_dir=save_dir)
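# A minimal usage sketch for sample_hyperparameters; the grid, model name and
# hyperparameter names below are illustrative only, not project defaults.
if __name__ == "__main__":
    example_grid = {"lr": [1e-3, 1e-2, 1e-1], "dropout": [0.0, 0.1, 0.5], "hidden_size": [32, 64, 128]}
    example_names = {"ExampleModel": ["lr", "dropout", "hidden_size"]}
    example_params = sample_hyperparameters("ExampleModel",
                                            hyperparameter_grid=example_grid,
                                            hyperparameters_names=example_names,
                                            n_evals=5)
    print(example_params)  # a list of 5 dicts, each one sampled hyperparameter combination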
| 37.488889
| 118
| 0.61559
|
4a195937448ee8d6507421cbb34f613baf3f0533
| 2,685
|
py
|
Python
|
deploy/demo-cli/blox-create-deployment.py
|
kylbarnes/blox
|
53e3b472581568fd8baccd9c5097800cf433bd77
|
[
"Apache-2.0"
] | null | null | null |
deploy/demo-cli/blox-create-deployment.py
|
kylbarnes/blox
|
53e3b472581568fd8baccd9c5097800cf433bd77
|
[
"Apache-2.0"
] | null | null | null |
deploy/demo-cli/blox-create-deployment.py
|
kylbarnes/blox
|
53e3b472581568fd8baccd9c5097800cf433bd77
|
[
"Apache-2.0"
] | 1
|
2018-08-04T19:10:28.000Z
|
2018-08-04T19:10:28.000Z
|
#!/usr/bin/env python
import json, os, sys
import common
def main(argv):
# Command Line Arguments
args = [{'arg':'--apigateway', 'dest':'apigateway', 'default':None, 'type':'boolean', 'help':'Call API Gateway endpoint'}]
if '--apigateway' in argv:
args.extend([{'arg':'--stack', 'dest':'stack', 'default':None, 'help':'CloudFormation stack name'}])
else:
args.extend([{'arg':'--host', 'dest':'host', 'default':'localhost:2000', 'help':'Blox Scheduler <Host>:<Port>'}])
args.extend([{'arg':'--environment', 'dest':'environment', 'default':None, 'help':'Blox environment name'}])
args.extend([{'arg':'--deployment-token', 'dest':'token', 'default':None, 'help':'Blox deployment token'}])
# Parse Command Line Arguments
params = common.parse_cli_args('Create Blox Deployment', args)
if params.apigateway:
run_apigateway(params)
else:
run_local(params)
# Call Blox Scheduler API Gateway Endpoint
def run_apigateway(params):
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "RestApi"]
restApi = common.run_shell_command(params.region, command)
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "ApiResource"]
restResource = common.run_shell_command(params.region, command)
uri = '/v1/environments/%s/deployments' % params.environment
queryParams = {'deploymentToken': params.token}
uri += common.get_query_string(queryParams)
command = ["apigateway", "test-invoke-method", "--rest-api-id", restApi['StackResourceDetail']['PhysicalResourceId'], "--resource-id", restResource['StackResourceDetail']['PhysicalResourceId'], "--http-method", "POST", "--headers", "{}", "--path-with-query-string", uri, "--body", ""]
response = common.run_shell_command(params.region, command)
print "HTTP Response Code: %d" % response['status']
try:
obj = json.loads(response['body'])
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print json.dumps(response, indent=2)
sys.exit(1)
# Call Blox Scheduler Local Endpoint
def run_local(params):
api = common.Object()
api.method = 'POST'
api.headers = {}
api.host = params.host
api.uri = '/v1/environments/%s/deployments' % params.environment
api.queryParams = {'deploymentToken': params.token}
api.data = None
response = common.call_api(api)
print "HTTP Response Code: %d" % response.status
try:
obj = json.loads(response.body)
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print response.body
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| 37.291667
| 285
| 0.699814
|
4a19596c10259847db51928cfabaf22ad7565089
| 3,985
|
py
|
Python
|
sitefab/Logger.py
|
ebursztein/SiteFab
|
3f8662fe5c91c7f631932cf333e6eae5e146077c
|
[
"Apache-2.0"
] | 10
|
2017-01-02T02:48:27.000Z
|
2019-09-18T22:44:29.000Z
|
sitefab/Logger.py
|
ebursztein/SiteFab
|
3f8662fe5c91c7f631932cf333e6eae5e146077c
|
[
"Apache-2.0"
] | 173
|
2016-12-29T05:24:31.000Z
|
2017-12-29T10:53:35.000Z
|
sitefab/Logger.py
|
ebursztein/SiteFab
|
3f8662fe5c91c7f631932cf333e6eae5e146077c
|
[
"Apache-2.0"
] | 8
|
2017-04-11T14:34:03.000Z
|
2019-06-17T09:29:17.000Z
|
""" Handle SiteFab log output
"""
import time
from collections import defaultdict
from collections import Counter
from jinja2 import Environment, FileSystemLoader
from . import utils
from . import files
class Logger():
""" SiteFab logging system
Note:
while the logging system render log in html using jinja2 it use
a completly separated on to avoid interferring with user
configuration. Templates are located in the config directory
under internal_template/
"""
def __init__(self, config, site):
self.config = config
self.site = site # reference to the main object
self.logs = {}
self.jinja2 = Environment(loader=FileSystemLoader(
str(self.config.template_dir)))
files.clean_dir(self.config.output_dir)
# statistics #
def write_stats(self):
"Output statistics about the execution"
# post per category
cat_stats = Counter()
cats = self.site.posts_by_category.get_as_dict()
for tag, data in cats.items():
cat_stats[tag] = data.meta.num_posts
# post per tag
tag_stats = Counter()
tags = self.site.posts_by_tag.get_as_dict()
for tag, data in tags.items():
tag_stats[tag] = data.meta.num_posts
template = self.jinja2.get_template(self.config.stats_template)
rv = template.render(cats=cat_stats.most_common(),
tags=tag_stats.most_common())
files.write_file(self.config.output_dir, "stats.html", rv)
def create_log(self, category, name, filename):
""" Create a new log
        Usually used to store a plugin's output or phase information
"""
log = utils.dict_to_objdict()
log.meta = utils.dict_to_objdict()
log.events = []
log.meta.name = name
log.meta.category = category
log.meta.filename = filename
log.meta.start_time = time.time()
log.meta.num_events = 0
log.meta.ok = 0
log.meta.skipped = 0
log.meta.errors = 0
log_id = "%s:%s" % (category, name)
self.logs[log_id] = log
return log_id
def record_event(self, log_id, target, severity, details):
""" Record a event to a given log
"""
if log_id not in self.logs:
return False
# recording event
event = utils.dict_to_objdict()
event.time = time.time()
event.target = target
event.severity = severity
event.details = details
self.logs[log_id].meta.num_events += 1
# severity
if severity == self.site.OK:
self.logs[log_id].meta.ok += 1
event.severity = "OK"
elif severity == self.site.SKIPPED:
event.severity = "SKIPPED"
self.logs[log_id].meta.skipped += 1
elif severity == self.site.ERROR:
event.severity = "ERROR"
self.logs[log_id].meta.errors += 1
self.logs[log_id].events.append(event)
return True
def write_log(self, log_id):
""" Write log
"""
if log_id not in self.logs:
return False
lg = self.logs[log_id]
lg.meta.exec_time = round(time.time() - lg.meta.start_time, 2)
template = self.jinja2.get_template(str(self.config.log_template))
rv = template.render(events=lg.events, meta=lg.meta)
files.write_file(self.config.output_dir, lg.meta.filename, rv)
return True
def write_log_index(self):
" Generate the index.html file that list all generated logs"
# allows to output by group
logs = defaultdict(list)
for l in self.logs.values():
logs[l.meta.category].append(l)
template = self.jinja2.get_template(self.config.log_index_template)
rv = template.render(logs=logs)
files.write_file(self.config.output_dir, "index.html", rv)
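# Typical flow, for illustration only (the config and site objects are provided by SiteFab itself):
#   logger = Logger(config, site)
#   log_id = logger.create_log("plugin", "my_plugin", "my_plugin.html")
#   logger.record_event(log_id, target="post.md", severity=site.OK, details="processed")
#   logger.write_log(log_id)
#   logger.write_log_index()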
| 32.663934
| 75
| 0.607026
|
4a1959bab287142e9d5a3f7509aafe39dfc32073
| 1,067
|
py
|
Python
|
functions/sample/python/reviews.py
|
Bedil09/agfzb-CloudAppDevelopment_Capstone
|
b1ceddac60c9d5551ce3d87e7371f15cc8be2d52
|
[
"Apache-2.0"
] | null | null | null |
functions/sample/python/reviews.py
|
Bedil09/agfzb-CloudAppDevelopment_Capstone
|
b1ceddac60c9d5551ce3d87e7371f15cc8be2d52
|
[
"Apache-2.0"
] | null | null | null |
functions/sample/python/reviews.py
|
Bedil09/agfzb-CloudAppDevelopment_Capstone
|
b1ceddac60c9d5551ce3d87e7371f15cc8be2d52
|
[
"Apache-2.0"
] | null | null | null |
from cloudant.client import Cloudant
from cloudant.error import CloudantException
import requests
def main(dict):
secret = {
"COUCH_URL": "https://f12b5d67-d718-4b5a-ab21-b00755ea589e-bluemix.cloudantnosqldb.appdomain.cloud",
"COUCH_USERNAME": "f12b5d67-d718-4b5a-ab21-b00755ea589e-bluemix",
"IAM_API_KEY": "hhLIBg18IlcYe-ir_xf6aXAt3prqG4zNRjY_GDLAJwAH",
}
client = Cloudant.iam(
account_name=secret["COUCH_USERNAME"],
api_key=secret["IAM_API_KEY"],
connect=True,
)
my_database = client["reviews"]
try:
selector = {'id': {'$eq': int(dict["id"])}}
rows = my_database.get_query_result(
selector, fields=['id', 'name', 'dealership', 'review', 'purchase',
'purchase_date', 'car_make', 'car_model', 'car_year'], raw_result=True)
result = {
'body': {'data': rows}
}
return result
    except Exception:
return {
'statusCode': 404,
'message': 'Something went wrong',
}
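# Illustrative invocation when deployed as an IBM Cloud Functions action (the id value is made up):
#   main({"id": "15"}) -> {'body': {'data': [...review documents whose id field equals 15...]}}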
| 26.675
| 108
| 0.591378
|
4a195aa426ff4bbf1c3dd86f9fda929c22553ea2
| 9,686
|
py
|
Python
|
src/orion/core/io/convert.py
|
mgermain/orion
|
b0932da99cac5c3db9bbf662588c581cb6ca1849
|
[
"BSD-3-Clause"
] | null | null | null |
src/orion/core/io/convert.py
|
mgermain/orion
|
b0932da99cac5c3db9bbf662588c581cb6ca1849
|
[
"BSD-3-Clause"
] | null | null | null |
src/orion/core/io/convert.py
|
mgermain/orion
|
b0932da99cac5c3db9bbf662588c581cb6ca1849
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Parse and generate user script's configuration
==============================================
Defines and instantiates a converter for configuration file types.
Given a file path infer which configuration file parser/emitter it corresponds to.
Define `Converter` classes with a common interface for many popular configuration
file types.
Currently supported:
- YAML
- JSON
- See below, for configuration agnostic parsing
A `GenericConverter` is provided that tries to parse configuration
files, regardless of their type, according to predefined Oríon markers.
"""
import importlib
import os
from abc import ABC, abstractmethod
from collections import deque
from orion.core.utils import Factory, nesteddict
def infer_converter_from_file_type(config_path, regex=None, default_keyword=""):
"""Use filetype extension to infer and build the correct configuration file
converter.
"""
_, ext_type = os.path.splitext(os.path.abspath(config_path))
for klass in Converter.types.values():
if ext_type in klass.file_extensions:
return klass()
if regex is None:
return GenericConverter(expression_prefix=default_keyword)
return GenericConverter(regex, expression_prefix=default_keyword)
class BaseConverter(ABC):
"""Base class for configuration parsers/generators.
Attributes
----------
file_extensions : list of strings
Strings starting with '.' which identify usually a file type as a
common convention. For instance, ``['.yml', '.yaml']`` for YAML files.
"""
file_extensions = []
# pylint:disable=no-self-use
def get_state_dict(self):
"""Give state dict that can be used to reconstruct the converter"""
return {}
def set_state_dict(self, state):
"""Reset the converter based on previous state"""
pass
@abstractmethod
def parse(self, filepath):
"""Read dictionary out of the configuration file.
Parameters
----------
filepath : str
Full path to the original user script's configuration.
"""
pass
@abstractmethod
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
pass
class YAMLConverter(BaseConverter):
"""Converter for YAML files."""
file_extensions = [".yml", ".yaml"]
def __init__(self):
"""Try to dynamically import yaml module."""
self.yaml = importlib.import_module("yaml")
def parse(self, filepath):
"""Read dictionary out of the configuration file.
Parameters
----------
file : str
Full path to the original user script's configuration.
"""
with open(filepath) as f:
return self.yaml.safe_load(stream=f)
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
with open(filepath, "w") as f:
self.yaml.dump(data, stream=f)
class JSONConverter(BaseConverter):
"""Converter for JSON files."""
file_extensions = [".json"]
def __init__(self):
"""Try to dynamically import json module."""
self.json = importlib.import_module("json")
def parse(self, filepath):
"""Read dictionary out of the configuration file.
Parameters
----------
file : str
Full path to the original user script's configuration.
"""
with open(filepath) as f:
return self.json.load(f)
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
with open(filepath, "w") as f:
self.json.dump(data, f)
class GenericConverter(BaseConverter):
"""Generic converter for any configuration file type.
For each parameter dimension declared here, one must necessarily
provide a ``name`` keyword inside the `Dimension` building expression.
    Implementation details: As this class is supposed to provide a
    generic text parser, semantics are going to be tied to their consequent
    usage. A template document is going to be created on `parse` and filled
    with values on `read`. This template document constitutes the state of this
    `Converter` object.
Dimension should be defined for instance as:
``meaningful_name~uniform(0, 4)``
"""
def __init__(
self,
regex=r"([\/]?[\w|\/|-]+)~([\+]?.*\)|\-|\>[A-Za-z_]\w*)",
expression_prefix="",
):
"""Initialize with the regex expression which will be searched for
to define a `Dimension`.
"""
self.re_module = importlib.import_module("re")
self.regex = self.re_module.compile(regex)
self.expression_prefix = expression_prefix
self.template = None
self.has_leading = dict()
self.conflict_msg = "Namespace conflict in configuration file '{}', under '{}'"
def get_state_dict(self):
"""Give state dict that can be used to reconstruct the converter"""
return dict(
regex=self.regex.pattern,
expression_prefix=self.expression_prefix,
template=self.template,
has_leading=self.has_leading,
)
def set_state_dict(self, state):
"""Reset the converter based on previous state"""
self.regex = self.re_module.compile(state["regex"])
self.expression_prefix = state["expression_prefix"]
self.template = state["template"]
self.has_leading = state["has_leading"]
def _raise_conflict(self, path, namespace):
raise ValueError(self.conflict_msg.format(path, namespace))
def parse(self, filepath):
r"""Read dictionary out of the configuration file.
Create a template for Python 3 string format and save it as this
        object's state, by substituting '{\1}' wherever the pattern
was matched. By default, the first matched group (\1) corresponds
with a dimension's namespace.
.. note:: Namespace in substitution templates does not contain the first '/'.
Parameters
----------
filepath : str
Full path to the original user script's configuration.
"""
with open(filepath) as f:
self.template = f.read()
# Search for Oríon semantic pattern
pairs = self.regex.findall(self.template)
ret = dict(pairs)
# Every namespace given should be unique,
# raise conflict if there are duplicates
if len(pairs) != len(ret):
namespaces = list(zip(*pairs))[0]
for name in namespaces:
if namespaces.count(name) != 1:
self._raise_conflict(filepath, name)
# Create template using each namespace as format key,
# exactly as provided by the user
subst = self.re_module.sub(r"{", r"{{", self.template)
subst = self.re_module.sub(r"}", r"}}", subst)
substituted, num_subs = self.regex.subn(r"{\1!s}", subst)
assert len(ret) == num_subs, (
"This means an error in the regex. Report bug. Details::\n"
"original: {}\n, regex:{}".format(self.template, self.regex)
)
self.template = substituted
# Wrap it in style of what the rest of `Converter`s return
ret_nested = nesteddict()
for namespace, expression in ret.items():
keys = namespace.split("/")
if not keys[0]: # It means that user wrote a namespace starting from '/'
keys = keys[1:] # Safe because of the regex pattern
self.has_leading[namespace[1:]] = "/"
stuff = ret_nested
for i, key in enumerate(keys[:-1]):
stuff = stuff[key]
if isinstance(stuff, str):
# If `stuff` is not a dictionary while traversing the
# namespace path, then this amounts to a conflict which was
                    # not caught earlier
self._raise_conflict(filepath, "/".join(keys[: i + 1]))
# If final value is already filled,
# then this must be also due to a conflict
if stuff[keys[-1]]:
self._raise_conflict(filepath, namespace)
# Keep compatibility with `SpaceBuilder._build_from_config`
stuff[keys[-1]] = self.expression_prefix + expression
return ret_nested
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
unnested_data = dict()
stack = deque()
stack.append(([], data))
while True:
try:
namespace, stuff = stack.pop()
except IndexError:
break
if isinstance(stuff, dict):
for k, v in stuff.items():
stack.append((["/".join(namespace + [str(k)])], v))
else:
name = namespace[0]
unnested_data[self.has_leading.get(name, "") + name] = stuff
print(self.template)
print(unnested_data)
document = self.template.format(**unnested_data)
with open(filepath, "w") as f:
f.write(document)
# pylint: disable=too-few-public-methods,abstract-method
class Converter(BaseConverter, metaclass=Factory):
"""Class used to inject dependency on a configuration file parser/generator.
.. seealso:: :class:`orion.core.utils.Factory` metaclass and `BaseConverter` interface.
"""
pass
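# A minimal round-trip sketch for GenericConverter; the file content and values
# below are illustrative only.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("learning_rate: lr~uniform(0, 1)\n")
        example_path = tmp.name
    converter = GenericConverter()
    parsed = converter.parse(example_path)  # {'lr': 'uniform(0, 1)'}
    print(dict(parsed))
    # Substitute a concrete value back into the original document layout.
    converter.generate(example_path, {"lr": 0.5})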
| 33.4
| 91
| 0.615528
|
4a195b5541adba6ed58510d9f7fe8762fec8b4bb
| 309
|
py
|
Python
|
Contest/ABC147/c/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC147/c/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC147/c/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from itertools import*
I = input
n = int(I())
c = 0;z = []
for i in range(n):
for j in range(int(I())):
z += [[i] + list(map(int, I().split()))]
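# Brute force: try every honest(1)/unkind(0) assignment of the n people.
# An assignment is consistent when every testimony (speaker i claims person x has status y)
# made by a person marked honest agrees with the assignment; keep the maximum honest count.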
for j in product([0,1], repeat=n):
if all(j[i] == 0 or j[x-1] - y == 0 for i, x, y in z):
c = max(c, sum(j))
print(c)
| 25.75
| 58
| 0.501618
|
4a195d0635407548f65616247cf7b37ee23616c2
| 569
|
py
|
Python
|
tests/unet_test.py
|
rutugandhi/Neuron-Finder
|
76d771bb37b7c73f884dc4a018fa19090ec904d6
|
[
"MIT"
] | null | null | null |
tests/unet_test.py
|
rutugandhi/Neuron-Finder
|
76d771bb37b7c73f884dc4a018fa19090ec904d6
|
[
"MIT"
] | null | null | null |
tests/unet_test.py
|
rutugandhi/Neuron-Finder
|
76d771bb37b7c73f884dc4a018fa19090ec904d6
|
[
"MIT"
] | null | null | null |
import numpy as np
from src.unet import UNet
unet = UNet()
def dice_coef_test():
#Creating testing arrays
y_true = np.array([[1,2,3],[1,2,3],[1,2,3]])
y_pred = np.array([[3,2,1],[3,2,1],[3,2,1]])
dc = unet.dice_coef(y_true,y_pred)
#Correct Answer is 2*3/(9+9)=6/18=1/3
assert dc == (1/3)
def dice_coef_loss_test():
#Creating testing arrays
y_true = np.array([[1,2,3],[1,2,3],[1,2,3]])
y_pred = np.array([[3,2,1],[3,2,1],[3,2,1]])
    dcl = unet.dice_coef_loss(y_true, y_pred)
#Correct Answer is -(2*3/(9+9))=-(6/18)=-(1/3)
assert dcl == -(1/3)
| 24.73913
| 50
| 0.567663
|
4a195d5c7091777c85414c923de0a701c67790b5
| 447
|
py
|
Python
|
basic_accounting/basic_accounting/doctype/payment_entry_for_supplier/payment_entry_for_supplier.py
|
EPIsumeet/Accounting-App
|
82836ee9e5dc21a0292b8590d8ae2c60b9b77b3f
|
[
"MIT"
] | null | null | null |
basic_accounting/basic_accounting/doctype/payment_entry_for_supplier/payment_entry_for_supplier.py
|
EPIsumeet/Accounting-App
|
82836ee9e5dc21a0292b8590d8ae2c60b9b77b3f
|
[
"MIT"
] | null | null | null |
basic_accounting/basic_accounting/doctype/payment_entry_for_supplier/payment_entry_for_supplier.py
|
EPIsumeet/Accounting-App
|
82836ee9e5dc21a0292b8590d8ae2c60b9b77b3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Sherlock Holmes and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PaymentEntryForSupplier(Document):
def before_submit(self):
supplier_exists = frappe.db.exists(
"Payment Entry for Supplier",
{
"party_name": self.party_name
}
)
if supplier_exists:
pass
| 22.35
| 54
| 0.740492
|
4a195da4e4e59bef0f4858c98b8b3b635a85fd57
| 89
|
py
|
Python
|
home_user/dj_iot/iotdata/apps.py
|
IoTree/IoTree42
|
b7bb31f39add4e719a04e63cdd336983f8017137
|
[
"MIT"
] | 4
|
2020-06-04T08:43:54.000Z
|
2021-11-15T17:29:23.000Z
|
home_user/dj_iot/iotdata/apps.py
|
IoTree/IoTree42
|
b7bb31f39add4e719a04e63cdd336983f8017137
|
[
"MIT"
] | 3
|
2020-05-02T10:53:03.000Z
|
2021-05-20T13:17:08.000Z
|
home_user/dj_iot/iotdata/apps.py
|
IoTree/IoTree42
|
b7bb31f39add4e719a04e63cdd336983f8017137
|
[
"MIT"
] | 3
|
2020-10-27T13:06:51.000Z
|
2022-01-08T14:56:36.000Z
|
from django.apps import AppConfig
class IotdataConfig(AppConfig):
name = 'iotdata'
| 14.833333
| 33
| 0.752809
|
4a195e54e1a7363e21be9b30bc9a2a09d710e8b5
| 530
|
py
|
Python
|
lab/refactoring/extract_method3.py
|
LukazDane/SPD-2.31-Testing-and-Architecture
|
63905bf4efde55e5c32d7cfe3ac46abdb2173485
|
[
"MIT"
] | null | null | null |
lab/refactoring/extract_method3.py
|
LukazDane/SPD-2.31-Testing-and-Architecture
|
63905bf4efde55e5c32d7cfe3ac46abdb2173485
|
[
"MIT"
] | null | null | null |
lab/refactoring/extract_method3.py
|
LukazDane/SPD-2.31-Testing-and-Architecture
|
63905bf4efde55e5c32d7cfe3ac46abdb2173485
|
[
"MIT"
] | null | null | null |
# Written by Kamran Bigdely
# Example for Compose Methods: Extract Method.
import math
xc1 = 4
yc1 = 4.25
xc2 = 53
yc2 = -5.35
# Extracted method: compute the Euclidean distance between two points.
def length(x1, y1, x2, y2):
    return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
# Calculate the distance between the two circles
distance = length(xc1, yc1, xc2, yc2)
print('distance', distance)
# *** somewhere else in your program ***
xa = -36
ya = 97
xb = .34
yb = .91
# Calculate the length of vector AB, the vector between points A and B.
length = length(xa, ya, xb, yb)
# length = math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb))
print('length', length)
| 24.090909
| 84
| 0.669811
|
4a195fcd115507e36da660549c55c283ba94fb42
| 6,593
|
py
|
Python
|
ansys/dpf/core/operators/geo/normals.py
|
TheGoldfish01/pydpf-core
|
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
|
[
"MIT"
] | 11
|
2021-01-31T15:50:02.000Z
|
2021-10-01T23:15:38.000Z
|
ansys/dpf/core/operators/geo/normals.py
|
TheGoldfish01/pydpf-core
|
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
|
[
"MIT"
] | 46
|
2021-01-14T05:00:50.000Z
|
2021-10-06T18:30:37.000Z
|
ansys/dpf/core/operators/geo/normals.py
|
TheGoldfish01/pydpf-core
|
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
|
[
"MIT"
] | 3
|
2021-06-30T07:18:30.000Z
|
2021-09-15T08:43:11.000Z
|
"""
normals
=======
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from meshOperatorsCore plugin, from "geo" category
"""
class normals(Operator):
"""compute the normals at the given nodes or element scoping based on the given mesh (first version, the element normal is only handled on the shell elements)
available inputs:
- mesh (MeshedRegion) (optional)
- mesh_scoping (Scoping) (optional)
- field (Field) (optional)
available outputs:
- field (Field)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.geo.normals()
>>> # Make input connections
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_mesh_scoping = dpf.Scoping()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> my_field = dpf.Field()
>>> op.inputs.field.connect(my_field)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.geo.normals(mesh=my_mesh,mesh_scoping=my_mesh_scoping,field=my_field)
>>> # Get output data
>>> result_field = op.outputs.field()"""
def __init__(self, mesh=None, mesh_scoping=None, field=None, config=None, server=None):
super().__init__(name="normals_provider", config = config, server = server)
self._inputs = InputsNormals(self)
self._outputs = OutputsNormals(self)
        if mesh is not None:
            self.inputs.mesh.connect(mesh)
        if mesh_scoping is not None:
            self.inputs.mesh_scoping.connect(mesh_scoping)
        if field is not None:
            self.inputs.field.connect(field)
@staticmethod
def _spec():
spec = Specification(description="""compute the normals at the given nodes or element scoping based on the given mesh (first version, the element normal is only handled on the shell elements)""",
map_input_pin_spec={
0 : PinSpecification(name = "mesh", type_names=["abstract_meshed_region"], optional=True, document=""""""),
1 : PinSpecification(name = "mesh_scoping", type_names=["scoping"], optional=True, document=""""""),
3 : PinSpecification(name = "field", type_names=["field"], optional=True, document="""""")},
map_output_pin_spec={
0 : PinSpecification(name = "field", type_names=["field"], optional=False, document="""""")})
return spec
@staticmethod
def default_config():
return Operator.default_config(name = "normals_provider")
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsNormals
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsNormals
"""
return super().outputs
#internal name: normals_provider
#scripting name: normals
class InputsNormals(_Inputs):
"""Intermediate class used to connect user inputs to normals operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.normals()
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_mesh_scoping = dpf.Scoping()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> my_field = dpf.Field()
>>> op.inputs.field.connect(my_field)
"""
def __init__(self, op: Operator):
super().__init__(normals._spec().inputs, op)
self._mesh = Input(normals._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._mesh)
self._mesh_scoping = Input(normals._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._mesh_scoping)
self._field = Input(normals._spec().input_pin(3), 3, op, -1)
self._inputs.append(self._field)
@property
def mesh(self):
"""Allows to connect mesh input to the operator
Parameters
----------
my_mesh : MeshedRegion,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.normals()
>>> op.inputs.mesh.connect(my_mesh)
>>> #or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh
@property
def mesh_scoping(self):
"""Allows to connect mesh_scoping input to the operator
Parameters
----------
my_mesh_scoping : Scoping,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.normals()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> #or
>>> op.inputs.mesh_scoping(my_mesh_scoping)
"""
return self._mesh_scoping
@property
def field(self):
"""Allows to connect field input to the operator
Parameters
----------
my_field : Field,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.normals()
>>> op.inputs.field.connect(my_field)
>>> #or
>>> op.inputs.field(my_field)
"""
return self._field
class OutputsNormals(_Outputs):
"""Intermediate class used to get outputs from normals operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.normals()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
"""
def __init__(self, op: Operator):
super().__init__(normals._spec().outputs, op)
self._field = Output(normals._spec().output_pin(0), 0, op)
self._outputs.append(self._field)
@property
def field(self):
"""Allows to get field output of the operator
Returns
----------
my_field : Field,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.normals()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
"""
return self._field
| 30.953052
| 203
| 0.586683
|
4a195ff50a6f5136265516dbb640e9a2f5a483fd
| 10,019
|
py
|
Python
|
Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: xenserver_guest_powerstate
short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
description: >
   This module can be used to power on, power off, restart or suspend a virtual machine and gracefully reboot or shut down the guest OS of a virtual machine.
author:
- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
notes:
- Minimal supported version of XenServer is 5.6.
- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
requirements:
- python >= 2.6
- XenAPI
options:
state:
description:
- Specify the state VM should be in.
- If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned.
- If C(state) is set to C(present), then VM is just checked for existence and facts are returned.
type: str
default: present
choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
name:
description:
- Name of the VM to manage.
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
- In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage.
- This parameter is case sensitive.
type: str
aliases: [ name_label ]
uuid:
description:
- UUID of the VM to manage if known. This is XenServer's unique identifier.
- It is required if name is not unique.
type: str
wait_for_ip_address:
description:
- Wait until XenServer detects an IP address for the VM.
- This requires XenServer Tools to be preinstalled on the VM to work properly.
type: bool
default: no
state_change_timeout:
description:
- 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).'
- If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
- In case of timeout, module will generate an error message.
type: int
default: 0
extends_documentation_fragment:
- community.general.xenserver.documentation
'''
EXAMPLES = r'''
- name: Power on VM
community.general.xenserver_guest_powerstate:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
name: testvm_11
state: powered-on
delegate_to: localhost
register: facts
'''
RETURN = r'''
instance:
description: Metadata about the VM
returned: always
type: dict
sample: {
"cdrom": {
"type": "none"
},
"customization_agent": "native",
"disks": [
{
"name": "windows-template-testing-0",
"name_desc": "",
"os_device": "xvda",
"size": 42949672960,
"sr": "Local storage",
"sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
"vbd_userdevice": "0"
},
{
"name": "windows-template-testing-1",
"name_desc": "",
"os_device": "xvdb",
"size": 42949672960,
"sr": "Local storage",
"sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
"vbd_userdevice": "1"
}
],
"domid": "56",
"folder": "",
"hardware": {
"memory_mb": 8192,
"num_cpu_cores_per_socket": 2,
"num_cpus": 4
},
"home_server": "",
"is_template": false,
"name": "windows-template-testing",
"name_desc": "",
"networks": [
{
"gateway": "192.168.0.254",
"gateway6": "fc00::fffe",
"ip": "192.168.0.200",
"ip6": [
"fe80:0000:0000:0000:e9cb:625a:32c5:c291",
"fc00:0000:0000:0000:0000:0000:0000:0001"
],
"mac": "ba:91:3a:48:20:76",
"mtu": "1500",
"name": "Pool-wide network associated with eth1",
"netmask": "255.255.255.128",
"prefix": "25",
"prefix6": "64",
"vif_device": "0"
}
],
"other_config": {
"base_template_name": "Windows Server 2016 (64-bit)",
"import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
"install-methods": "cdrom",
"instant": "true",
"mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
},
"platform": {
"acpi": "1",
"apic": "true",
"cores-per-socket": "2",
"device_id": "0002",
"hpet": "true",
"nx": "true",
"pae": "true",
"timeoffset": "-25200",
"vga": "std",
"videoram": "8",
"viridian": "true",
"viridian_reference_tsc": "true",
"viridian_time_ref_count": "true"
},
"state": "poweredon",
"uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
"xenstore_data": {
"vm-data": ""
}
}
'''
import re
HAS_XENAPI = False
try:
import XenAPI
HAS_XENAPI = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
gather_vm_params, gather_vm_facts, set_vm_power_state,
wait_for_vm_ip_address)
class XenServerVM(XenServerObject):
"""Class for managing XenServer VM.
Attributes:
vm_ref (str): XAPI reference to VM.
vm_params (dict): A dictionary with VM parameters as returned
by gather_vm_params() function.
"""
def __init__(self, module):
"""Inits XenServerVM using module parameters.
Args:
module: Reference to Ansible module object.
"""
super(XenServerVM, self).__init__(module)
self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
self.gather_params()
def gather_params(self):
"""Gathers all VM parameters available in XAPI database."""
self.vm_params = gather_vm_params(self.module, self.vm_ref)
def gather_facts(self):
"""Gathers and returns VM facts."""
return gather_vm_facts(self.module, self.vm_params)
def set_power_state(self, power_state):
"""Controls VM power state."""
state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
# If state has changed, update vm_params.
if state_changed:
self.vm_params['power_state'] = current_state.capitalize()
return state_changed
def wait_for_ip_address(self):
"""Waits for VM to acquire an IP address."""
self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
def main():
argument_spec = xenserver_common_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
name=dict(type='str', aliases=['name_label']),
uuid=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
state_change_timeout=dict(type='int', default=0),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
# Module will exit with an error message if no VM is found.
vm = XenServerVM(module)
# Set VM power state.
if module.params['state'] != "present":
result['changed'] = vm.set_power_state(module.params['state'])
if module.params['wait_for_ip_address']:
vm.wait_for_ip_address()
result['instance'] = vm.gather_facts()
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
| 36.97048
| 157
| 0.60525
|
4a19605f5c10d236cd1cc64147732c13e115a98a
| 11,400
|
py
|
Python
|
test/functional/tests/cache_ops/test_cleaning_policy_operation.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | 1
|
2022-01-23T23:50:23.000Z
|
2022-01-23T23:50:23.000Z
|
test/functional/tests/cache_ops/test_cleaning_policy_operation.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-21T22:05:26.000Z
|
2022-03-21T22:05:26.000Z
|
test/functional/tests/cache_ops/test_cleaning_policy_operation.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import time
import pytest
from datetime import timedelta
from api.cas import casadm
from api.cas.cache_config import (
CacheMode,
CleaningPolicy,
FlushParametersAcp,
FlushParametersAlru,
Time,
)
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from core.test_run import TestRun
from test_utils.size import Size, Unit
from test_utils.os_utils import Udev, sync
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
cores_count = 4
io_size = Size(10000, Unit.Blocks4096)
# time_to_wait in seconds
# For 4 cores and io_size = 10000 Blocks4096, 30 seconds of waiting should be enough
# for CAS cleaner to flush enough data for test purposes.
time_to_wait = 30
# Name of CAS cleaner to search for in running processes:
cas_cleaner_process_name = "cas_cl_"
@pytest.mark.parametrize("cleaning_policy", CleaningPolicy)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policies_in_write_back(cleaning_policy):
"""
title: Test for cleaning policy operation in Write-Back cache mode.
description: |
Check if ALRU, NOP and ACP cleaning policies preserve their
parameters when changed and if they flush dirty data properly
in Write-Back cache mode.
pass_criteria:
- Flush parameters preserve their values when changed.
- Dirty data is flushed or not according to the policy used.
"""
with TestRun.step("Partition cache and core devices"):
cache_dev, core_dev = storage_prepare()
Udev.disable()
with TestRun.step(
f"Start cache in Write-Back mode with {cleaning_policy} cleaning policy"
):
cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WB, force=True)
set_cleaning_policy_and_params(cache, cleaning_policy)
with TestRun.step("Check for running CAS cleaner"):
if TestRun.executor.run(f"pgrep {cas_cleaner_process_name}").exit_code != 0:
TestRun.fail("CAS cleaner process is not running!")
with TestRun.step(f"Add {cores_count} cores to the cache"):
core = []
for i in range(cores_count):
core.append(cache.add_core(core_dev.partitions[i]))
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for i in range(cores_count):
fio.add_job().target(core[i].path)
fio.run()
time.sleep(3)
core_writes_before_wait_for_cleaning = (
cache.get_statistics().block_stats.core.writes
)
with TestRun.step(f"Wait {time_to_wait} seconds"):
time.sleep(time_to_wait)
with TestRun.step("Check write statistics for core device"):
core_writes_after_wait_for_cleaning = (
cache.get_statistics().block_stats.core.writes
)
check_cleaning_policy_operation(
cleaning_policy,
core_writes_before_wait_for_cleaning,
core_writes_after_wait_for_cleaning,
)
with TestRun.step("Stop all caches"):
casadm.stop_all_caches()
Udev.enable()
@pytest.mark.parametrize("cleaning_policy", CleaningPolicy)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policies_in_write_through(cleaning_policy):
"""
title: Test for cleaning policy operation in Write-Through cache mode.
description: |
Check if ALRU, NOP and ACP cleaning policies preserve their
parameters when changed and if they flush dirty data properly
in Write-Through cache mode.
pass_criteria:
- Flush parameters preserve their values when changed.
- Dirty data is flushed or not according to the policy used.
"""
with TestRun.step("Partition cache and core devices"):
cache_dev, core_dev = storage_prepare()
Udev.disable()
with TestRun.step(
f"Start cache in Write-Through mode with {cleaning_policy} cleaning policy"
):
cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WT, force=True)
set_cleaning_policy_and_params(cache, cleaning_policy)
with TestRun.step("Check for running CAS cleaner"):
if TestRun.executor.run(f"pgrep {cas_cleaner_process_name}").exit_code != 0:
TestRun.fail("CAS cleaner process is not running!")
with TestRun.step(f"Add {cores_count} cores to the cache"):
core = []
for i in range(cores_count):
core.append(cache.add_core(core_dev.partitions[i]))
with TestRun.step("Change cache mode to Write-Back"):
cache.set_cache_mode(CacheMode.WB)
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for i in range(cores_count):
fio.add_job().target(core[i].path)
fio.run()
time.sleep(3)
with TestRun.step("Change cache mode back to Write-Through"):
cache.set_cache_mode(CacheMode.WT, flush=False)
core_writes_before_wait_for_cleaning = (
cache.get_statistics().block_stats.core.writes
)
with TestRun.step(f"Wait {time_to_wait} seconds"):
time.sleep(time_to_wait)
with TestRun.step("Check write statistics for core device"):
core_writes_after_wait_for_cleaning = (
cache.get_statistics().block_stats.core.writes
)
check_cleaning_policy_operation(
cleaning_policy,
core_writes_before_wait_for_cleaning,
core_writes_after_wait_for_cleaning,
)
with TestRun.step("Stop all caches"):
casadm.stop_all_caches()
Udev.enable()
def storage_prepare():
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(1, Unit.GibiByte)])
core_dev = TestRun.disks["core"]
parts = [Size(2, Unit.GibiByte)] * cores_count
core_dev.create_partitions(parts)
return cache_dev, core_dev
def set_cleaning_policy_and_params(cache, cleaning_policy):
if cleaning_policy != CleaningPolicy.DEFAULT:
cache.set_cleaning_policy(cleaning_policy)
current_cleaning_policy = cache.get_cleaning_policy()
if current_cleaning_policy != cleaning_policy:
TestRun.LOGGER.error(
f"Cleaning policy is {current_cleaning_policy}, "
f"should be {cleaning_policy}"
)
if cleaning_policy == CleaningPolicy.alru:
alru_params = FlushParametersAlru()
alru_params.wake_up_time = Time(seconds=10)
alru_params.staleness_time = Time(seconds=2)
alru_params.flush_max_buffers = 100
alru_params.activity_threshold = Time(milliseconds=1000)
cache.set_params_alru(alru_params)
current_alru_params = cache.get_flush_parameters_alru()
if current_alru_params != alru_params:
failed_params = ""
if current_alru_params.wake_up_time != alru_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_alru_params.wake_up_time}, "
f"should be {alru_params.wake_up_time}\n"
)
if current_alru_params.staleness_time != alru_params.staleness_time:
failed_params += (
f"Staleness Time is {current_alru_params.staleness_time}, "
f"should be {alru_params.staleness_time}\n"
)
if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
failed_params += (
f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
f"should be {alru_params.flush_max_buffers}\n"
)
if current_alru_params.activity_threshold != alru_params.activity_threshold:
failed_params += (
f"Activity Threshold is {current_alru_params.activity_threshold}, "
f"should be {alru_params.activity_threshold}\n"
)
TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
if cleaning_policy == CleaningPolicy.acp:
acp_params = FlushParametersAcp()
acp_params.wake_up_time = Time(milliseconds=100)
acp_params.flush_max_buffers = 64
cache.set_params_acp(acp_params)
current_acp_params = cache.get_flush_parameters_acp()
if current_acp_params != acp_params:
failed_params = ""
if current_acp_params.wake_up_time != acp_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_acp_params.wake_up_time}, "
f"should be {acp_params.wake_up_time}\n"
)
if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
failed_params += (
f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
f"should be {acp_params.flush_max_buffers}\n"
)
TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
def fio_prepare():
fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.block_size(Size(4, Unit.KibiByte))
.size(io_size)
.read_write(ReadWrite.randwrite)
.direct(1)
)
return fio
def check_cleaning_policy_operation(
cleaning_policy,
core_writes_before_wait_for_cleaning,
core_writes_after_wait_for_cleaning,
):
if cleaning_policy == CleaningPolicy.alru:
if core_writes_before_wait_for_cleaning.value != 0:
TestRun.LOGGER.error(
"CAS cleaner started to clean dirty data right after IO! "
"According to ALRU parameters set in this test cleaner should "
"wait 10 seconds after IO before cleaning dirty data."
)
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
TestRun.LOGGER.error(
"ALRU cleaning policy is not working properly! "
"Core writes should increase in time while cleaning dirty data."
)
if cleaning_policy == CleaningPolicy.nop:
if (
core_writes_after_wait_for_cleaning.value != 0
or core_writes_before_wait_for_cleaning.value != 0
):
TestRun.LOGGER.error(
"NOP cleaning policy is not working properly! "
"There should be no core writes as there is no cleaning of dirty data."
)
if cleaning_policy == CleaningPolicy.acp:
if core_writes_before_wait_for_cleaning.value == 0:
TestRun.LOGGER.error(
"CAS cleaner did not start cleaning dirty data right after IO! "
"According to ACP policy cleaner should start "
"cleaning dirty data right after IO."
)
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
TestRun.LOGGER.error(
"ACP cleaning policy is not working properly! "
"Core writes should increase in time while cleaning dirty data."
)
| 39.041096
| 94
| 0.659474
|
4a196297c26f6c2ebe3d37c7910eb8267a6546e0
| 1,099
|
py
|
Python
|
0x0B-python-input_output/12-student.py
|
FatChicken277/holbertonschool-higher_level_programming
|
520d6310a5e2a874f8c5f5185d0fb769b6412e7c
|
[
"CNRI-Python"
] | null | null | null |
0x0B-python-input_output/12-student.py
|
FatChicken277/holbertonschool-higher_level_programming
|
520d6310a5e2a874f8c5f5185d0fb769b6412e7c
|
[
"CNRI-Python"
] | null | null | null |
0x0B-python-input_output/12-student.py
|
FatChicken277/holbertonschool-higher_level_programming
|
520d6310a5e2a874f8c5f5185d0fb769b6412e7c
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/python3
"""This module contains a class that class Student
that defines a student. (based on 11-student.py)
"""
class Student():
"""Class Student that defines a student.
(based on 11-student.py)
"""
def __init__(self, first_name, last_name, age):
"""Instantiation with first_name, last_name and age.
Arguments:
first_name {str} -- student first name.
last_name {str} -- student last name.
age {int} -- student age.
"""
self.first_name = first_name
self.last_name = last_name
self.age = age
def to_json(self, attrs=None):
"""Returns the dictionary description with simple data
structure (list, dictionary, string, integer and boolean)
for JSON serialization of an object.
Returns:
dict -- dictionary.
"""
dic = {}
if attrs is None:
return self.__dict__
for attr in attrs:
if hasattr(self, attr):
dic[attr] = getattr(self, attr)
return dic
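# Hedged usage sketch (illustrative only; the sample values below are assumptions):
if __name__ == "__main__":
    import json
    student = Student("John", "Doe", 23)
    # Only the requested attributes end up in the serialized output.
    print(json.dumps(student.to_json(attrs=["first_name", "age"])))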
| 28.179487
| 69
| 0.571429
|
4a196317702587c0a13e783e2e1935468649156c
| 451
|
py
|
Python
|
settings/fixed_params.py
|
wesley1001/trading-momentum-transformer
|
7d6251b32b82cb0f6bf7abb5504a989417469b7b
|
[
"MIT"
] | 27
|
2022-01-24T01:52:13.000Z
|
2022-03-30T04:18:29.000Z
|
settings/fixed_params.py
|
wesley1001/trading-momentum-transformer
|
7d6251b32b82cb0f6bf7abb5504a989417469b7b
|
[
"MIT"
] | 1
|
2022-03-23T11:27:46.000Z
|
2022-03-28T04:37:54.000Z
|
settings/fixed_params.py
|
kieranjwood/trading-momentum-transformer
|
d7df00bba31f5728e1c8bc735da0208892487142
|
[
"MIT"
] | 21
|
2022-02-15T09:27:20.000Z
|
2022-03-30T07:38:09.000Z
|
MODLE_PARAMS = {
    "architecture": "TFT",
    "total_time_steps": 252,
    "early_stopping_patience": 25,
    "multiprocessing_workers": 32,
    "num_epochs": 300,
    "fill_blank_dates": False,
    "split_tickers_individually": True,
    "random_search_iterations": 50,
    "evaluate_diversified_val_sharpe": True,
    "train_valid_ratio": 0.90,
    "time_features": False,
    "force_output_sharpe_length": 0,
}
| 30.066667
| 44
| 0.689579
|
4a1963dba1f2e88add572333dbc706db9555deb5
| 2,614
|
py
|
Python
|
twitterscraper/main.py
|
samanthaklee/twitterscraper
|
c6ec256de26bd24410e30daa56a998958a450c78
|
[
"MIT"
] | 1
|
2019-08-12T18:34:58.000Z
|
2019-08-12T18:34:58.000Z
|
twitterscraper/main.py
|
samanthaklee/twitterscraper
|
c6ec256de26bd24410e30daa56a998958a450c78
|
[
"MIT"
] | null | null | null |
twitterscraper/main.py
|
samanthaklee/twitterscraper
|
c6ec256de26bd24410e30daa56a998958a450c78
|
[
"MIT"
] | 1
|
2019-10-08T02:38:09.000Z
|
2019-10-08T02:38:09.000Z
|
"""
This is a command line application that allows you to scrape twitter!
"""
import collections.abc
import json
from argparse import ArgumentParser
from datetime import datetime
from os.path import isfile
from json import dump
import logging
from twitterscraper import query_tweets
from twitterscraper.query import query_all_tweets
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, '__json__'):
return obj.__json__()
        elif isinstance(obj, collections.abc.Iterable):
return list(obj)
elif isinstance(obj, datetime):
return obj.isoformat()
elif hasattr(obj, '__getitem__') and hasattr(obj, 'keys'):
return dict(obj)
elif hasattr(obj, '__dict__'):
return {member: getattr(obj, member)
for member in dir(obj)
if not member.startswith('_') and
not hasattr(getattr(obj, member), '__call__')}
return json.JSONEncoder.default(self, obj)
def main():
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
try:
parser = ArgumentParser(
description=__doc__
)
parser.add_argument("query", type=str, help="Advanced twitter query")
parser.add_argument("-o", "--output", type=str, default="tweets.json",
help="Path to a JSON file to store the gathered "
"tweets to.")
parser.add_argument("-l", "--limit", type=int, default=None,
help="Number of minimum tweets to gather.")
parser.add_argument("-a", "--all", action='store_true',
help="Set this flag if you want to get all tweets "
"in the history of twitter. This may take a "
"while but also activates parallel tweet "
"gathering. The number of tweets however, "
"will be capped at around 100000 per 10 "
"days.")
args = parser.parse_args()
if isfile(args.output):
logging.error("Output file already exists! Aborting.")
exit(-1)
if args.all:
tweets = query_all_tweets(args.query)
else:
tweets = query_tweets(args.query, args.limit)
with open(args.output, "w") as output:
dump(tweets, output, cls=JSONEncoder)
except KeyboardInterrupt:
logging.info("Program interrupted by user. Quitting...")
| 36.816901
| 80
| 0.573068
|
4a1964e55729b62cb6465cd65b0e97710644b5f2
| 386
|
py
|
Python
|
venv/Scripts/pip3.7-script.py
|
Galeedondon/-shopee
|
c4b1205a4ce1cd387ff6f2f2071115b13e4cc8b5
|
[
"Unlicense"
] | null | null | null |
venv/Scripts/pip3.7-script.py
|
Galeedondon/-shopee
|
c4b1205a4ce1cd387ff6f2f2071115b13e4cc8b5
|
[
"Unlicense"
] | null | null | null |
venv/Scripts/pip3.7-script.py
|
Galeedondon/-shopee
|
c4b1205a4ce1cd387ff6f2f2071115b13e4cc8b5
|
[
"Unlicense"
] | null | null | null |
#!Z:\DEMO\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| 29.692308
| 70
| 0.650259
|
4a19650ff2e1d7ca307eb09de0bb87a6b0a77dcd
| 4,057
|
py
|
Python
|
tests/kafkatest/services/log_compaction_tester.py
|
BoYiZhang/kafka-2.4.0-src
|
752b76f7f48ca4c5ea20770fd990293b1b28fce4
|
[
"Apache-2.0"
] | 126
|
2018-08-31T21:47:30.000Z
|
2022-03-11T10:01:31.000Z
|
tests/kafkatest/services/log_compaction_tester.py
|
BoYiZhang/kafka-2.4.0-src
|
752b76f7f48ca4c5ea20770fd990293b1b28fce4
|
[
"Apache-2.0"
] | 75
|
2019-03-07T20:24:18.000Z
|
2022-03-31T02:14:37.000Z
|
tests/kafkatest/services/log_compaction_tester.py
|
BoYiZhang/kafka-2.4.0-src
|
752b76f7f48ca4c5ea20770fd990293b1b28fce4
|
[
"Apache-2.0"
] | 46
|
2018-09-13T07:27:19.000Z
|
2022-03-23T17:49:13.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin, CORE_LIBS_JAR_NAME, CORE_DEPENDANT_TEST_LIBS_JAR_NAME
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH
class LogCompactionTester(KafkaPathResolverMixin, BackgroundThreadService):
OUTPUT_DIR = "/mnt/logcompaction_tester"
LOG_PATH = os.path.join(OUTPUT_DIR, "logcompaction_tester_stdout.log")
VERIFICATION_STRING = "Data verification is completed"
logs = {
"tool_logs": {
"path": LOG_PATH,
"collect_default": True}
}
def __init__(self, context, kafka, security_protocol="PLAINTEXT", stop_timeout_sec=30):
super(LogCompactionTester, self).__init__(context, 1)
self.kafka = kafka
self.security_protocol = security_protocol
self.security_config = SecurityConfig(self.context, security_protocol)
self.stop_timeout_sec = stop_timeout_sec
self.log_compaction_completed = False
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % LogCompactionTester.OUTPUT_DIR)
cmd = self.start_cmd(node)
self.logger.info("LogCompactionTester %d command: %s" % (idx, cmd))
self.security_config.setup_node(node)
for line in node.account.ssh_capture(cmd):
self.logger.debug("Checking line:{}".format(line))
if line.startswith(LogCompactionTester.VERIFICATION_STRING):
self.log_compaction_completed = True
def start_cmd(self, node):
core_libs_jar = self.path.jar(CORE_LIBS_JAR_NAME, DEV_BRANCH)
core_dependant_test_libs_jar = self.path.jar(CORE_DEPENDANT_TEST_LIBS_JAR_NAME, DEV_BRANCH)
cmd = "for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_libs_jar
cmd += " for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_dependant_test_libs_jar
cmd += " export CLASSPATH;"
cmd += self.path.script("kafka-run-class.sh", node)
cmd += " %s" % self.java_class_name()
cmd += " --bootstrap-server %s --messages 1000000 --sleep 20 --duplicates 10 --percent-deletes 10" % (self.kafka.bootstrap_servers(self.security_protocol))
cmd += " 2>> %s | tee -a %s &" % (self.logs["tool_logs"]["path"], self.logs["tool_logs"]["path"])
return cmd
def stop_node(self, node):
node.account.kill_java_processes(self.java_class_name(), clean_shutdown=True,
allow_fail=True)
stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec)
assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \
(str(node.account), str(self.stop_timeout_sec))
def clean_node(self, node):
node.account.kill_java_processes(self.java_class_name(), clean_shutdown=False,
allow_fail=True)
node.account.ssh("rm -rf %s" % LogCompactionTester.OUTPUT_DIR, allow_fail=False)
def java_class_name(self):
return "kafka.tools.LogCompactionTester"
@property
def is_done(self):
return self.log_compaction_completed
| 45.58427
| 163
| 0.700271
|
4a1965df56509f833a3dff843ef255f091a80ece
| 254
|
py
|
Python
|
conversion.py
|
cagis2019/conversion_tofix
|
d27f5df148bec658b872bf767b1aeed798c1720c
|
[
"Unlicense"
] | 2
|
2019-08-05T21:06:58.000Z
|
2020-08-03T17:52:23.000Z
|
conversion.py
|
cagis2019/conversion_tofix
|
d27f5df148bec658b872bf767b1aeed798c1720c
|
[
"Unlicense"
] | 7
|
2017-08-01T20:41:42.000Z
|
2020-08-03T19:01:34.000Z
|
conversion.py
|
cagis2019/conversion_tofix
|
d27f5df148bec658b872bf767b1aeed798c1720c
|
[
"Unlicense"
] | 92
|
2017-08-01T18:17:35.000Z
|
2021-08-02T21:54:00.000Z
|
"""Conversion tools for Python"""
def dollars2cents(dollars):
"""Convert dollars to cents"""
cents = dollars * 100
return cents
def gallons2liters(gallons):
"""Convert gallons to liters"""
liters = gallons * 3.785
return liters
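# Hedged usage sketch (illustrative only): quick checks of the converters above.
if __name__ == "__main__":
    print(dollars2cents(2.5))  # 250.0
    print(gallons2liters(1))   # 3.785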
| 21.166667
| 35
| 0.665354
|
4a19660efe911a853cb00269ec81552d0f6b5935
| 749
|
py
|
Python
|
example_python_client.py
|
Fifczak/FeedbacksAPI
|
b35863e3b6cca6c07077571301bcd6d15c7c2c83
|
[
"MIT"
] | null | null | null |
example_python_client.py
|
Fifczak/FeedbacksAPI
|
b35863e3b6cca6c07077571301bcd6d15c7c2c83
|
[
"MIT"
] | null | null | null |
example_python_client.py
|
Fifczak/FeedbacksAPI
|
b35863e3b6cca6c07077571301bcd6d15c7c2c83
|
[
"MIT"
] | null | null | null |
from requests.auth import HTTPBasicAuth
from datetime import datetime
import requests
import json
url_get = "http://192.168.10.232:444/api/feedbacks"
url_put = "http://192.168.10.232:444/api/feedbacks/put"
headers = {"apikey": "7B5zIqmRGXmrJTFmKa99vcit"}
def get_feedbacks():
    req = requests.get(url_get, headers=headers)
print(req.text)
def send_feedback(remark_id, feedback_text):
feedback_data = {'im_remark_id' : remark_id, 'feedback' : feedback_text}
feedback_data = json.dumps(feedback_data)
r = requests.post(url_put, headers=headers, json = feedback_data)
print(r.text)
#get_feedbacks()
#send_feedback('12123123', 'One more Test feedback {}'.format(datetime.now()))
| 34.045455
| 82
| 0.728972
|
4a1966f170e3d8ebd5ed66e0f5730c71c075e448
| 21,075
|
py
|
Python
|
failed-trials/handwrite.py
|
hiroki-kyoto/alice
|
273e7f3d647c685db65d224baacd21e9f0e361f9
|
[
"MIT"
] | null | null | null |
failed-trials/handwrite.py
|
hiroki-kyoto/alice
|
273e7f3d647c685db65d224baacd21e9f0e361f9
|
[
"MIT"
] | null | null | null |
failed-trials/handwrite.py
|
hiroki-kyoto/alice
|
273e7f3d647c685db65d224baacd21e9f0e361f9
|
[
"MIT"
] | null | null | null |
# handwrite.py
import numpy as np
import tensorflow as tf
from PIL import Image
# canvas setting: canvas height and width, and pen radius
h, w = 256, 256
r = w // 16
color_bound = 0.5
sim_c = 0.5  # the "speed of light" in the simulation: the maximum speed allowed
sim_d = 1.0/w  # the minimum spatial step of the simulation
sim_t = sim_d / sim_c
num_moves = 128
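# Worked example of the step sizes above (assuming the defaults w = 256, sim_c = 0.5):
# sim_d = 1/256 ~= 0.0039 canvas units and sim_t = sim_d / sim_c ~= 0.0078,
# so update_sheet() simulates roughly 128 sub-steps per unit of time.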
def depth2color(depth):
if depth < color_bound:
return depth / color_bound
else:
return 1.0
def dot(bmp, x, y, p):
x_int = int(x * w)
y_int = int(y * h)
p_int = int(p * r)
if p > 0:
for i in range(y_int - p_int, y_int + p_int + 1):
for j in range(x_int - p_int, x_int + p_int + 1):
if 0 <= i < h and 0 <= j < w:
if (i - y_int) * (i - y_int) + (j - x_int) * (j - x_int) <= p_int * p_int:
bmp[i, j] = np.minimum(1.0, bmp[i, j] + depth2color(p))
# draw black lines on white sheet
def update_sheet(bmp_, pos_, vel_, mov_):
    # pos_ holds the initial position and pressure.
    # mov_ holds the acceleration over position and pressure.
    # vel_ holds the velocity over the sheet surface and along the direction
    # perpendicular to the sheet.
x, y, p = pos_[0], pos_[1], pos_[2]
v_x, v_y, v_p = vel_[0], vel_[1], vel_[2]
a_x, a_y, a_p = mov_[0], mov_[1], mov_[2]
last_x = x
last_y = y
for t in np.arange(0, 1, sim_t):
x_t = x + v_x * t + 0.5 * a_x * t * t
y_t = y + v_y * t + 0.5 * a_y * t * t
p_t = p + v_p * t + 0.5 * a_p * t * t
        if x_t != last_x or y_t != last_y:
dot(bmp_, x_t, y_t, p_t)
last_x = x_t
last_y = y_t
x = x + v_x + 0.5 * a_x
y = y + v_y + 0.5 * a_y
p = p + v_p + 0.5 * a_p
v_x = v_x + a_x
v_y = v_y + a_y
v_p = v_p + a_p
if p > 1 or p < 0:
v_p = 0
p = np.minimum(np.maximum(p, 0), 1)
if x > 1 or x < 0:
v_x = 0
x = np.minimum(np.maximum(x, 0), 1)
if y > 1 or y < 0:
v_y = 0
y = np.minimum(np.maximum(y, 0), 1)
pos_[0] = x
pos_[1] = y
pos_[2] = p
vel_[0] = v_x
vel_[1] = v_y
vel_[2] = v_p
def act_fn():
return tf.nn.leaky_relu
def ini_fn():
return tf.initializers.truncated_normal(0.0, 0.1)
def dense_block(input_, dims, norm):
if norm:
out_ = tf.layers.batch_normalization(input_)
else:
out_ = input_
for i in range(len(dims)-1):
out_ = tf.layers.dense(
out_,
dims[i],
act_fn(),
True,
kernel_initializer=ini_fn())
out_ = tf.layers.dense(
out_,
dims[-1],
None,
True,
kernel_initializer=ini_fn())
return out_
def conv_block(input_, filters, strides, norm):
if norm:
out_ = tf.layers.batch_normalization(input_)
else:
out_ = input_
for i in range(len(filters)-1):
out_ = tf.layers.conv2d(
out_,
filters[i],
3,
strides[i],
'same',
activation=act_fn(),
kernel_initializer=ini_fn())
out_ = tf.layers.conv2d(
out_,
filters[-1],
3,
strides[-1],
'same',
kernel_initializer=ini_fn())
return out_
def deconv_block(input_, filters, strides, norm):
if norm:
out_ = tf.layers.batch_normalization(input_)
else:
out_ = input_
for i in range(len(filters)-1):
out_ = tf.layers.conv2d(
out_,
filters[i],
3,
strides[i],
'same',
activation=act_fn(),
kernel_initializer=ini_fn())
out_ = tf.layers.conv2d_transpose(
out_,
filters[-1],
3,
strides[-1],
'same',
kernel_initializer=ini_fn())
return out_
def action_encoder(t_action):
with tf.variable_scope(
name_or_scope='action/encoder',
reuse=tf.AUTO_REUSE):
t_out = dense_block(t_action, [8, 16], False)
t_out = dense_block(t_out, [8, 16], True)
return t_out
def states_encoder(t_states):
with tf.variable_scope(
name_or_scope='states/encoder',
reuse=tf.AUTO_REUSE):
# reshape into one-dimension vector in such form:
# (v_x, v_y, v_p, x, y, p), a 6-item group.
t_out = tf.reshape(t_states, shape=[1, 1, 1, 6])
t_out = dense_block(t_out, [8, 16], False)
t_out = dense_block(t_out, [8, 16], True)
return t_out
def observ_encoder(t_observ):
with tf.variable_scope(
name_or_scope='observ/encoder',
reuse=tf.AUTO_REUSE):
t_out = conv_block(t_observ, [8, 16], [2, 2], False)
t_out = conv_block(t_out, [8, 16], [1, 2], True)
t_out = conv_block(t_out, [8, 4, 1], [1, 1, 1], True)
shape_ = t_out.shape.as_list()
t_out = tf.reshape(t_out, shape=[1, 1, 1, shape_[1] * shape_[2]])
return t_out
def merge_features(t_feat_action, t_feat_states, t_feat_observ):
with tf.variable_scope(
name_or_scope='merge',
reuse=tf.AUTO_REUSE):
t_out = tf.concat(
[t_feat_action, t_feat_states, t_feat_observ],
axis=-1)
t_out = dense_block(t_out, [8, 16], True)
t_out = dense_block(t_out, [8, 16], True)
return t_out
def states_decoder(t_feat_merged):
with tf.variable_scope(
name_or_scope='states/decoder',
reuse=tf.AUTO_REUSE):
t_out = dense_block(t_feat_merged, [8, 16], True)
t_out = dense_block(t_out, [8, 6], False)
# reshape into 2-dimension array in such form:
# [[v_x, v_y, v_p], [x, y, p]], a 2x3 array.
t_out = tf.reshape(t_out, shape=[1, 1, 2, 3])
return t_out
def observ_decoder(t_feat_merged):
with tf.variable_scope(
name_or_scope='observ/decoder',
reuse=tf.AUTO_REUSE):
t_out = tf.reshape(t_feat_merged, shape=[1, 4, 4, 1])
t_out = deconv_block(t_out, [4, 1], [1, 2], True)
t_out = deconv_block(t_out, [4, 1], [1, 2], True)
t_out = deconv_block(t_out, [4, 1], [1, 2], True)
t_out = deconv_block(t_out, [4, 1], [1, 2], True)
return t_out
def visualize_bmp(bmp):
Image.fromarray(np.uint8((1 - bmp) * 255)).show()
def save_bmp(bmp, itr, dir_):
Image.fromarray(np.uint8((1 - bmp) * 255)).save('%s/%d.jpg' % (dir_, itr))
def merge_bmp(bmp_ori, bmp_left, bmp_right):
seg_width = 3
seg_band = np.zeros([bmp_ori.shape[0], seg_width, 3])
seg_band[:, :, 0] = 0.0
seg_band[:, :, 1] = 1.0
seg_band[:, :, 2] = 1.0
bmp_ori = np.stack([bmp_ori, bmp_ori, bmp_ori], axis=-1)
bmp_left = np.stack([bmp_left, bmp_left, bmp_left], axis=-1)
bmp_right = np.stack([bmp_right, bmp_right, bmp_right], axis=-1)
return np.concatenate((bmp_ori, seg_band, bmp_left, seg_band, bmp_right), axis=1)
def expand_dims(tensor, axises):
for i in range(len(axises)):
tensor = np.expand_dims(tensor, axis=axises[i])
return tensor
def cut(bmp):
return np.maximum(np.minimum(bmp, 1), 0)
class Simulator:
def __init__(self):
self.t_action = tf.placeholder(dtype=tf.float32, shape=[1, 1, 1, 3])
self.t_states = tf.placeholder(dtype=tf.float32, shape=[1, 1, 2, 3])
self.t_observ = tf.placeholder(dtype=tf.float32, shape=[1, h, w, 1])
self.t_next_states = tf.placeholder(dtype=tf.float32, shape=[1, 1, 2, 3])
self.t_next_observ = tf.placeholder(dtype=tf.float32, shape=[1, h, w, 1])
# build the encoder model
t_feat_action = action_encoder(self.t_action)
t_feat_states = states_encoder(self.t_states)
t_feat_observ = observ_encoder(self.t_observ)
print(t_feat_action.shape)
print(t_feat_states.shape)
print(t_feat_observ.shape)
t_feat_merged = merge_features(t_feat_action, t_feat_states, t_feat_observ)
print(t_feat_merged.shape)
# build the decoder model
self.t_pred_states = states_decoder(t_feat_merged)
self.t_pred_observ = observ_decoder(t_feat_merged)
print(self.t_pred_states.shape)
print(self.t_pred_observ.shape)
self.t_loss_states = tf.reduce_mean(
tf.abs(self.t_pred_states - self.t_next_states))
self.t_loss_observ = tf.reduce_mean(
tf.abs(self.t_pred_observ - self.t_next_observ))
alpha = 1.0
self.t_loss_global = self.t_loss_states * alpha + self.t_loss_observ * (1 - alpha)
self.t_opt = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(self.t_loss_global)
self.sess = tf.Session()
def train(self, model_path, dump_path):
saver = tf.train.Saver()
if tf.train.checkpoint_exists(model_path):
saver.restore(self.sess, model_path)
else:
self.sess.run(tf.global_variables_initializer())
train_step = 100000
reset_prob = 0.01
bmp = np.zeros([h, w], dtype=np.float32)
bmp_last = np.zeros([h, w], dtype=np.float32)
pos = np.random.rand(3)
vel = np.random.rand(3)
states = np.stack([vel, pos], axis=0)
states_last = np.copy(states)
loss_s_av = 0
loss_o_av = 0
for i in range(train_step):
if np.random.rand() < reset_prob:
bmp[:, :] = 0
pos = np.random.rand(3)
vel = np.random.rand(3)
states[0, :] = vel
states[1, :] = pos
bmp_last[:, :] = bmp[:, :]
states_last[:, :] = states[:, :]
action_ = np.random.rand(3) - 0.5
action_[:2] = 0.05 * action_[:2]
action_[2] = 0.5 * action_[2]
update_sheet(bmp, pos, vel, action_)
states[0, :] = vel
states[1, :] = pos
pred, _, loss_s, loss_o = self.sess.run(
[self.t_pred_observ,
self.t_opt,
self.t_loss_states,
self.t_loss_observ],
feed_dict={
self.t_action: expand_dims(action_, axises=[0, 0, 0]),
self.t_states: expand_dims(states_last, axises=[0, 0]),
self.t_next_states: expand_dims(states, axises=[0, 0]),
self.t_observ: expand_dims(bmp_last, axises=[0, -1]),
self.t_next_observ: expand_dims(bmp, axises=[0, -1]),
}
)
m = 100.0
if i < m:
loss_s_av = loss_s_av * (i / m) + loss_s * (1 - i / m)
loss_o_av = loss_o_av * (i / m) + loss_o * (1 - i / m)
else:
loss_s_av = loss_s_av * ((m - 1) / m) + loss_s * (1 / m)
loss_o_av = loss_o_av * ((m - 1) / m) + loss_o * (1 / m)
if i % 1000 == 0:
print("Itr=%d States=%.5f Observ=%.5f" % (i, loss_s_av, loss_o_av))
                bmp_merged = merge_bmp(bmp, bmp_last, cut(pred[0, :, :, 0]))
save_bmp(bmp_merged, i, dump_path)
# print('acceleration=%s' % str(action_))
# print('previous velocity=%s' % str(states_last[0, :]))
# print('previous position=%s' % str(states_last[1, :]))
# print('velocity=%s' % str(states[0, :]))
# print('position=%s' % str(states[1, :]))
saver.save(self.sess, model_path)
def load(self, model_path):
pass
def test(self, samples):
pass
class StatePredictor:
def __init__(self):
self.t_action = tf.placeholder(dtype=tf.float32, shape=[1, 3])
self.t_states = tf.placeholder(dtype=tf.float32, shape=[2, 3])
self.t_next_states = tf.placeholder(dtype=tf.float32, shape=[2, 3])
t_feat = tf.concat((self.t_action, self.t_states), axis=0)
t_feat = tf.reshape(t_feat, shape=[1, 9])
t_feat = tf.layers.dense(
t_feat,
8,
act_fn(),
True,
kernel_initializer=ini_fn())
t_feat = tf.layers.dense(
t_feat,
16,
act_fn(),
True,
kernel_initializer=ini_fn())
t_feat = tf.layers.dense(
t_feat,
6,
act_fn(),
True,
kernel_initializer=ini_fn())
self.t_pred_states = tf.reshape(t_feat, shape=[2, 3])
self.t_loss = tf.reduce_max(tf.abs(self.t_pred_states - self.t_next_states))
self.t_opt = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(self.t_loss)
self.sess = tf.Session()
def train(self, model_path, dump_path):
saver = tf.train.Saver()
if tf.train.checkpoint_exists(model_path):
saver.restore(self.sess, model_path)
else:
self.sess.run(tf.global_variables_initializer())
train_step = 1000000
reset_prob = 0.01
pos = np.random.rand(3)
vel = np.random.rand(3)
states = np.stack([vel, pos], axis=0)
states_last = np.copy(states)
loss_cache = np.zeros([1000])
for i in range(train_step):
if np.random.rand() < reset_prob:
pos = np.random.rand(3)
vel = np.random.rand(3)
states[0, :] = vel[:]
states[1, :] = pos[:]
states_last[:, :] = states[:, :]
action_ = np.random.rand(3) - 0.5
action_[:2] = 0.1 * action_[:2]
action_[2] = 0.5 * action_[2]
# update the states with physical rules
pos = pos + vel + 0.5 * action_
vel = vel + action_
valid_mask = np.float32(pos >= 0)
valid_mask = valid_mask * np.float32(pos <= 1)
vel = vel * valid_mask
pos = np.maximum(np.minimum(pos, 1), 0)
states[0, :] = vel[:]
states[1, :] = pos[:]
pred, _, loss = self.sess.run(
[
self.t_pred_states,
self.t_opt,
self.t_loss
],
feed_dict={
self.t_action: expand_dims(action_, axises=[0]),
self.t_states: states_last,
self.t_next_states: states
}
)
loss_cache[i%len(loss_cache)] = loss
if i % 1000 == 0:
loss_mean = np.mean(loss_cache)
loss_vari = np.sqrt(np.sum(np.square(loss_cache - loss_mean)) / (len(loss_cache) - 1))
print("Itr=%d Loss=%.5f(+/-%.5f)" % (i, loss_mean, loss_vari))
print('velocity: %s - %s' % (str(states[0, :]), str(pred[0, :])))
print('position: %s - %s' % (str(states[1, :]), str(pred[1, :])))
saver.save(self.sess, model_path)
# pos: the input position
# vel: the input velocity
# acc: the input acceleration
def coordconv(pos, vel, acc):
    # Build a normalized coordinate grid over the canvas and weight each cell by its
    # squared distance to the pen position (only the x, y components of pos are used).
    rows = np.linspace(0, 1, h)
    cols = np.linspace(0, 1, w)
    rows, cols = np.meshgrid(rows, cols)
    coords = np.stack([rows, cols], axis=-1)
    spots = 1 / (1 + tf.reduce_sum(tf.square(coords - pos[:2]), axis=-1, keep_dims=True))
    return spots
class ObservationPredictor:
def __init__(self):
self.t_action = tf.placeholder(dtype=tf.float32, shape=[1, 3])
self.t_states = tf.placeholder(dtype=tf.float32, shape=[2, 3])
self.t_observ = tf.placeholder(dtype=tf.float32, shape=[1, h, w, 1])
self.t_next_observ = tf.placeholder(dtype=tf.float32, shape=[1, h, w, 1])
t_feat = tf.concat((self.t_action, self.t_states), axis=0)
t_feat = tf.reshape(t_feat, shape=[1, 9])
t_feat = tf.layers.dense(
t_feat,
8,
act_fn(),
True,
kernel_initializer=ini_fn())
t_feat = tf.layers.dense(
t_feat,
16,
act_fn(),
True,
kernel_initializer=ini_fn())
t_feat = tf.layers.dense(
t_feat,
64,
act_fn(),
True,
kernel_initializer=ini_fn())
# convert into a image
t_feat = tf.reshape(t_feat, [1, 8, 8, 1])
t_feat = tf.image.resize_bilinear(t_feat, [h, w])
# t_feat = tf.layers.conv2d_transpose(
# inputs=t_feat,
# filters=4,
# kernel_size=3,
# strides=2,
# padding='same',
# activation=act_fn(),
# kernel_initializer=ini_fn())
# t_feat = tf.layers.conv2d_transpose(
# inputs=t_feat,
# filters=4,
# kernel_size=3,
# strides=2,
# padding='same',
# activation=act_fn(),
# kernel_initializer=ini_fn())
# t_feat = tf.layers.conv2d_transpose(
# inputs=t_feat,
# filters=4,
# kernel_size=3,
# strides=2,
# padding='same',
# activation=act_fn(),
# kernel_initializer=ini_fn())
# t_feat = tf.layers.conv2d_transpose(
# inputs=t_feat,
# filters=1,
# kernel_size=3,
# strides=2,
# padding='same',
# activation=act_fn(),
# kernel_initializer=ini_fn())
self.t_pred_observ = tf.minimum(t_feat + self.t_observ, 1.0)
self.t_loss = tf.reduce_sum(tf.abs(self.t_pred_observ - self.t_next_observ))
self.t_opt = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(self.t_loss)
self.sess = tf.Session()
def train(self, model_path, dump_path):
saver = tf.train.Saver()
if tf.train.checkpoint_exists(model_path):
saver.restore(self.sess, model_path)
else:
self.sess.run(tf.global_variables_initializer())
train_step = 100000
reset_prob = 1.0
bmp = np.zeros([h, w], dtype=np.float32)
bmp_last = np.zeros([h, w], dtype=np.float32)
pos = np.random.rand(3)
vel = np.random.rand(3)
states = np.stack([vel, pos], axis=0)
states_last = np.copy(states)
loss_cache = np.zeros([1000])
for i in range(train_step):
if np.random.rand() < reset_prob:
bmp[:, :] = 0
pos = np.random.rand(3)
vel = np.random.rand(3) - 0.5
states[0, :] = vel[:]
states[1, :] = pos[:]
bmp_last[:, :] = bmp[:, :]
states_last[:, :] = states[:, :]
action_ = np.random.rand(3) - 0.5
action_[:2] = 0.1 * action_[:2]
action_[2] = 0.5 * action_[2]
# update the states with physical rules
update_sheet(bmp, pos, vel, action_)
states[0, :] = vel
states[1, :] = pos
pred, _, loss = self.sess.run(
[
self.t_pred_observ,
self.t_opt,
self.t_loss
],
feed_dict={
self.t_action: expand_dims(action_, axises=[0]),
self.t_states: states_last,
self.t_observ: expand_dims(bmp_last, axises=[-1, 0]),
self.t_next_observ: expand_dims(bmp, axises=[-1, 0])
}
)
loss_cache[i%len(loss_cache)] = loss
if (i + 1) % 1000 == 0:
loss_mean = np.mean(loss_cache)
loss_vari = np.sqrt(np.sum(np.square(loss_cache - loss_mean)) / (len(loss_cache) - 1))
print("Itr=%d Loss=%.5f(+/-%.5f)" % (i, loss_mean, loss_vari))
bmp_merged = merge_bmp(bmp, cut(bmp - bmp_last), cut(pred[0, :, :, 0] - bmp_last))
save_bmp(bmp_merged, i, dump_path)
saver.save(self.sess, model_path)
def example_chinese_word():
return np.array([
[0.01, 0.02, 0.2],
[0.0, -0.02, 0.0],
[0.3, -0.02, -0.07],
[-0.3, 0.04, 0.03],
[-0.1, 0.25, -0.5],
[-0.05, 0.0, 0.5],
[0.1, -0.7, -1.0],
[0.1, 0.0, 1.0],
[0.05, 0.6, 0.8],
[-0.35, 0.3, -1.5],
[0.0, -0.3, 1.0]
])
def dataset_stroke():
strokes = list()
# horizontal strokes
strokes.append(np.array([
[0.1, 0.05, 0.5],
]))
return strokes
if __name__ == '__main__':
bmp = np.zeros([h, w], dtype=np.float32)
pos = np.array([0.5, 0.5, 0.0])
vel = np.zeros([3])
moves = dataset_stroke()[0]
for mov_ in moves:
update_sheet(bmp, pos, vel, mov_)
visualize_bmp(bmp)
# sim = Simulator()
# sim.train('models/simulator.ckpt', 'shots')
# state_predictor = StatePredictor()
# state_predictor.train('models/state_predictor.ckpt', 'shots')
# observ_predictor = ObservationPredictor()
# observ_predictor.train('models/observ_predictor.ckpt', 'shots')
| 31.931818
| 102
| 0.528114
|
4a196720344766ddc87bbf07add93a6b48fe114f
| 5,276
|
py
|
Python
|
src/raman_fitting/exporting/exporter.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 3
|
2021-03-03T21:02:11.000Z
|
2021-05-14T09:24:40.000Z
|
src/raman_fitting/exporting/exporter.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 8
|
2021-06-25T22:54:53.000Z
|
2021-08-09T10:07:30.000Z
|
src/raman_fitting/exporting/exporter.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 2
|
2021-07-08T09:49:49.000Z
|
2022-03-19T14:43:01.000Z
|
import pandas as pd
from raman_fitting.exporting.plotting import fit_spectrum_plot, raw_data_export
import logging
logger = logging.getLogger(__name__)
class ExporterError(Exception):
"""Error occured during the exporting functions"""
class Exporter:
"""
The Exporter class handles all the exporting of spectra and models
into figures and xlsx files.
"""
def __init__(self, arg, raw_out=True, plot=True, model_names_prefix=["1st", "2nd"]):
self.raw_out = raw_out
self.plot = plot
try:
self.delegator(arg)
except ExporterError:
logger.warning(
"f{self.__class__.__qualname__} failed export from {type(arg)}"
)
except Exception as e:
logger.error(
"f{self.__class__.__qualname__} failed export with unexpected error {e}"
)
# Exporting and Plotting
def delegator(self, arg):
self.fitter = arg
if "Fitter" in type(arg).__name__:
self.fitter = arg
self.split_results()
if self.raw_out:
self.raw_export()
if self.plot:
self.export_fitting_plotting_models()
elif isinstance(arg, list):
# "list" in type([]).__name__:
# FIXME
try:
self.export_from_list(arg)
except Exception as e:
logger.error(
"f{self.__class__.__qualname__} failed export from list", e
)
else:
logger.warning(
"f{self.__class__.__qualname__} failed export from unknown arg type {type(arg)}"
)
raise ExporterError
def export_from_list(self, arg):
        fitter_args = [i for i in arg if hasattr(i, "fitter")]
if fitter_args:
FitRes = pd.concat(
[
val.FitParameters
for exp in fitter_args
for k, val in exp.fitter.FitResults.items()
]
)
_info = fitter_args[0].fitter.info
# self.fitter[0].fitter.info
self.export_fitparams_grp_per_model(FitRes, _info)
def export_fitparams_grp_per_model(self, FitRes, _info):
DestGrpDir = _info.get("DestGrpDir")
grpnm = _info["SampleGroup"]
for pknm, pkgrp in FitRes.groupby(level=0):
peak_destpath = DestGrpDir.joinpath(f"{grpnm}_FitParameters_{pknm}")
pkgrp.dropna(axis=1).to_excel(
peak_destpath.with_suffix(".xlsx"), index=False
)
def raw_export(self):
raw_data_export(self.fitter.spectra_arg.fitting_spectra)
def split_results(self):
pass
# self._2nd = _2nd
# _1st = {k:val for k,val in self.fitter.FitResults.items() if k.startswith('1st')}
# self._1st = _1st
def export_fitting_plotting_models(self):
pars1, pars2 = [], []
_1st = {
k: val for k, val in self.fitter.FitResults.items() if k.startswith("1st")
}
_2nd = {
k: val for k, val in self.fitter.FitResults.items() if k.startswith("2nd")
}
for modname_2, fitres_2 in _2nd.items():
self.export_xls_from_spec(fitres_2)
pars2.append(fitres_2.FitParameters)
for modname_1, fitres_1 in _1st.items():
self.export_xls_from_spec(fitres_1)
try:
fit_spectrum_plot(
modname_1,
modname_2,
fitres_1,
fitres_2,
plot_Annotation=True,
plot_Residuals=True,
)
except Exception as e:
print(
f"Error fit_spectrum_plot:{modname_1}, {fitres_1.raw_data_col}.\n {e}"
)
pars1.append(fitres_1.FitParameters)
return pd.concat(pars1, sort=False), pd.concat(pars2, sort=False)
def export_xls_from_spec(self, res_peak_spec):
try:
# sID = res_peak_spec.extrainfo['SampleID']
# peak_destpath = res_peak_spec.extrainfo['DestFittingComps.unique()[0].joinpath(f'Model_{res_peak_spec.peak_model}_{sID}')
# peak_destpath_extra = res_peak_spec.extrainfo.DestFittingComps.unique()[0].joinpath(f'Extra_{res_peak_spec.peak_model}_{sID}')
res_peak_spec.FitComponents.to_excel(
res_peak_spec.extrainfo["DestFittingModel"].with_suffix(".xlsx"),
index=False,
)
# res_peak_spec.extrainfo.to_excel(peak_destpath_extra.with_suffix('.xlsx'), index=False)
except Exception as e:
print("Error export_xls_from_spec", e)
    # TODO define function for exporting all the indexes _all_index_export
# index = RamanExport().export_FitParams_Grp(FitParams1, FitParams2, export_info_out, grpnm,sID)
# all_index.append(index)
# pars_index = pd.DataFrame(*all_index,columns=list(GrpNames.sGrp_cols[0:2] +('PeakModel','DestPars')))
# pars_index.to_excel( export_info_out.get('DestGrpDir').joinpath(f'{sGr}_index.xlsx'))
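# Illustrative usage sketch (not part of the original module). Real use passes
# a Fitter instance, or a list of objects that expose a ``fitter`` attribute;
# the arguments below are placeholders that only exercise the delegation logic.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    Exporter([], raw_out=False, plot=False)  # list input: export_from_list is a no-op for an empty list
    Exporter(object())  # unsupported type: logs a warning after ExporterError is raised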
| 36.895105
| 140
| 0.576194
|
4a19682e2c558afd870406e85f48116351d701bd
| 17,280
|
py
|
Python
|
VENV/lib/python3.6/site-packages/pandas/tests/dtypes/test_cast.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 69
|
2020-03-31T06:40:17.000Z
|
2022-02-25T11:48:18.000Z
|
venv/lib/python3.7/site-packages/pandas/tests/dtypes/test_cast.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 8
|
2019-12-04T23:44:11.000Z
|
2022-02-10T08:31:40.000Z
|
venv/lib/python3.7/site-packages/pandas/tests/dtypes/test_cast.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 28
|
2020-04-15T15:24:17.000Z
|
2021-12-26T04:05:02.000Z
|
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type,
construct_1d_object_array_from_listlike,
construct_1d_ndarray_preserving_na,
construct_1d_arraylike_from_scalar)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_numpy_array_equal(result, arr)
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def testinfer_dtype_from_scalar(self):
# Test that infer_dtype_from_scalar is returning correct dtype for int
# and float.
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'm8[ns]'
for freq in ['M', 'D']:
p = Period('2011-01-01', freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
assert dtype == 'period[{0}]'.format(freq)
assert val == p.ordinal
dtype, val = infer_dtype_from_scalar(p)
            assert dtype == np.object_
assert val == p
# misc
for data in [date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern', 'Asia/Tokyo'])
def testinfer_from_scalar_tz(self, tz):
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
def testinfer_dtype_from_scalar_errors(self):
with pytest.raises(ValueError):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[('foo', np.object_, False),
(b'foo', np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ''], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(pd.Categorical(list('aabc')), np.object_, False),
(pd.Categorical([1, 2, 3]), np.int64, False),
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
(np.datetime64('2016-01-01'), np.dtype('=M8[D]'), False),
(pd.date_range('20160101', periods=3),
np.dtype('=M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
(pd.Series(list('abc')), np.object_, False),
(pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
'datetime64[ns, US/Eastern]', True)])
def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
def test_cast_scalar_to_array(self):
arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
exp = np.ones((3, 2), dtype=np.int64)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((3, 2), 1.1)
exp = np.empty((3, 2), dtype=np.float64)
exp.fill(1.1)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
exp = np.empty((2, 3), dtype='datetime64[ns]')
exp.fill(np.datetime64('2011-01-01'))
tm.assert_numpy_array_equal(arr, exp)
# pandas dtype is stored as object dtype
obj = Timestamp('2011-01-01', tz='US/Eastern')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
obj = Period('2011-01-01', freq='D')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
class TestMaybe(object):
def test_maybe_convert_string_to_array(self):
result = maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
assert result.dtype == object
result = maybe_convert_string_to_object(1)
assert result == 1
arr = np.array(['x', 'y'], dtype=str)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# unicode
arr = np.array(['x', 'y']).astype('U')
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# object
arr = np.array(['x', 2], dtype=object)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
assert result.dtype == object
def test_maybe_convert_scalar(self):
# pass thru
result = maybe_convert_scalar('x')
assert result == 'x'
result = maybe_convert_scalar(np.array([1]))
assert result == np.array([1])
# leave scalar dtype
result = maybe_convert_scalar(np.int64(1))
assert result == np.int64(1)
result = maybe_convert_scalar(np.int32(1))
assert result == np.int32(1)
result = maybe_convert_scalar(np.float32(1))
assert result == np.float32(1)
result = maybe_convert_scalar(np.int64(1))
assert result == np.float64(1)
# coerce
result = maybe_convert_scalar(1)
assert result == np.int64(1)
result = maybe_convert_scalar(1.0)
assert result == np.float64(1)
result = maybe_convert_scalar(Timestamp('20130101'))
assert result == Timestamp('20130101').value
result = maybe_convert_scalar(datetime(2013, 1, 1))
assert result == Timestamp('20130101').value
result = maybe_convert_scalar(Timedelta('1 day 1 min'))
assert result == Timedelta('1 day 1 min').value
def test_maybe_infer_to_datetimelike(self):
# GH16362
# pandas=0.20.1 raises IndexError: tuple index out of range
result = DataFrame(np.array([[NaT, 'a', 'b', 0],
[NaT, 'b', 'c', 1]]))
assert result.size == 8
# this construction was fine
result = DataFrame(np.array([[NaT, 'a', 0],
[NaT, 'b', 1]]))
assert result.size == 6
# GH19671
result = Series(['M1701', Timestamp('20130101')])
assert result.dtype.kind == 'O'
class TestConvert(object):
def test_maybe_convert_objects_copy(self):
values = np.array([1, 2])
out = maybe_convert_objects(values, copy=False)
assert values is out
out = maybe_convert_objects(values, copy=True)
assert values is not out
values = np.array(['apply', 'banana'])
out = maybe_convert_objects(values, copy=False)
assert values is out
out = maybe_convert_objects(values, copy=True)
assert values is not out
class TestCommonTypes(object):
def test_numpy_dtypes(self):
# (source_types, destination_type)
testcases = (
# identity
((np.int64,), np.int64),
((np.uint64,), np.uint64),
((np.float32,), np.float32),
((np.object,), np.object),
# into ints
((np.int16, np.int64), np.int64),
((np.int32, np.uint32), np.int64),
((np.uint16, np.uint64), np.uint64),
# into floats
((np.float16, np.float32), np.float32),
((np.float16, np.int16), np.float32),
((np.float32, np.int16), np.float32),
((np.uint64, np.int64), np.float64),
((np.int16, np.float64), np.float64),
((np.float16, np.int64), np.float64),
# into others
((np.complex128, np.int32), np.complex128),
((np.object, np.float32), np.object),
((np.object, np.int16), np.object),
# bool with int
((np.dtype('bool'), np.int64), np.object),
((np.dtype('bool'), np.int32), np.object),
((np.dtype('bool'), np.int16), np.object),
((np.dtype('bool'), np.int8), np.object),
((np.dtype('bool'), np.uint64), np.object),
((np.dtype('bool'), np.uint32), np.object),
((np.dtype('bool'), np.uint16), np.object),
((np.dtype('bool'), np.uint8), np.object),
# bool with float
((np.dtype('bool'), np.float64), np.object),
((np.dtype('bool'), np.float32), np.object),
((np.dtype('datetime64[ns]'), np.dtype('datetime64[ns]')),
np.dtype('datetime64[ns]')),
((np.dtype('timedelta64[ns]'), np.dtype('timedelta64[ns]')),
np.dtype('timedelta64[ns]')),
((np.dtype('datetime64[ns]'), np.dtype('datetime64[ms]')),
np.dtype('datetime64[ns]')),
((np.dtype('timedelta64[ms]'), np.dtype('timedelta64[ns]')),
np.dtype('timedelta64[ns]')),
((np.dtype('datetime64[ns]'), np.dtype('timedelta64[ns]')),
np.object),
((np.dtype('datetime64[ns]'), np.int64), np.object)
)
for src, common in testcases:
assert find_common_type(src) == common
with pytest.raises(ValueError):
# empty
find_common_type([])
def test_categorical_dtype(self):
dtype = CategoricalDtype()
assert find_common_type([dtype]) == 'category'
assert find_common_type([dtype, dtype]) == 'category'
assert find_common_type([np.object, dtype]) == np.object
def test_datetimetz_dtype(self):
dtype = DatetimeTZDtype(unit='ns', tz='US/Eastern')
assert find_common_type([dtype, dtype]) == 'datetime64[ns, US/Eastern]'
for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
np.dtype('datetime64[ns]'), np.object, np.int64]:
assert find_common_type([dtype, dtype2]) == np.object
assert find_common_type([dtype2, dtype]) == np.object
def test_period_dtype(self):
dtype = PeriodDtype(freq='D')
assert find_common_type([dtype, dtype]) == 'period[D]'
for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
PeriodDtype(freq='2D'), PeriodDtype(freq='H'),
np.dtype('datetime64[ns]'), np.object, np.int64]:
assert find_common_type([dtype, dtype2]) == np.object
assert find_common_type([dtype2, dtype]) == np.object
@pytest.mark.parametrize('datum1', [1, 2., "3", (4, 5), [6, 7], None])
@pytest.mark.parametrize('datum2', [8, 9., "10", (11, 12), [13, 14], None])
def test_cast_1d_array(self, datum1, datum2):
data = [datum1, datum2]
result = construct_1d_object_array_from_listlike(data)
# Direct comparison fails: https://github.com/numpy/numpy/issues/10218
assert result.dtype == 'object'
assert list(result) == data
@pytest.mark.parametrize('val', [1, 2., None])
def test_cast_1d_array_invalid_scalar(self, val):
pytest.raises(TypeError, construct_1d_object_array_from_listlike, val)
def test_cast_1d_arraylike_from_scalar_categorical(self):
# GH 19565 - Categorical result from scalar did not maintain categories
# and ordering of the passed dtype
cats = ['a', 'b', 'c']
cat_type = CategoricalDtype(categories=cats, ordered=False)
expected = pd.Categorical(['a', 'a'], categories=cats)
result = construct_1d_arraylike_from_scalar('a', len(expected),
cat_type)
tm.assert_categorical_equal(result, expected,
check_category_order=True,
check_dtype=True)
@pytest.mark.parametrize('values, dtype, expected', [
([1, 2, 3], None, np.array([1, 2, 3])),
(np.array([1, 2, 3]), None, np.array([1, 2, 3])),
(['1', '2', None], None, np.array(['1', '2', None])),
(['1', '2', None], np.dtype('str'), np.array(['1', '2', None])),
([1, 2, None], np.dtype('str'), np.array(['1', '2', None])),
])
def test_construct_1d_ndarray_preserving_na(values, dtype, expected):
result = construct_1d_ndarray_preserving_na(values, dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
| 37.894737
| 79
| 0.585532
|
4a1968791a65079fb0276f740d9d957058347dd3
| 396
|
py
|
Python
|
fwl-automation-decisions/domain/src/domain/model/zone/ZoneName.py
|
aherculano/fwl-project
|
6d4c4d40393b76d45cf13b572b5aabc0696e9285
|
[
"MIT"
] | null | null | null |
fwl-automation-decisions/domain/src/domain/model/zone/ZoneName.py
|
aherculano/fwl-project
|
6d4c4d40393b76d45cf13b572b5aabc0696e9285
|
[
"MIT"
] | null | null | null |
fwl-automation-decisions/domain/src/domain/model/zone/ZoneName.py
|
aherculano/fwl-project
|
6d4c4d40393b76d45cf13b572b5aabc0696e9285
|
[
"MIT"
] | null | null | null |
class ZoneName(object):
def __init__(self, value: str):
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value: str):
self._value = value.strip().upper()
def __eq__(self, other) -> bool:
if isinstance(other, ZoneName):
return self.value.__eq__(other.value)
return False
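# Illustrative usage sketch (not part of the original module): the setter
# normalises the stored value by stripping whitespace and upper-casing it,
# and equality is defined on that normalised value.
if __name__ == "__main__":
    dmz = ZoneName("  dmz ")
    assert dmz.value == "DMZ"
    assert dmz == ZoneName("DMZ ")
    assert not dmz == "DMZ"  # comparison against a non-ZoneName is always False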
| 22
| 49
| 0.598485
|
4a19698c26c536b09708eb73487d5da431508734
| 3,745
|
py
|
Python
|
src/third_party/beaengine/tests/0f3865.py
|
CrackerCat/rp
|
5fe693c26d76b514efaedb4084f6e37d820db023
|
[
"MIT"
] | 1
|
2022-01-17T17:40:29.000Z
|
2022-01-17T17:40:29.000Z
|
src/third_party/beaengine/tests/0f3865.py
|
CrackerCat/rp
|
5fe693c26d76b514efaedb4084f6e37d820db023
|
[
"MIT"
] | null | null | null |
src/third_party/beaengine/tests/0f3865.py
|
CrackerCat/rp
|
5fe693c26d76b514efaedb4084f6e37d820db023
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# EVEX.128.66.0F38.W0 65 /r
# vpblendmps xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
myEVEX = EVEX('EVEX.128.66.0F38.W0')
Buffer = bytes.fromhex('{}650e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x65)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpblendmps')
assert_equal(myDisasm.repr(), 'vpblendmps xmm25, xmm16, xmmword ptr [r14]')
# EVEX.256.66.0F38.W0 65 /r
# vpblendmps ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
myEVEX = EVEX('EVEX.256.66.0F38.W0')
Buffer = bytes.fromhex('{}650e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x65)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpblendmps')
assert_equal(myDisasm.repr(), 'vpblendmps ymm25, ymm16, ymmword ptr [r14]')
# EVEX.512.66.0F38.W0 65 /r
# vpblendmps zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
myEVEX = EVEX('EVEX.512.66.0F38.W0')
Buffer = bytes.fromhex('{}650e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x65)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpblendmps')
assert_equal(myDisasm.repr(), 'vpblendmps zmm25, zmm16, zmmword ptr [r14]')
# EVEX.128.66.0F38.W1 65 /r
        # vpblendmpd xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
myEVEX = EVEX('EVEX.128.66.0F38.W1')
Buffer = bytes.fromhex('{}650e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x65)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpblendmpd')
assert_equal(myDisasm.repr(), 'vpblendmpd xmm25, xmm16, xmmword ptr [r14]')
# EVEX.256.66.0F38.W1 65 /r
# vpblendmpd ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
myEVEX = EVEX('EVEX.256.66.0F38.W1')
Buffer = bytes.fromhex('{}650e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x65)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpblendmpd')
assert_equal(myDisasm.repr(), 'vpblendmpd ymm25, ymm16, ymmword ptr [r14]')
# EVEX.512.66.0F38.W1 65 /r
# vpblendmpd zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
myEVEX = EVEX('EVEX.512.66.0F38.W1')
Buffer = bytes.fromhex('{}650e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x65)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpblendmpd')
assert_equal(myDisasm.repr(), 'vpblendmpd zmm25, zmm16, zmmword ptr [r14]')
| 42.078652
| 83
| 0.657143
|
4a196b3e67747aa6e900537d286e290b24c2a492
| 5,959
|
py
|
Python
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/reporters/pushover.py
|
Alecto3-D/testable-greeter
|
09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78
|
[
"MIT"
] | 2
|
2017-07-11T18:56:27.000Z
|
2017-07-28T14:01:12.000Z
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/reporters/pushover.py
|
Alecto3-D/testable-greeter
|
09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78
|
[
"MIT"
] | 1
|
2017-07-28T13:53:41.000Z
|
2017-07-31T15:30:40.000Z
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/reporters/pushover.py
|
Alecto3-D/testable-greeter
|
09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78
|
[
"MIT"
] | null | null | null |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.python import log as twlog
from buildbot import config
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.process.results import Results
from buildbot.reporters.message import MessageFormatter as DefaultMessageFormatter
from buildbot.reporters.message import MessageFormatterMissingWorker
from buildbot.reporters.notifier import NotifierBase
from buildbot.util import httpclientservice
ENCODING = 'utf8'
VALID_PARAMS = {"sound", "callback", "timestamp", "url",
"url_title", "device", "retry", "expire", "html"}
PRIORITIES = {
CANCELLED: 'cancelled',
EXCEPTION: 'exception',
FAILURE: 'failing',
SUCCESS: 'passing',
WARNINGS: 'warnings'
}
class PushoverNotifier(NotifierBase):
def checkConfig(self, user_key, api_token,
mode=("failing", "passing", "warnings"),
tags=None, builders=None,
buildSetSummary=False, messageFormatter=None,
subject="Buildbot %(result)s in %(title)s on %(builder)s",
name=None, schedulers=None, branches=None,
priorities=None, otherParams=None,
watchedWorkers=None, messageFormatterMissingWorker=None):
super(PushoverNotifier, self).checkConfig(mode, tags, builders,
buildSetSummary, messageFormatter,
subject, False, False,
name, schedulers,
branches, watchedWorkers)
httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
if otherParams is not None and set(otherParams.keys()) - VALID_PARAMS:
config.error("otherParams can be only 'sound', 'callback', 'timestamp', "
"'url', 'url_title', 'device', 'retry', 'expire', or 'html'")
@defer.inlineCallbacks
def reconfigService(self, user_key, api_token,
mode=("failing", "passing", "warnings"),
tags=None, builders=None,
buildSetSummary=False, messageFormatter=None,
subject="Buildbot %(result)s in %(title)s on %(builder)s",
name=None, schedulers=None, branches=None,
priorities=None, otherParams=None,
watchedWorkers=None, messageFormatterMissingWorker=None):
if messageFormatter is None:
messageFormatter = DefaultMessageFormatter(template_type='html',
template_filename='default_notification.txt')
if messageFormatterMissingWorker is None:
messageFormatterMissingWorker = MessageFormatterMissingWorker(
template_filename='missing_notification.txt')
super(PushoverNotifier, self).reconfigService(mode, tags, builders,
buildSetSummary, messageFormatter,
subject, False, False,
name, schedulers, branches,
watchedWorkers, messageFormatterMissingWorker)
self.user_key = user_key
self.api_token = api_token
if priorities is None:
self.priorities = {}
else:
self.priorities = priorities
if otherParams is None:
self.otherParams = {}
else:
self.otherParams = otherParams
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, 'https://api.pushover.net')
def sendMessage(self, body, subject=None, type=None, builderName=None,
results=None, builds=None, users=None, patches=None,
logs=None, worker=None):
if worker is not None and worker not in self.watchedWorkers:
return
msg = {'message': body}
if type == 'html':
msg['html'] = '1'
try:
msg['priority'] = self.priorities[PRIORITIES[results] if worker is None else 'worker_missing']
except KeyError:
pass
if subject is not None:
msg['title'] = subject
else:
msg['title'] = self.subject % {'result': Results[results],
'projectName': self.master.config.title,
'title': self.master.config.title,
'builder': builderName}
return self.sendNotification(msg)
def sendNotification(self, params):
twlog.msg("sending pushover notification")
params.update(dict(user=self.user_key, token=self.api_token))
params.update(self.otherParams)
return self._http.post('/1/messages.json', params=params)
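# Illustrative master.cfg sketch (not part of the original module), assuming a
# standard Buildbot configuration dict ``c``; the user key, API token and
# parameter values below are placeholders:
#
#     from buildbot.reporters.pushover import PushoverNotifier
#
#     c['services'].append(PushoverNotifier(
#         user_key='PUSHOVER_USER_KEY',
#         api_token='PUSHOVER_API_TOKEN',
#         mode=("failing", "passing", "warnings"),
#         priorities={'failing': 1, 'passing': 0},
#         otherParams={'sound': 'siren'}))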
| 44.804511
| 106
| 0.608827
|
4a196b53ef74994b9d4760dfbbf5ce4641ccbb52
| 1,606
|
py
|
Python
|
setup.py
|
CADWRDeltaModeling/pydelmod
|
31700b6853467dfb1af418267426e5014369080f
|
[
"MIT"
] | null | null | null |
setup.py
|
CADWRDeltaModeling/pydelmod
|
31700b6853467dfb1af418267426e5014369080f
|
[
"MIT"
] | 4
|
2020-01-25T00:19:45.000Z
|
2021-04-06T22:46:34.000Z
|
setup.py
|
CADWRDeltaModeling/pydelmod
|
31700b6853467dfb1af418267426e5014369080f
|
[
"MIT"
] | 2
|
2019-11-06T20:29:35.000Z
|
2020-01-03T19:44:55.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
##------------------ VERSIONING BEST PRACTICES --------------------------##
import versioneer
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['python', 'pandas', 'pyhecdss', 'pydsm',
'plotly', 'psutil', 'plotly-orca', 'netcdf4', 'qgrid']
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="Kijin Nam",
author_email='knam@water.ca.gov',
python_requires='>=3.7',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.7',
],
description="Python package to work with Delta Modeling tasks",
entry_points={
'console_scripts': [
'pydelmod=pydelmod.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='pydelmod',
name='pydelmod',
packages=find_packages(include=['pydelmod', 'pydelmod.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/CADWRDeltaModeling/pydelmod',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
zip_safe=False,
)
| 28.678571
| 75
| 0.631382
|
4a196cdd07669d0c31fcae309dc385ce2f99d451
| 931
|
py
|
Python
|
setup.py
|
traviscook21/pylivy
|
01bd6bf974323dbe366a7045f5b7cea0aac759dc
|
[
"MIT"
] | null | null | null |
setup.py
|
traviscook21/pylivy
|
01bd6bf974323dbe366a7045f5b7cea0aac759dc
|
[
"MIT"
] | null | null | null |
setup.py
|
traviscook21/pylivy
|
01bd6bf974323dbe366a7045f5b7cea0aac759dc
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from setuptools import setup
README = Path(__file__).parent / "README.rst"
setup(
name="livy",
description="A Python client for Apache Livy",
long_description=README.read_text(),
packages=["livy"],
url="https://github.com/acroz/pylivy",
author="Andrew Crozier",
author_email="wacrozier@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
use_scm_version={"version_scheme": "post-release"},
setup_requires=["wheel", "setuptools_scm"],
install_requires=[
"dataclasses; python_version<'3.7'",
"requests",
"pandas",
],
extras_require={"docs": ["sphinx", "sphinx-autodoc-typehints"]},
)
| 28.212121
| 68
| 0.622986
|
4a196dc030f836477d25d47ccb386d892ec65f3d
| 6,508
|
py
|
Python
|
aws_jumpcloud/jumpcloud.py
|
jtimberlake/aws-jumpcloud
|
61b92f451587635e93437307c85ff0775a5a2f83
|
[
"MIT"
] | null | null | null |
aws_jumpcloud/jumpcloud.py
|
jtimberlake/aws-jumpcloud
|
61b92f451587635e93437307c85ff0775a5a2f83
|
[
"MIT"
] | null | null | null |
aws_jumpcloud/jumpcloud.py
|
jtimberlake/aws-jumpcloud
|
61b92f451587635e93437307c85ff0775a5a2f83
|
[
"MIT"
] | null | null | null |
import base64
from datetime import datetime, timezone
from json import JSONDecodeError
import sys
from bs4 import BeautifulSoup # pylint: disable=E0401
from requests import Session as HTTPSession
from aws_jumpcloud.keyring import Keyring
import aws_jumpcloud.onepassword as op
class JumpCloudSession(object):
HTTP_TIMEOUT = 5
def __init__(self, email, password):
self.email = email
self.password = password
self.http = HTTPSession()
self.logged_in = False
self.xsrf_token = None
def login(self):
try:
self._authenticate()
except JumpCloudMFARequired as e:
if sys.stdout.isatty():
otp = self._get_mfa()
self._authenticate(otp=otp)
else:
raise e
def _get_mfa(self):
if op.installed():
sys.stderr.write(f"1Password CLI found. Using OTP from item: {op.ITEM}\n")
mfa = op.get_totp()
if mfa:
return mfa
else:
sys.stderr.write(f"1Password OTP not configured for item: {op.ITEM}. "
"Falling back to user input.\n")
return self._input_mfa()
else:
return self._input_mfa()
def _input_mfa(self):
return input("Enter your JumpCloud multi-factor auth code: ").strip()
def _authenticate(self, otp=None):
assert(not self.logged_in)
headers = {'Content-Type': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'X-Xsrftoken': self._get_xsrf_token()}
data = {"email": self.email, "password": self.password}
if otp is not None:
data['otp'] = otp
auth_resp = self.http.post("https://console.jumpcloud.com/userconsole/auth",
headers=headers, json=data, allow_redirects=False,
timeout=JumpCloudSession.HTTP_TIMEOUT)
if auth_resp.status_code == 200:
self.logged_in = True
Keyring().store_jumpcloud_timestamp(datetime.now(tz=timezone.utc))
else:
raise self._auth_failure_exception(auth_resp, otp)
def _auth_failure_exception(self, auth_resp, otp):
assert(auth_resp.status_code != 200)
if self._is_mfa_missing(auth_resp, otp):
exception = JumpCloudMFARequired(auth_resp)
elif self._is_mfa_failure(auth_resp, otp):
exception = JumpCloudMFAFailure(auth_resp)
elif auth_resp.status_code == 401:
exception = JumpCloudAuthFailure(auth_resp)
elif auth_resp.status_code > 500:
exception = JumpCloudServerError(auth_resp)
else:
exception = JumpCloudUnexpectedStatus(auth_resp)
return exception
def _is_mfa_missing(self, auth_resp, otp):
return auth_resp.status_code == 302 and otp is None and \
"error=4014" in auth_resp.headers['Location']
def _is_mfa_failure(self, auth_resp, otp):
try:
error_msg = auth_resp.json().get("error", "")
except JSONDecodeError:
error_msg = ""
return auth_resp.status_code == 401 and otp is not None and \
"multifactor" in error_msg
def _get_xsrf_token(self):
if self.xsrf_token is None:
xsrf_resp = self.http.get("https://console.jumpcloud.com/userconsole/xsrf",
timeout=JumpCloudSession.HTTP_TIMEOUT)
assert(xsrf_resp.status_code == 200)
self.xsrf_token = xsrf_resp.json().get("xsrf")
return self.xsrf_token
def get_aws_saml_assertion(self, profile):
assert(self.logged_in)
aws_resp = self.http.get(profile.jumpcloud_url)
if aws_resp.status_code != 200:
raise JumpCloudUnexpectedStatus(aws_resp)
if "SAMLResponse" not in aws_resp.text:
raise JumpCloudMissingSAMLResponse(aws_resp)
return self._extract_saml_response(aws_resp.text)
def _extract_saml_response(self, html):
soup = BeautifulSoup(html, "lxml")
tag = soup.find("input", attrs={'name': "SAMLResponse"})
assert(tag is not None)
saml_response_b64 = tag.attrs['value']
saml_response = base64.b64decode(saml_response_b64)
return saml_response
class JumpCloudError(Exception):
def __init__(self, message, resp):
Exception.__init__(self, message)
self.message = message
self.response = resp
try:
self.jumpcloud_error_message = resp.json().get("error")
except JSONDecodeError:
self.jumpcloud_error_message = None
class JumpCloudServerError(JumpCloudError):
def __init__(self, resp):
message = f"JumpCloud returned HTTP {resp.status_code} server error"
JumpCloudError.__init__(self, message, resp)
class JumpCloudAuthFailure(JumpCloudError):
def __init__(self, resp=None):
message = """
JumpCloud authentication failed. Check your username and password and try again.
If you are authenticating with a MFA token, ensure you are not reusing a token.
"""
JumpCloudError.__init__(self, message, resp)
class JumpCloudMFARequired(JumpCloudError):
def __init__(self, resp):
message = "Multi-factor authentication is required on your JumpCloud account."
JumpCloudError.__init__(self, message, resp)
class JumpCloudMFAFailure(JumpCloudError):
def __init__(self, resp):
message = "Multi-factor authentication failed. Check your MFA token and try again."
JumpCloudError.__init__(self, message, resp)
class JumpCloudUnexpectedStatus(JumpCloudError):
"""Indicates a response that we weren't expecting, i.e. that JumpCloud
changed their auth workflow or we didn't reverse-engineer it properly."""
def __init__(self, resp):
message = f"JumpCloud returned unexpected HTTP {resp.status_code} response"
JumpCloudError.__init__(self, message, resp)
class JumpCloudMissingSAMLResponse(JumpCloudError):
"""Indicates that the SSO URL did not include the expected SAMLResponse
field. Either the profile contains an incorrect URL, or JumpCloud changed
their SSO workflow."""
def __init__(self, resp):
message = "JumpCloud's SSO response did not contain the expected \"SAMLResponse\" field."
JumpCloudError.__init__(self, message, resp)
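# Illustrative usage sketch (not part of the original module). ``profile`` is a
# hypothetical stand-in: get_aws_saml_assertion() only needs an object with a
# ``jumpcloud_url`` attribute pointing at the JumpCloud SSO URL of the AWS app,
# and the credentials below are placeholders.
if __name__ == "__main__":
    from collections import namedtuple
    Profile = namedtuple("Profile", ["jumpcloud_url"])
    profile = Profile(jumpcloud_url="https://sso.jumpcloud.com/saml2/example-aws-app")
    session = JumpCloudSession("user@example.com", "correct horse battery staple")
    session.login()  # prompts for an MFA code on a TTY when the account requires one
    assertion = session.get_aws_saml_assertion(profile)  # base64-decoded SAML XML (bytes)
    print("received %d bytes of SAML assertion" % len(assertion))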
| 37.402299
| 97
| 0.644438
|
4a196ea4925983aeaca6d26b58181606709b1786
| 8,324
|
py
|
Python
|
openpnm/algorithms/Porosimetry.py
|
halotudio/openPNM-copy2
|
d400ec65e9421256a531f6d22a38255b002d5dcb
|
[
"MIT"
] | 1
|
2021-05-01T11:10:43.000Z
|
2021-05-01T11:10:43.000Z
|
openpnm/algorithms/Porosimetry.py
|
Jimmy-INL/OpenPNM
|
1546fa1ac2204443bde916f2037fac383c5069ae
|
[
"MIT"
] | null | null | null |
openpnm/algorithms/Porosimetry.py
|
Jimmy-INL/OpenPNM
|
1546fa1ac2204443bde916f2037fac383c5069ae
|
[
"MIT"
] | null | null | null |
from openpnm.algorithms import OrdinaryPercolation
from openpnm.utils import logging
import numpy as np
logger = logging.getLogger(__name__)
class Porosimetry(OrdinaryPercolation):
r"""
    Simulates mercury intrusion porosimetry using ordinary percolation
Parameters
----------
network : OpenPNM Network object
The Network upon which this simulation should be run
name : string, optional
An identifying name for the object. If none is given then one is
generated.
project : OpenPNM Project object
Either a Network or a Project must be specified
Notes
-----
Mercury intrusion progresses by applying increasing pressures to the
invading mercury phase, and measuring the resultant volume of invading
fluid. This corresponds directly to an ordinary percolation process,
with access limitations enabled.
See Also
--------
OrdinaryPercolation
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'pore_volume': 'pore.volume',
'throat_volume': 'throat.volume',
'mode': 'bond',
'access_limited': True,
'quantity': 'pressure',
'throat_entry_pressure': 'throat.entry_pressure',
'pore_volume': 'pore.volume',
'throat_volume': 'throat.volume',
'late_pore_filling': '',
'late_throat_filling': '',
'gui': {'setup': {'phase': None,
'quantity': '',
'throat_entry_pressure': '',
'pore_volume': '',
'throat_volume': '',
'late_pore_filling': '',
'late_throat_filling': ''},
'set_inlets': {'pores': None,
'overwrite': False},
'set_outlets': {'pores': None,
'overwrite': False},
'set_residual': {'pores': None,
'throats': None,
'overwrite': False}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
# Apply user settings, if any
self.settings.update(settings)
# Use the reset method to initialize all arrays
self.reset()
if phase is not None:
self.setup(phase=phase)
def setup(self,
phase=None,
quantity='',
throat_entry_pressure='',
pore_volume='',
throat_volume='',
late_pore_filling='',
late_throat_filling=''):
r"""
Used to specify necessary arguments to the simulation. This method is
useful for resetting the algorithm or applying more explicit control.
Parameters
----------
phase : OpenPNM Phase object
The Phase object containing the physical properties of the invading
fluid.
quantity : string
The name of the quantity calculated by this algorithm. This is
used for instance, by the late pore and throat filling models
to indicate the prevailing fluid pressure in the invading phase
for calculating the extent of filling. The default is
'pressure'. Note that there is no need to specify 'pore' and/or
'throat' with this as the given value will apply to both.
throat_entry_pressure : string
The dictionary key on the Phase object where the throat entry
pressure values are stored. The default is
'throat.entry_pressure'.
pore_volume : string
The dictionary key containing the pore volume information. The
default is 'pore.volume'.
throat_volume : string
The dictionary key containing the throat volume information. The
default is 'throat.volume'.
pore_partial_filling : string
The name of the model used to determine partial pore filling as
a function of applied pressure.
throat_partial_filling : string
The name of the model used to determine partial throat filling as
a function of applied pressure.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if throat_entry_pressure:
self.settings['throat_entry_pressure'] = throat_entry_pressure
phase = self.project.find_phase(self)
self['throat.entry_pressure'] = phase[throat_entry_pressure]
if pore_volume:
self.settings['pore_volume'] = pore_volume
if throat_volume:
self.settings['throat_volume'] = throat_volume
if late_pore_filling:
self.settings['late_pore_filling'] = late_pore_filling
if late_throat_filling:
self.settings['late_throat_filling'] = late_throat_filling
def set_partial_filling(self, propname):
r"""
Define which pore filling model to apply.
Parameters
----------
propname : string
Dictionary key on the physics object(s) containing the pore
filling model(s) to apply.
Notes
-----
It is assumed that these models are functions of the `quantity`
        specified in the algorithm's settings. This value is applied to the
corresponding phase just prior to regenerating the given pore-scale
model(s).
"""
if propname.startswith('pore'):
self.settings['pore_partial_filling'] = propname
if propname.startswith('throat'):
self.settings['throat_partial_filling'] = propname
def run(self, points=25, start=None, stop=None):
if self.settings['mode'] != 'bond':
raise Exception('Porosimetry must be run as bond percolation')
if self.settings['access_limited'] is False:
raise Exception('Porosimetry must be run as access limited')
super().run(points=points, start=start, stop=stop)
run.__doc__ = OrdinaryPercolation.run.__doc__
def results(self, Pc=None):
r"""
"""
if Pc is None:
p_inv = self['pore.invasion_pressure']
t_inv = self['throat.invasion_pressure']
results = {'pore.invasion_pressure': p_inv,
'throat.invasion_pressure': t_inv}
else:
p_inv, t_inv = super().results(Pc).values()
phase = self.project.find_phase(self)
quantity = self.settings['quantity'].split('.')[-1]
lpf = np.array([1])
if self.settings['pore_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['pore.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['pore_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
lpf = phase[self.settings['pore_partial_filling']]
# Calculate filled throat volumes
ltf = np.array([1])
if self.settings['throat_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['throat.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['throat_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
ltf = phase[self.settings['throat_partial_filling']]
p_inv = p_inv*lpf
t_inv = t_inv*ltf
results = {'pore.occupancy': p_inv, 'throat.occupancy': t_inv}
return results
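# Illustrative usage sketch (not part of the original module), assuming the
# OpenPNM 2.x API for building the network, geometry, phase and physics
# objects; shapes, spacing and object names below are placeholders.
if __name__ == "__main__":
    import openpnm as op
    pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4)
    geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
    hg = op.phases.Mercury(network=pn)
    phys = op.physics.Standard(network=pn, phase=hg, geometry=geo)
    mip = Porosimetry(network=pn, phase=hg)
    mip.set_inlets(pores=pn.pores('left'))
    mip.run(points=25)
    print(mip.get_intrusion_data())  # capillary pressure vs. invading-phase saturation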
| 40.407767
| 83
| 0.566314
|
4a196f27a908cd166610531f97b8b3ceec6572fb
| 32,065
|
py
|
Python
|
tests/test_client.py
|
awesome-archive/WeRoBot
|
42ac05aa2780fb3681d82c5f8612956d2990c630
|
[
"MIT"
] | 1
|
2017-06-30T01:29:33.000Z
|
2017-06-30T01:29:33.000Z
|
tests/test_client.py
|
awesome-archive/WeRoBot
|
42ac05aa2780fb3681d82c5f8612956d2990c630
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
awesome-archive/WeRoBot
|
42ac05aa2780fb3681d82c5f8612956d2990c630
|
[
"MIT"
] | 1
|
2020-11-01T16:35:32.000Z
|
2020-11-01T16:35:32.000Z
|
# -*- coding: utf-8 -*-
import os
import responses
import json
import pytest
import requests
from werobot import WeRoBot
from werobot.config import Config
from werobot.client import Client, check_error, ClientException
from werobot.utils import cached_property
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
basedir = os.path.dirname(os.path.abspath(__file__))
TOKEN_URL = "https://api.weixin.qq.com/cgi-bin/token"
json_header = {'content-type': 'application/json'}
def token_callback(request):
return 200, json_header, json.dumps({"access_token": "ACCESS_TOKEN", "expires_in": 7200})
def add_token_response(method):
def wrapped_func(self, *args, **kwargs):
responses.add_callback(responses.GET, TOKEN_URL, callback=token_callback)
method(self, *args, **kwargs)
return wrapped_func
class BaseTestClass:
@cached_property
def client(self):
config = Config()
config.from_pyfile(os.path.join(basedir, "client_config.py"))
return Client(config)
@staticmethod
def callback_without_check(request):
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
class TestClientBaseClass(BaseTestClass):
def test_id_and_secret(self):
assert self.client.appid == "123"
assert self.client.appsecret == "321"
def test_robot_client(self):
robot = WeRoBot()
assert robot.client.config == robot.config
def test_robot_reuse_client(self):
robot = WeRoBot()
client_1 = robot.client
client_2 = robot.client
assert client_1 is client_2
def test_check_error(self):
error_json = dict(
error_code=0
)
assert error_json == check_error(error_json)
error_json = dict(
errcode=1,
errmsg="test"
)
with pytest.raises(ClientException) as err:
check_error(error_json)
assert err.value.args[0] == "1: test"
@responses.activate
@add_token_response
def test_grant_token(self):
# responses.add_callback(responses.GET, TOKEN_URL, callback=token_callback)
self.client.grant_token()
assert self.client.token == "ACCESS_TOKEN"
@responses.activate
@add_token_response
def test_client_request(self):
EMPTY_PARAMS_URL = "http://empty-params.werobot.com/"
DATA_EXISTS_URL = "http://data-exists.werobot.com/"
def empty_params_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert params["access_token"][0] == self.client.token
return 200, json_header, json.dumps({"test": "test"})
def data_exists_url(request):
assert json.loads(request.body.decode('utf-8')) == {"test": "test"}
return 200, json_header, json.dumps({"test": "test"})
responses.add_callback(responses.POST, DATA_EXISTS_URL, callback=data_exists_url)
responses.add_callback(responses.GET, EMPTY_PARAMS_URL, callback=empty_params_callback)
responses.add_callback(responses.GET, TOKEN_URL, callback=token_callback)
r = self.client.get(url=EMPTY_PARAMS_URL)
assert r == {"test": "test"}
r = self.client.post(url=DATA_EXISTS_URL, data={"test": "test"})
assert r == {"test": "test"}
class TestClientMenuClass(BaseTestClass):
CREATE_URL = "https://api.weixin.qq.com/cgi-bin/menu/create"
GET_URL = "https://api.weixin.qq.com/cgi-bin/menu/get"
DELETE_URL = "https://api.weixin.qq.com/cgi-bin/menu/delete"
menu_data = {
"button": [
{
"type": "click",
"name": u"今日歌曲",
"key": "V1001_TODAY_MUSIC"
},
{
"type": "click",
"name": u"歌手简介",
"key": "V1001_TODAY_SINGER"
},
{
"name": u"菜单",
"sub_button": [
{
"type": "view",
"name": u"搜索",
"url": "http://www.soso.com/"
},
{
"type": "view",
"name": u"视频",
"url": "http://v.qq.com/"
},
{
"type": "click",
"name": u"赞一下我们",
"key": "V1001_GOOD"
}
]
}
]}
@staticmethod
def create_menu_callback(request):
def check_menu_data(item):
keys = item.keys()
assert "name" in keys
if "sub_button" in keys:
for button in item["sub_button"]:
check_menu_data(button)
return
assert "type" in keys
if "type" == "click":
assert "key" in keys
elif "type" == "view":
assert "url" in keys
elif "type" == "media_id" or "type" == "view_limited":
assert "media_id" in keys
try:
body = json.loads(request.body.decode("utf-8"))["button"]
except KeyError:
return 200, json_header, json.dumps({"errcode": 1, "errmsg": "error"})
try:
for item in body:
check_menu_data(item)
except AssertionError:
return 200, json_header, json.dumps({"errcode": 1, "errmsg": "error"})
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_create_menu(self):
responses.add_callback(responses.POST, self.CREATE_URL, callback=self.create_menu_callback)
r = self.client.create_menu(self.menu_data)
assert r == {"errcode": 0, "errmsg": "ok"}
with pytest.raises(ClientException) as err:
self.client.create_menu({"error": "error"})
assert err.value.args[0] == "1: error"
@responses.activate
@add_token_response
def test_get_menu(self):
responses.add_callback(responses.GET, self.GET_URL, callback=self.callback_without_check)
r = self.client.get_menu()
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_delete_menu(self):
responses.add_callback(responses.GET, self.DELETE_URL, callback=self.callback_without_check)
r = self.client.delete_menu()
assert r == {"errcode": 0, "errmsg": "ok"}
class TestClientGroupClass(BaseTestClass):
CREATE_URL = "https://api.weixin.qq.com/cgi-bin/groups/create"
GET_URL = "https://api.weixin.qq.com/cgi-bin/groups/get"
GET_WITH_ID_URL = "https://api.weixin.qq.com/cgi-bin/groups/getid"
UPDATE_URL = "https://api.weixin.qq.com/cgi-bin/groups/update"
MOVE_URL = "https://api.weixin.qq.com/cgi-bin/groups/members/update"
MOVE_USERS_URL = "https://api.weixin.qq.com/cgi-bin/groups/members/batchupdate"
DELETE_URL = "https://api.weixin.qq.com/cgi-bin/groups/delete"
@staticmethod
def create_group_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "group" in body.keys()
assert "name" in body["group"].keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def get_groups_with_id_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "openid" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def update_group_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "group" in body.keys()
assert "id" in body["group"].keys()
assert "name" in body["group"].keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def move_user_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "openid" in body.keys()
assert "to_groupid" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def move_users_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "openid_list" in body.keys()
assert "to_groupid" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def delete_group_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "group" in body.keys()
assert "id" in body["group"].keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_create_group(self):
responses.add_callback(responses.POST, self.CREATE_URL, callback=self.create_group_callback)
r = self.client.create_group("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_get_group(self):
responses.add_callback(responses.GET, self.GET_URL, callback=self.callback_without_check)
r = self.client.get_groups()
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_get_group_with_id(self):
responses.add_callback(
responses.POST,
self.GET_WITH_ID_URL,
callback=self.get_groups_with_id_callback
)
r = self.client.get_group_by_id("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_update_group(self):
responses.add_callback(responses.POST, self.UPDATE_URL, callback=self.update_group_callback)
r = self.client.update_group("0", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_move_user(self):
responses.add_callback(responses.POST, self.MOVE_URL, callback=self.move_user_callback)
r = self.client.move_user("test", "0")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_move_users(self):
responses.add_callback(
responses.POST,
self.MOVE_USERS_URL,
callback=self.move_users_callback
)
r = self.client.move_users("test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_delete_group(self):
responses.add_callback(responses.POST, self.DELETE_URL, callback=self.delete_group_callback)
r = self.client.delete_group("test")
assert r == {"errcode": 0, "errmsg": "ok"}
class TestClientRemarkClass(BaseTestClass):
REMARK_URL = "https://api.weixin.qq.com/cgi-bin/user/info/updateremark"
@staticmethod
def remark_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "openid" in body.keys()
assert "remark" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_client_remark(self):
responses.add_callback(responses.POST, self.REMARK_URL, callback=self.remark_callback)
r = self.client.remark_user("test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
class TestClientUserInfo(BaseTestClass):
SINGLE_USER_URL = "https://api.weixin.qq.com/cgi-bin/user/info"
MULTI_USER_URL = "https://api.weixin.qq.com/cgi-bin/user/info/batchget"
@staticmethod
def single_user_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
assert "openid" in params.keys()
assert "lang" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def multi_user_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "user_list" in body.keys()
for user in body["user_list"]:
assert "openid" in user.keys()
assert "lang" in user.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_single_user(self):
responses.add_callback(
responses.GET,
self.SINGLE_USER_URL,
callback=self.single_user_callback
)
r = self.client.get_user_info("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_multi_user(self):
responses.add_callback(
responses.POST,
self.MULTI_USER_URL,
callback=self.multi_user_callback
)
r = self.client.get_users_info(["test1", "test2"])
assert r == {"errcode": 0, "errmsg": "ok"}
class TestClientGetFollowersClass(BaseTestClass):
FOLLOWER_URL = "https://api.weixin.qq.com/cgi-bin/user/get"
@staticmethod
def get_followers_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
assert "next_openid" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_get_followers(self):
responses.add_callback(
responses.GET,
self.FOLLOWER_URL,
callback=self.get_followers_callback
)
r = self.client.get_followers("test")
assert r == {"errcode": 0, "errmsg": "ok"}
class TestClientCustomMenuClass(BaseTestClass):
CREATE_URL = "https://api.weixin.qq.com/cgi-bin/menu/addconditional"
DELETE_URL = "https://api.weixin.qq.com/cgi-bin/menu/delconditional"
MATCH_URL = "https://api.weixin.qq.com/cgi-bin/menu/trymatch"
custom_data = {
"menu_data": [
{
"type": "click",
"name": u"今日歌曲",
"key": "V1001_TODAY_MUSIC"
},
{
"name": u"菜单",
"sub_button": [
{
"type": "view",
"name": u"搜索",
"url": "http://www.soso.com/"
},
{
"type": "view",
"name": u"视频",
"url": "http://v.qq.com/"
},
{
"type": "click",
"name": u"赞一下我们",
"key": "V1001_GOOD"
}]
}],
"matchrule": {
"group_id": "2",
"sex": "1",
"country": u"中国",
"province": u"广东",
"city": u"广州",
"client_platform_type": "2",
"language": "zh_CN"
}
}
@staticmethod
def create_custom_menu_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "button" in body.keys()
assert "matchrule" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def delete_custom_menu_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "menuid" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def match_custom_menu(request):
body = json.loads(request.body.decode("utf-8"))
assert "user_id" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_create_custom_menu(self):
responses.add_callback(
responses.POST,
self.CREATE_URL,
callback=self.create_custom_menu_callback
)
r = self.client.create_custom_menu(**self.custom_data)
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_delete_custom_menu(self):
responses.add_callback(
responses.POST,
self.DELETE_URL,
callback=self.delete_custom_menu_callback
)
r = self.client.delete_custom_menu("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
    def test_match_custom_menu(self):
responses.add_callback(responses.POST, self.MATCH_URL, callback=self.match_custom_menu)
r = self.client.match_custom_menu("test")
assert r == {"errcode": 0, "errmsg": "ok"}
class TestClientResourceClass(BaseTestClass):
UPLOAD_URL = "https://api.weixin.qq.com/cgi-bin/media/upload"
DOWNLOAD_URL = "https://api.weixin.qq.com/cgi-bin/media/get"
ADD_NEWS_URL = "https://api.weixin.qq.com/cgi-bin/material/add_news"
UPLOAD_PICTURE_URL = "https://api.weixin.qq.com/cgi-bin/media/uploadimg"
UPLOAD_P_URL = "https://api.weixin.qq.com/cgi-bin/material/add_material"
DOWNLOAD_P_URL = "https://api.weixin.qq.com/cgi-bin/material/get_material"
DELETE_P_URL = "https://api.weixin.qq.com/cgi-bin/material/del_material"
UPDATE_NEWS_URL = "https://api.weixin.qq.com/cgi-bin/material/update_news"
add_news_data = [{
"title": "test_title",
"thumb_media_id": "test",
"author": "test",
"digest": "test",
"show_cover_pic": 1,
"content": "test",
"content_source_url": "test"
}]
update_data = {
"media_id": "test",
"index": "test",
"articles": {
"title": "test",
"thumb_media_id": "test",
"author": "test",
"digest": "test",
"show_cover_pic": 1,
"content": "test",
"content_source_url": "test"
}
}
@staticmethod
def upload_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "type" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def download_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "media_id" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def add_news_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "articles" in body.keys()
for article in body["articles"]:
assert "title" in article.keys()
assert "thumb_media_id" in article.keys()
assert "author" in article.keys()
assert "digest" in article.keys()
assert "show_cover_pic" in article.keys()
assert "content" in article.keys()
assert "content_source_url" in article.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def upload_picture_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def upload_p_media_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
assert "type" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def download_p_media_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
body = json.loads(request.body.decode("utf-8"))
assert "media_id" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def delete_p_media_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "media_id" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def update_news_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "media_id" in body.keys()
assert "index" in body.keys()
assert "articles" in body.keys()
articles = body["articles"]
assert "title" in articles.keys()
assert "thumb_media_id" in articles.keys()
assert "author" in articles.keys()
assert "digest" in articles.keys()
assert "show_cover_pic" in articles.keys()
assert "content" in articles.keys()
assert "content_source_url" in articles.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_upload_media(self):
responses.add_callback(responses.POST, self.UPLOAD_URL, callback=self.upload_callback)
r = self.client.upload_media("test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_download_media(self):
responses.add_callback(responses.GET, self.DOWNLOAD_URL, callback=self.download_callback)
r = self.client.download_media("test")
assert isinstance(r, requests.Response)
@responses.activate
@add_token_response
def test_add_news(self):
responses.add_callback(responses.POST, self.ADD_NEWS_URL, callback=self.add_news_callback)
r = self.client.add_news(self.add_news_data)
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_upload_news_picture(self):
responses.add_callback(
responses.POST,
self.UPLOAD_PICTURE_URL,
callback=self.upload_picture_callback
)
r = self.client.upload_news_picture("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_upload_permanent_media(self):
responses.add_callback(
responses.POST,
self.UPLOAD_P_URL,
callback=self.upload_p_media_callback)
r = self.client.upload_permanent_media("test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_download_permanent_media(self):
responses.add_callback(
responses.POST,
self.DOWNLOAD_P_URL,
callback=self.download_p_media_callback
)
r = self.client.download_permanent_media("test")
assert isinstance(r, requests.Response)
@responses.activate
@add_token_response
def test_delete_permanent_media(self):
responses.add_callback(
responses.POST,
self.DELETE_P_URL,
callback=self.delete_p_media_callback
)
r = self.client.delete_permanent_media("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_update_news(self):
responses.add_callback(
responses.POST,
self.UPDATE_NEWS_URL,
callback=self.update_news_callback
)
r = self.client.update_news(self.update_data)
assert r == {"errcode": 0, "errmsg": "ok"}
class TestUploadVideoClass(BaseTestClass):
UPLOAD_VIDEO_URL = "https://api.weixin.qq.com/cgi-bin/material/add_material"
@staticmethod
def upload_video_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "type" in params.keys()
assert params["type"][0] == "video"
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_upload_video(self):
responses.add_callback(
responses.POST,
self.UPLOAD_VIDEO_URL,
callback=self.upload_video_callback
)
r = self.client.upload_permanent_video("test", "test", "test")
assert isinstance(r, requests.Response)
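
# Note: unlike most helpers in these tests, which return parsed JSON dicts,
# upload_permanent_video is asserted to return a raw requests.Response
# (as are the media download helpers above).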
class TestMediaClass(BaseTestClass):
GET_URL = "https://api.weixin.qq.com/cgi-bin/material/get_materialcount"
GET_LIST_URL = "https://api.weixin.qq.com/cgi-bin/material/batchget_material"
@staticmethod
def get_media_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def get_media_list_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "type" in body.keys()
assert "offset" in body.keys()
assert "count" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_get_media(self):
responses.add_callback(responses.GET, self.GET_URL, callback=self.get_media_callback)
r = self.client.get_media_count()
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_get_media_list(self):
responses.add_callback(
responses.POST,
self.GET_LIST_URL,
callback=self.get_media_list_callback
)
r = self.client.get_media_list("test", "test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
class TestGetIpListClass(BaseTestClass):
GET_URL = "https://api.weixin.qq.com/cgi-bin/getcallbackip"
@staticmethod
def get_ip_list_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_get_ip_list(self):
responses.add_callback(responses.GET, self.GET_URL, callback=self.get_ip_list_callback)
r = self.client.get_ip_list()
assert r == {"errcode": 0, "errmsg": "ok"}
class TestCustomService(BaseTestClass):
ADD_URL = "https://api.weixin.qq.com/customservice/kfaccount/add"
UPDATE_URL = "https://api.weixin.qq.com/customservice/kfaccount/update"
DELETE_URL = "https://api.weixin.qq.com/customservice/kfaccount/del"
UPLOAD_URL = "http://api.weixin.qq.com/customservice/kfaccount/uploadheadimg"
GET_URL = "https://api.weixin.qq.com/cgi-bin/customservice/getkflist"
@staticmethod
def add_update_delete_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "kf_account" in body.keys()
assert "nickname" in body.keys()
assert "password" in body.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def upload_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def get_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_add_custom_service_account(self):
responses.add_callback(
responses.POST,
self.ADD_URL,
callback=self.add_update_delete_callback
)
r = self.client.add_custom_service_account("test", "test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_update_custom_service_account(self):
responses.add_callback(
responses.POST,
self.UPDATE_URL,
callback=self.add_update_delete_callback
)
r = self.client.update_custom_service_account("test", "test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_delete_custom_service_account(self):
responses.add_callback(
responses.POST,
self.DELETE_URL,
callback=self.add_update_delete_callback
)
r = self.client.delete_custom_service_account("test", "test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_upload_custom_service_account_avatar(self):
responses.add_callback(responses.POST, self.UPLOAD_URL, callback=self.upload_callback)
r = self.client.upload_custom_service_account_avatar("test", "test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_get_custom_service_account_list(self):
responses.add_callback(responses.GET, self.GET_URL, callback=self.get_callback)
r = self.client.get_custom_service_account_list()
assert r == {"errcode": 0, "errmsg": "ok"}
class TestQrcodeClass(BaseTestClass):
CREATE_URL = "https://api.weixin.qq.com/cgi-bin/qrcode/create"
SHOW_URL = "https://mp.weixin.qq.com/cgi-bin/showqrcode"
@staticmethod
def create_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "access_token" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@staticmethod
def show_callback(request):
params = urlparse.parse_qs(urlparse.urlparse(request.url).query)
assert "ticket" in params.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_create_qrcode(self):
responses.add_callback(responses.POST, self.CREATE_URL, callback=self.create_callback)
r = self.client.create_qrcode("test")
assert r == {"errcode": 0, "errmsg": "ok"}
@responses.activate
@add_token_response
def test_show_qrcode(self):
responses.add_callback(responses.GET, self.SHOW_URL, callback=self.show_callback)
r = self.client.show_qrcode("test")
assert isinstance(r, requests.Response)
class TestSendArticleMessagesClass(BaseTestClass):
URL = "https://api.weixin.qq.com/cgi-bin/message/custom/send"
@staticmethod
def article_callback(request):
body = json.loads(request.body.decode("utf-8"))
assert "touser" in body.keys()
assert "msgtype" in body.keys()
assert body["msgtype"] == "news"
assert "news" in body.keys()
for article in body["news"]["articles"]:
assert "title" in article.keys()
assert "description" in article.keys()
assert "url" in article.keys()
assert "picurl" in article.keys()
return 200, json_header, json.dumps({"errcode": 0, "errmsg": "ok"})
@responses.activate
@add_token_response
def test_send_article_messages(self):
responses.add_callback(responses.POST, self.URL, callback=self.article_callback)
from werobot.replies import Article
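        # The send helper is exercised twice below: first with werobot Article
        # objects, then with plain dicts carrying the same four fields
        # (title / description / url / picurl).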
articles = []
for _ in range(0, 8):
articles.append(Article(*["test_title", "test_description", "test_img", "test_url"]))
r = self.client.send_article_message("test_id", articles)
assert r == {"errcode": 0, "errmsg": "ok"}
articles = []
for _ in range(0, 8):
articles.append({
"title": "test_title",
"description": "test_description",
"url": "test_url",
"picurl": "test_pic_url"
})
r = self.client.send_article_message("test_id", articles)
assert r == {"errcode": 0, "errmsg": "ok"}
| 36.190745
| 100
| 0.61578
|
4a196f54aa6a64dca205da3f108a7d32d24152e4
| 4,371
|
py
|
Python
|
config.py
|
ch-its/DIN-Group-Activity-Recognition-Benchmark
|
02d29decc7ed8c6c85bf53436956ef36f76e4872
|
[
"MIT"
] | 14
|
2021-11-29T08:11:07.000Z
|
2022-02-26T14:23:28.000Z
|
config.py
|
ch-its/DIN-Group-Activity-Recognition-Benchmark
|
02d29decc7ed8c6c85bf53436956ef36f76e4872
|
[
"MIT"
] | 9
|
2021-08-31T11:55:49.000Z
|
2021-11-21T03:29:33.000Z
|
config.py
|
ch-its/DIN-Group-Activity-Recognition-Benchmark
|
02d29decc7ed8c6c85bf53436956ef36f76e4872
|
[
"MIT"
] | 6
|
2021-09-16T11:41:54.000Z
|
2021-11-10T09:27:19.000Z
|
import time
import os
class Config(object):
"""
    class to store configuration parameters
"""
def __init__(self, dataset_name):
# Global
self.image_size = 720, 1280 #input image size
self.batch_size = 32 #train batch size
self.test_batch_size = 8 #test batch size
self.num_boxes = 12 #max number of bounding boxes in each frame
# Gpu
self.use_gpu=True
self.use_multi_gpu=True
self.device_list="0,1,2,3" #id list of gpus used for training
# Dataset
assert(dataset_name in ['volleyball', 'collective'])
self.dataset_name=dataset_name
if dataset_name=='volleyball':
self.data_path = 'data/volleyball/videos' #data path for the volleyball dataset
self.train_seqs = [ 1,3,6,7,10,13,15,16,18,22,23,31,32,36,38,39,40,41,42,48,50,52,53,54,
0,2,8,12,17,19,24,26,27,28,30,33,46,49,51] #video id list of train set
self.test_seqs = [4,5,9,11,14,20,21,25,29,34,35,37,43,44,45,47] #video id list of test set
else:
self.data_path='data/collective' #data path for the collective dataset
self.test_seqs=[5,6,7,8,9,10,11,15,16,25,28,29]
self.train_seqs=[s for s in range(1,45) if s not in self.test_seqs]
# Backbone
self.backbone='res18'
self.crop_size = 5, 5 #crop size of roi align
        self.train_backbone = False  #whether to train the feature-extraction backbone: True for stage 1, False for stage 2
self.out_size = 87, 157 #output feature map size of backbone
self.emb_features=1056 #output feature map channel of backbone
# Activity Action
self.num_actions = 9 #number of action categories
self.num_activities = 8 #number of activity categories
self.actions_loss_weight = 1.0 #weight used to balance action loss and activity loss
self.actions_weights = None
# Sample
self.num_frames = 3
self.num_before = 5
self.num_after = 4
# ARG params
self.num_features_boxes = 1024
self.num_features_relation=256
self.num_graph=16 #number of graphs
self.num_features_gcn=self.num_features_boxes
self.gcn_layers=1 #number of GCN layers
self.tau_sqrt=False
self.pos_threshold=0.2 #distance mask threshold in position relation
# Training Parameters
self.train_random_seed = 0
self.train_learning_rate = 1e-4 #initial learning rate
self.lr_plan = {11:3e-5, 21:1e-5} #change learning rate in these epochs
self.train_dropout_prob = 0.3 #dropout probability
self.weight_decay = 0 #l2 weight decay
self.max_epoch = 30 #max training epoch
self.test_interval_epoch = 1
# Exp
self.training_stage=1 #specify stage1 or stage2
self.stage1_model_path='' #path of the base model, need to be set in stage2
self.test_before_train=False
self.exp_note='Group-Activity-Recognition'
self.exp_name=None
self.set_bn_eval = False
self.inference_module_name = 'dynamic_volleyball'
# Dynamic Inference
self.stride = 1
self.ST_kernel_size = 3
self.dynamic_sampling = True
self.sampling_ratio = [1, 3] # [1,2,4]
self.group = 1
self.scale_factor = True
self.beta_factor = True
self.load_backbone_stage2 = False
self.parallel_inference = False
self.hierarchical_inference = False
self.lite_dim = None
self.num_DIM = 1
self.load_stage2model = False
self.stage2model = None
# Actor Transformer
self.temporal_pooled_first = False
# SACRF + BiUTE
self.halting_penalty = 0.0001
def init_config(self, need_new_folder=True):
if self.exp_name is None:
time_str=time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
self.exp_name='[%s_stage%d]<%s>'%(self.exp_note,self.training_stage,time_str)
self.result_path='result/%s'%self.exp_name
self.log_path='result/%s/log.txt'%self.exp_name
if need_new_folder:
os.mkdir(self.result_path)
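
# Minimal usage sketch (added for illustration; the attribute names are the ones
# defined in __init__ above, and need_new_folder=False keeps it from touching disk):
if __name__ == "__main__":
    cfg = Config('volleyball')
    cfg.init_config(need_new_folder=False)
    print(cfg.exp_name, cfg.result_path)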
| 37.358974
| 123
| 0.614505
|
4a196f7282c7a05afb85826eb0310c88ccaeae06
| 546
|
py
|
Python
|
appgen/appgen/admin.py
|
Ecotrust/madrona-app-generator
|
078d124a8aacadf8a151da7a5434f68868564431
|
[
"BSD-3-Clause"
] | null | null | null |
appgen/appgen/admin.py
|
Ecotrust/madrona-app-generator
|
078d124a8aacadf8a151da7a5434f68868564431
|
[
"BSD-3-Clause"
] | null | null | null |
appgen/appgen/admin.py
|
Ecotrust/madrona-app-generator
|
078d124a8aacadf8a151da7a5434f68868564431
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.gis import admin
from appgen.models import *
from appgen.forms import AppConfigForm
admin.site.register(UserFeature)
admin.site.register(BaseKml)
class WorldGeoAdmin(admin.OSMGeoAdmin):
default_lon = 0
default_lat = 0
default_zoom = 1
map_width = 600
map_height = 400
class AppGeoModelAdmin(WorldGeoAdmin):
form = AppConfigForm
list_display = ('app', 'wms', 'links', 'status', 'desc', 'data_list', 'command_html') # command_html must be last!
admin.site.register(AppConfig, AppGeoModelAdmin)
| 27.3
| 118
| 0.739927
|
4a19701561dbef42247da0d49d382e3de6e45233
| 672
|
py
|
Python
|
pygipo/management/commands/is_db_up.py
|
felixhummel/pygipo
|
e7323de052e3c7f44ec4912bddcbb58abebcc6bf
|
[
"MIT"
] | null | null | null |
pygipo/management/commands/is_db_up.py
|
felixhummel/pygipo
|
e7323de052e3c7f44ec4912bddcbb58abebcc6bf
|
[
"MIT"
] | null | null | null |
pygipo/management/commands/is_db_up.py
|
felixhummel/pygipo
|
e7323de052e3c7f44ec4912bddcbb58abebcc6bf
|
[
"MIT"
] | null | null | null |
# vim: set fileencoding=utf-8 filetype=python :
import django
from django.core.management.base import BaseCommand
from django.db import connection
EXPECTED_EXCEPTIONS = [
'Name does not resolve',
'the database system is starting up',
]
class Command(BaseCommand):
def handle(self, *args, **options):
try:
# this tries to run a statement using the credentials in settings.py
with connection.cursor() as cursor:
cursor.execute('SELECT 1')
except django.db.utils.OperationalError as e:
for exp in EXPECTED_EXCEPTIONS:
if exp in str(e):
raise SystemExit(1)
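
# Usage sketch (illustrative, not part of the original command). Deployments
# typically poll `python manage.py is_db_up` until it exits cleanly; the rough
# programmatic equivalent below assumes DJANGO_SETTINGS_MODULE points at a valid
# settings module.
if __name__ == "__main__":
    django.setup()
    from django.core.management import call_command
    call_command("is_db_up")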
| 30.545455
| 80
| 0.642857
|
4a1970e0122c600b5ce85319ff6c687a355b5187
| 4,735
|
py
|
Python
|
oneflow_cambricon-cambricon/oneflow/python/test/ops/test_smooth_l1_loss.py
|
wanghongsheng01/oneflow_cambricon
|
187faaa2cb9ba995080ba22499b6219c2d36f0ac
|
[
"Apache-2.0"
] | null | null | null |
oneflow_cambricon-cambricon/oneflow/python/test/ops/test_smooth_l1_loss.py
|
wanghongsheng01/oneflow_cambricon
|
187faaa2cb9ba995080ba22499b6219c2d36f0ac
|
[
"Apache-2.0"
] | null | null | null |
oneflow_cambricon-cambricon/oneflow/python/test/ops/test_smooth_l1_loss.py
|
wanghongsheng01/oneflow_cambricon
|
187faaa2cb9ba995080ba22499b6219c2d36f0ac
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import uuid
from collections import OrderedDict
import os
import numpy as np
import oneflow as flow
import oneflow.typing as oft
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
def gen_numpy_data(prediction, label, beta=1.0):
original_shape = prediction.shape
elem_cnt = prediction.size
prediction = prediction.reshape(-1)
label = label.reshape(-1)
loss = np.zeros((elem_cnt)).astype(prediction.dtype)
prediction_grad = np.zeros((elem_cnt)).astype(prediction.dtype)
# Forward
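    # smooth L1: 0.5 * d^2 / beta when |d| < beta, otherwise |d| - 0.5 * beta,
    # where d = prediction[i] - label[i]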
for i in np.arange(elem_cnt):
abs_diff = abs(prediction[i] - label[i])
if abs_diff < beta:
loss[i] = 0.5 * abs_diff * abs_diff / beta
else:
loss[i] = abs_diff - 0.5 * beta
# Backward
for i in np.arange(elem_cnt):
diff = prediction[i] - label[i]
abs_diff = abs(diff)
if abs_diff < beta:
prediction_grad[i] = diff / beta
else:
prediction_grad[i] = np.sign(diff)
return {
"loss": loss.reshape(original_shape),
"prediction_grad": prediction_grad.reshape(original_shape),
}
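
# Hand-checked example of the reference above (beta=1.0):
#   prediction=[1.0, 3.0], label=[0.5, 1.0]
#   |d| = 0.5 -> loss = 0.5 * 0.5**2 / 1.0 = 0.125, grad = 0.5
#   |d| = 2.0 -> loss = 2.0 - 0.5 * 1.0 = 1.5,      grad = sign(2.0) = 1.0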
@flow.unittest.skip_unless_1n1d()
class TestSmoothL1Loss(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_smooth_l1_loss(_):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["prediction_shape"] = [
(100,),
(10, 10),
]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["beta"] = [0, 0.5, 1]
for case in GenArgList(arg_dict):
device_type, prediction_shape, data_type, beta = case
assert flow.is_valid_device_tag(device_type)
assert data_type in ["float32", "double", "int8", "int32", "int64"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
prediction = np.random.randn(*prediction_shape).astype(
type_name_to_np_type[data_type]
)
label = np.random.randn(*prediction_shape).astype(
type_name_to_np_type[data_type]
)
np_result = gen_numpy_data(prediction, label, beta)
def assert_prediction_grad(b):
prediction_grad = np_result["prediction_grad"]
assert prediction_grad.dtype == type_name_to_np_type[data_type]
assert np.allclose(prediction_grad, b.numpy()), (
case,
prediction_grad,
b.numpy(),
)
@flow.global_function(type="train", function_config=func_config)
def TestJob(
prediction: oft.Numpy.Placeholder(
prediction_shape, dtype=type_name_to_flow_type[data_type]
),
label: oft.Numpy.Placeholder(
prediction_shape, dtype=type_name_to_flow_type[data_type]
),
):
v = flow.get_variable(
"prediction",
shape=prediction_shape,
dtype=type_name_to_flow_type[data_type],
initializer=flow.constant_initializer(0),
trainable=True,
)
flow.watch_diff(v, assert_prediction_grad)
prediction += v
with flow.scope.placement(device_type, "0:0"):
loss = flow.smooth_l1_loss(prediction, label, beta)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]),
momentum=0,
).minimize(loss)
return loss
loss_np = np_result["loss"]
assert loss_np.dtype == type_name_to_np_type[data_type]
loss = TestJob(prediction, label).get().numpy()
assert np.allclose(loss_np, loss), (case, loss_np, loss)
if __name__ == "__main__":
unittest.main()
| 36.145038
| 79
| 0.601478
|
4a19714332f0e9d061b6396d85de130800577bbf
| 3,058
|
py
|
Python
|
oo/carro_arthur.py
|
arthurbragav/pythonbirds
|
f653ac1038e571529f55d0e490b2a8bd193ad523
|
[
"MIT"
] | null | null | null |
oo/carro_arthur.py
|
arthurbragav/pythonbirds
|
f653ac1038e571529f55d0e490b2a8bd193ad523
|
[
"MIT"
] | null | null | null |
oo/carro_arthur.py
|
arthurbragav/pythonbirds
|
f653ac1038e571529f55d0e490b2a8bd193ad523
|
[
"MIT"
] | null | null | null |
"""
>>> # Testing the motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
>>> # Testing Direcao
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.frear()
>>> carro.calcular_velocidade()
0
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular_direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Oeste'
"""
class Carro:
def __init__(self, direcao, motor):
self.motor = motor
self.direcao = direcao
def calcular_velocidade(self):
return self.motor.velocidade
def acelerar(self):
self.motor.acelerar()
def frear(self):
self.motor.frear()
def girar_a_direita(self):
self.direcao.girar_a_direita()
def girar_a_esquerda(self):
self.direcao.girar_a_esquerda()
def calcular_direcao(self):
return self.direcao.valor
class Motor:
def __init__(self, velocidade=0):
self.velocidade = velocidade
def acelerar(self):
self.velocidade += 1
def frear(self):
self.velocidade -= 2
if self.velocidade < 0:
self.velocidade = 0
class Direcao:
def __init__(self, valor='Norte'):
self.valor = valor
def girar_a_direita(self):
if self.valor == 'Norte':
self.valor = 'Leste'
elif self.valor == 'Leste':
self.valor = 'Sul'
elif self.valor == 'Sul':
self.valor = 'Oeste'
elif self.valor == 'Oeste':
self.valor = 'Norte'
def girar_a_esquerda(self):
if self.valor == 'Norte':
self.valor = 'Oeste'
elif self.valor == 'Leste':
self.valor = 'Norte'
elif self.valor == 'Sul':
self.valor = 'Leste'
elif self.valor == 'Oeste':
self.valor = 'Sul'
def calcular_direcao(self):
return self.valor
| 22.15942
| 39
| 0.558535
|
4a1971561ec79f3a7d5ff2cb6f2a8748f9468d5a
| 17,365
|
py
|
Python
|
qcodes/instrument_drivers/AlazarTech/ATS9440.py
|
mizkulg/Qcodes
|
28448e2ce60041d436958a66529317d355ee4a9d
|
[
"MIT"
] | null | null | null |
qcodes/instrument_drivers/AlazarTech/ATS9440.py
|
mizkulg/Qcodes
|
28448e2ce60041d436958a66529317d355ee4a9d
|
[
"MIT"
] | 73
|
2020-10-08T09:28:41.000Z
|
2021-09-16T11:04:28.000Z
|
qcodes/instrument_drivers/AlazarTech/ATS9440.py
|
mizkulg/Qcodes
|
28448e2ce60041d436958a66529317d355ee4a9d
|
[
"MIT"
] | null | null | null |
from .ATS import AlazarTech_ATS
from .utils import TraceParameter
from qcodes.utils import validators
class AlazarTech_ATS9440(AlazarTech_ATS):
"""
This class is the driver for the ATS9440 board
it inherits from the ATS base class
"""
samples_divisor = 256
channels = 4
def __init__(self, name, **kwargs):
dll_path = '/usr/lib64/libATSApi.so'
super().__init__(name, dll_path=dll_path, **kwargs)
# add parameters
# ----- Parameters for the configuration of the board -----
self.add_parameter(name='clock_source',
parameter_class=TraceParameter,
get_cmd=None,
set_cmd=None,
label='Clock Source',
unit=None,
initial_value='INTERNAL_CLOCK',
val_mapping={'INTERNAL_CLOCK': 1,
'FAST_EXTERNAL_CLOCK': 2,
'SLOW_EXTERNAL_CLOCK': 4,
'EXTERNAL_CLOCK_10MHz_REF': 7})
self.add_parameter(name='external_sample_rate',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='External Sample Rate',
unit='S/s',
vals=validators.MultiType(validators.Ints(1000000, 125000000),
validators.Enum('UNDEFINED')),
initial_value='UNDEFINED')
self.add_parameter(name='sample_rate',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Internal Sample Rate',
unit='S/s',
initial_value=100000000,
val_mapping={1_000: 1,
2_000: 2,
5_000: 4,
10_000: 8,
20_000: 10,
50_000: 12,
100_000: 14,
200_000: 16,
500_000: 18,
1_000_000: 20,
2_000_000: 24,
5_000_000: 26,
10_000_000: 28,
20_000_000: 30,
50_000_000: 34,
100_000_000: 36,
125_000_000: 38,
'EXTERNAL_CLOCK': 64,
'UNDEFINED': 'UNDEFINED'})
self.add_parameter(name='clock_edge',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Clock Edge',
unit=None,
initial_value='CLOCK_EDGE_RISING',
val_mapping={'CLOCK_EDGE_RISING': 0,
'CLOCK_EDGE_FALLING': 1})
self.add_parameter(name='decimation',
get_cmd=None,
parameter_class=TraceParameter,
label='Decimation',
unit=None,
initial_value=1,
vals=validators.Ints(1, 100000))
for i in ['1', '2', '3', '4']:
self.add_parameter(name='coupling' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Coupling channel ' + i,
unit=None,
initial_value='DC',
val_mapping={'AC': 1, 'DC': 2})
self.add_parameter(name='channel_range' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Range channel ' + i,
unit='V',
initial_value=0.1,
val_mapping={0.1: 5,
0.2: 6,
0.4: 7,
1: 10,
2: 11,
4: 12})
self.add_parameter(name='impedance' + i,
get_cmd=None,
parameter_class=TraceParameter,
label='Impedance channel ' + i,
unit='Ohm',
initial_value=50,
val_mapping={50: 2})
self.add_parameter(name='bwlimit' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Bandwidth limit channel ' + i,
unit=None,
initial_value='DISABLED',
val_mapping={'DISABLED': 0,
'ENABLED': 1})
self.add_parameter(name='trigger_operation',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Trigger Operation',
unit=None,
initial_value='TRIG_ENGINE_OP_J',
val_mapping={'TRIG_ENGINE_OP_J': 0,
'TRIG_ENGINE_OP_K': 1,
'TRIG_ENGINE_OP_J_OR_K': 2,
'TRIG_ENGINE_OP_J_AND_K': 3,
'TRIG_ENGINE_OP_J_XOR_K': 4,
'TRIG_ENGINE_OP_J_AND_NOT_K': 5,
'TRIG_ENGINE_OP_NOT_J_AND_K': 6})
for i in ['1', '2']:
self.add_parameter(name='trigger_engine' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Trigger Engine ' + i,
unit=None,
initial_value='TRIG_ENGINE_' + ('J' if i == '1' else 'K'),
val_mapping={'TRIG_ENGINE_J': 0,
'TRIG_ENGINE_K': 1})
self.add_parameter(name='trigger_source' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Trigger Source ' + i,
unit=None,
initial_value='EXTERNAL',
val_mapping={'CHANNEL_A': 0,
'CHANNEL_B': 1,
'EXTERNAL': 2,
'DISABLE': 3,
'CHANNEL_C': 4,
'CHANNEL_D': 5})
self.add_parameter(name='trigger_slope' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Trigger Slope ' + i,
unit=None,
initial_value='TRIG_SLOPE_POSITIVE',
val_mapping={'TRIG_SLOPE_POSITIVE': 1,
'TRIG_SLOPE_NEGATIVE': 2})
self.add_parameter(name='trigger_level' + i,
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Trigger Level ' + i,
unit=None,
initial_value=140,
vals=validators.Ints(0, 255))
self.add_parameter(name='external_trigger_coupling',
get_cmd=None,
parameter_class=TraceParameter,
label='External Trigger Coupling',
unit=None,
initial_value='DC',
val_mapping={'AC': 1, 'DC': 2})
self.add_parameter(name='external_trigger_range',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='External Trigger Range',
unit=None,
initial_value='ETR_5V',
val_mapping={'ETR_5V': 0, 'ETR_TTL': 2})
self.add_parameter(name='trigger_delay',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Trigger Delay',
unit='Sample clock cycles',
initial_value=0,
vals=validators.Multiples(divisor=8, min_value=0))
self.add_parameter(name='timeout_ticks',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='Timeout Ticks',
unit='10 us',
initial_value=0,
vals=validators.Ints(min_value=0))
        # The card has two AUX I/O ports, of which only AUX 2 is controlled by
        # the software (AUX 1 is controlled by the firmware). Use AUX 2, via
        # aux_io_mode and aux_io_param, to control the AUX I/O behaviour.
self.add_parameter(name='aux_io_mode',
get_cmd=None,
set_cmd=None,
parameter_class=TraceParameter,
label='AUX I/O Mode',
unit=None,
initial_value='AUX_OUT_TRIGGER',
val_mapping={'AUX_OUT_TRIGGER': 0,
'AUX_IN_TRIGGER_ENABLE': 1,
'AUX_IN_AUXILIARY': 13})
self.add_parameter(name='aux_io_param',
get_cmd=None,
parameter_class=TraceParameter,
label='AUX I/O Param',
unit=None,
initial_value='NONE',
val_mapping={'NONE': 0,
'TRIG_SLOPE_POSITIVE': 1,
'TRIG_SLOPE_NEGATIVE': 2})
# The above parameters are important for preparing the card.
self.add_parameter(name='mode',
label='Acquisition mode',
unit=None,
initial_value='NPT',
get_cmd=None,
set_cmd=None,
val_mapping={'NPT': 0x200,
'TS': 0x400})
self.add_parameter(name='samples_per_record',
label='Samples per Record',
unit=None,
initial_value=1024,
get_cmd=None,
set_cmd=None,
vals=validators.Multiples(
divisor=self.samples_divisor, min_value=256))
self.add_parameter(name='records_per_buffer',
label='Records per Buffer',
unit=None,
initial_value=10,
get_cmd=None,
set_cmd=None,
vals=validators.Ints(min_value=0))
self.add_parameter(name='buffers_per_acquisition',
label='Buffers per Acquisition',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value=10,
vals=validators.Ints(min_value=0))
self.add_parameter(name='channel_selection',
label='Channel Selection',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='AB',
val_mapping={'A': 1,
'B': 2,
'AB': 3,
'C': 4,
'AC': 5,
'BC': 6,
'D': 7,
'AD': 8,
'BD': 9,
'CD': 10,
'ABCD': 11})
self.add_parameter(name='transfer_offset',
label='Transfer Offset',
unit='Samples',
get_cmd=None,
set_cmd=None,
initial_value=0,
vals=validators.Ints(min_value=0))
self.add_parameter(name='external_startcapture',
label='External Startcapture',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='ENABLED',
val_mapping={'DISABLED': 0X0,
'ENABLED': 0x1})
self.add_parameter(name='enable_record_headers',
label='Enable Record Headers',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='DISABLED',
val_mapping={'DISABLED': 0x0,
'ENABLED': 0x8})
self.add_parameter(name='alloc_buffers',
label='Alloc Buffers',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='DISABLED',
val_mapping={'DISABLED': 0x0,
'ENABLED': 0x20})
self.add_parameter(name='fifo_only_streaming',
label='Fifo Only Streaming',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='DISABLED',
val_mapping={'DISABLED': 0x0,
'ENABLED': 0x800})
self.add_parameter(name='interleave_samples',
label='Interleave Samples',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='DISABLED',
val_mapping={'DISABLED': 0x0,
'ENABLED': 0x1000})
self.add_parameter(name='get_processed_data',
label='Get Processed Data',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value='DISABLED',
val_mapping={'DISABLED': 0x0,
'ENABLED': 0x2000})
self.add_parameter(name='allocated_buffers',
label='Allocated Buffers',
unit=None,
get_cmd=None,
set_cmd=None,
initial_value=4,
vals=validators.Ints(min_value=0))
self.add_parameter(name='buffer_timeout',
label='Buffer Timeout',
unit='ms',
get_cmd=None,
set_cmd=None,
initial_value=1000,
vals=validators.Ints(min_value=0))
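
# Illustrative sketch (requires the AlazarTech SDK and a physical ATS9440 board;
# the values below are taken from the val_mappings defined above):
if __name__ == "__main__":
    alazar = AlazarTech_ATS9440("alazar")   # hypothetical instrument name
    alazar.sample_rate(100_000_000)         # 100 MS/s internal clock
    alazar.channel_range1(0.4)              # +/- 0.4 V input range on channel 1
    print(alazar.clock_source())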
| 50.043228
| 89
| 0.364008
|
4a1972100a9b8abc15ee298b37472c14ba649dac
| 6,699
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/nitrobacterwinogradskyi.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/nitrobacterwinogradskyi.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/nitrobacterwinogradskyi.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Nitrobacter winogradskyi.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:28:15.057404
The undirected graph Nitrobacter winogradskyi has 3081 nodes and 199697
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04209 and has 20 connected components, where the component
with most nodes has 3039 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 104, the mean node degree is 129.63,
and the node degree mode is 1. The top 5 most central nodes are 323098.Nwi_0357
(degree 1095), 323098.Nwi_2641 (degree 923), 323098.Nwi_0119 (degree 840),
323098.Nwi_2143 (degree 823) and 323098.Nwi_0197 (degree 754).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import NitrobacterWinogradskyi
# Then load the graph
graph = NitrobacterWinogradskyi()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def NitrobacterWinogradskyi(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Nitrobacter winogradskyi graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Nitrobacter winogradskyi graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:28:15.057404
The undirected graph Nitrobacter winogradskyi has 3081 nodes and 199697
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04209 and has 20 connected components, where the component
with most nodes has 3039 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 104, the mean node degree is 129.63,
and the node degree mode is 1. The top 5 most central nodes are 323098.Nwi_0357
(degree 1095), 323098.Nwi_2641 (degree 923), 323098.Nwi_0119 (degree 840),
323098.Nwi_2143 (degree 823) and 323098.Nwi_0197 (degree 754).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import NitrobacterWinogradskyi
# Then load the graph
graph = NitrobacterWinogradskyi()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="NitrobacterWinogradskyi",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.444444
| 223
| 0.704284
|
4a19730e1e79089bb8673e0b22b5c67959bd9435
| 2,974
|
py
|
Python
|
glance/tests/unit/base.py
|
cloudbau/glance
|
616b097c052f5bf59b05326ed1d2d1ae1c703dc9
|
[
"Apache-2.0"
] | 1
|
2018-05-03T03:52:39.000Z
|
2018-05-03T03:52:39.000Z
|
glance/tests/unit/base.py
|
cloudbau/glance
|
616b097c052f5bf59b05326ed1d2d1ae1c703dc9
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/unit/base.py
|
cloudbau/glance
|
616b097c052f5bf59b05326ed1d2d1ae1c703dc9
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import fixtures
from oslo.config import cfg
import stubout
from glance import store
from glance.store import location
from glance.store import sheepdog
from glance.tests import stubs
from glance.tests import utils as test_utils
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
CONF.import_opt('sql_connection', 'glance.db.sqlalchemy.api')
class StoreClearingUnitTest(test_utils.BaseTestCase):
def setUp(self):
super(StoreClearingUnitTest, self).setUp()
# Ensure stores + locations cleared
location.SCHEME_TO_CLS_MAP = {}
self._create_stores()
self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict())
def _create_stores(self):
"""Create known stores. Mock out sheepdog's subprocess dependency
on collie.
"""
self.stubs.Set(sheepdog.Store, 'configure_add', lambda x: None)
store.create_stores()
class IsolatedUnitTest(StoreClearingUnitTest):
"""
Unit test case that establishes a mock environment within
a testing directory (in isolation)
"""
registry = None
def setUp(self):
super(IsolatedUnitTest, self).setUp()
self.test_dir = self.useFixture(fixtures.TempDir()).path
policy_file = self._copy_data_file('policy.json', self.test_dir)
self.config(sql_connection='sqlite://',
verbose=False,
debug=False,
default_store='filesystem',
filesystem_store_datadir=os.path.join(self.test_dir),
policy_file=policy_file,
lock_path=os.path.join(self.test_dir))
stubs.stub_out_registry_and_store_server(self.stubs,
self.test_dir,
registry=self.registry)
def _copy_data_file(self, file_name, dst_dir):
src_file_name = os.path.join('glance/tests/etc', file_name)
shutil.copy(src_file_name, dst_dir)
dst_file_name = os.path.join(dst_dir, file_name)
return dst_file_name
    def set_policy_rules(self, rules):
        with open(CONF.policy_file, 'w') as fap:
            fap.write(json.dumps(rules))
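
# Illustrative subclass sketch (not part of the original module): a test case that
# needs the isolated registry/store environment only has to inherit from
# IsolatedUnitTest, e.g.
#
#     class ExampleImageTest(IsolatedUnitTest):
#         def test_policy_override(self):
#             self.set_policy_rules({"get_image": ""})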
| 33.795455
| 78
| 0.666779
|
4a1973d34ed2552467fa429d40d5b02d46ba83c5
| 9,017
|
py
|
Python
|
lib/helpers/theaudiodb.py
|
cartmandos/script.module.metadatautils
|
536d935f91691d3b73861b7f5aa235bc182cdf07
|
[
"Apache-2.0"
] | 1
|
2019-03-24T00:43:46.000Z
|
2019-03-24T00:43:46.000Z
|
lib/helpers/theaudiodb.py
|
cartmandos/script.module.metadatautils
|
536d935f91691d3b73861b7f5aa235bc182cdf07
|
[
"Apache-2.0"
] | 1
|
2019-09-07T13:47:28.000Z
|
2019-09-07T13:47:28.000Z
|
lib/helpers/theaudiodb.py
|
cartmandos/script.module.metadatautils
|
536d935f91691d3b73861b7f5aa235bc182cdf07
|
[
"Apache-2.0"
] | 5
|
2019-04-07T01:40:45.000Z
|
2021-01-05T10:17:06.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
script.module.metadatautils
theaudiodb.py
Get metadata from theaudiodb
"""
from utils import get_json, strip_newlines, KODI_LANGUAGE, get_compare_string
from simplecache import use_cache
import xbmcvfs
class TheAudioDb(object):
"""get metadata from the audiodb"""
api_key = "12376f5352254d85853987"
ignore_cache = False
def __init__(self, simplecache=None):
"""Initialize - optionaly provide simplecache object"""
if not simplecache:
from simplecache import SimpleCache
self.cache = SimpleCache()
else:
self.cache = simplecache
def search(self, artist, album, track):
"""get musicbrainz id by query of artist, album and/or track"""
artistid = ""
albumid = ""
artist = artist.lower()
params = {'s': artist, 'a': album}
data = self.get_data("searchalbum.php", params)
if data and data.get("album") and len(data.get("album")) > 0:
adbdetails = data["album"][0]
# safety check - only allow exact artist match
foundartist = adbdetails.get("strArtist", "").lower()
if foundartist and get_compare_string(foundartist) == get_compare_string(artist):
albumid = adbdetails.get("strMusicBrainzID", "")
artistid = adbdetails.get("strMusicBrainzArtistID", "")
if (not artistid or not albumid) and artist and track:
params = {'s': artist, 't': track}
data = self.get_data("searchtrack.php", params)
if data and data.get("track") and len(data.get("track")) > 0:
adbdetails = data["track"][0]
# safety check - only allow exact artist match
foundartist = adbdetails.get("strArtist", "").lower()
if foundartist and get_compare_string(foundartist) == get_compare_string(artist):
albumid = adbdetails.get("strMusicBrainzID", "")
artistid = adbdetails.get("strMusicBrainzArtistID", "")
return artistid, albumid
def get_artist_id(self, artist, album, track):
"""get musicbrainz id by query of artist, album and/or track"""
return self.search(artist, album, track)[0]
def get_album_id(self, artist, album, track):
"""get musicbrainz id by query of artist, album and/or track"""
return self.search(artist, album, track)[1]
def artist_info(self, artist_id):
"""get artist metadata by musicbrainz id"""
details = {"art": {}}
data = self.get_data("/artist-mb.php", {'i': artist_id})
if data and data.get("artists"):
adbdetails = data["artists"][0]
if adbdetails.get("strArtistBanner") and xbmcvfs.exists(adbdetails.get("strArtistBanner")):
details["art"]["banner"] = adbdetails.get("strArtistBanner")
details["art"]["banners"] = [adbdetails.get("strArtistBanner")]
details["art"]["fanarts"] = []
if adbdetails.get("strArtistFanart") and xbmcvfs.exists(adbdetails.get("strArtistFanart")):
details["art"]["fanart"] = adbdetails.get("strArtistFanart")
details["art"]["fanarts"].append(adbdetails.get("strArtistFanart"))
if adbdetails.get("strArtistFanart2") and xbmcvfs.exists(adbdetails.get("strArtistFanart2")):
details["art"]["fanarts"].append(adbdetails.get("strArtistFanart2"))
if adbdetails.get("strArtistFanart3") and xbmcvfs.exists(adbdetails.get("strArtistFanart3")):
details["art"]["fanarts"].append(adbdetails.get("strArtistFanart3"))
if adbdetails.get("strArtistLogo") and xbmcvfs.exists(adbdetails.get("strArtistLogo")):
details["art"]["clearlogo"] = adbdetails.get("strArtistLogo")
details["art"]["clearlogos"] = [adbdetails.get("strArtistLogo")]
if adbdetails.get("strArtistClearart") and xbmcvfs.exists(adbdetails.get("strArtistClearart")):
details["art"]["clearart"] = adbdetails.get("strArtistClearart")
details["art"]["cleararts"] = [adbdetails.get("strArtistClearart")]
if adbdetails.get("strArtistThumb") and xbmcvfs.exists(adbdetails.get("strArtistThumb")):
details["art"]["thumb"] = adbdetails["strArtistThumb"]
details["art"]["thumbs"] = [adbdetails["strArtistThumb"]]
if adbdetails.get("strBiography" + KODI_LANGUAGE.upper()):
details["plot"] = adbdetails["strBiography" + KODI_LANGUAGE.upper()]
if adbdetails.get("strBiographyEN") and not details.get("plot"):
details["plot"] = adbdetails.get("strBiographyEN")
if details.get("plot"):
details["plot"] = strip_newlines(details["plot"])
if adbdetails.get("strArtistAlternate"):
details["alternamename"] = adbdetails["strArtistAlternate"]
if adbdetails.get("intFormedYear"):
details["formed"] = adbdetails["intFormedYear"]
if adbdetails.get("intBornYear"):
details["born"] = adbdetails["intBornYear"]
if adbdetails.get("intDiedYear"):
details["died"] = adbdetails["intDiedYear"]
if adbdetails.get("strDisbanded"):
details["disbanded"] = adbdetails["strDisbanded"]
if adbdetails.get("strStyle"):
details["style"] = adbdetails["strStyle"].split("/")
if adbdetails.get("strGenre"):
details["genre"] = adbdetails["strGenre"].split("/")
if adbdetails.get("strMood"):
details["mood"] = adbdetails["strMood"].split("/")
if adbdetails.get("strWebsite"):
details["homepage"] = adbdetails["strWebsite"]
if adbdetails.get("strFacebook"):
details["facebook"] = adbdetails["strFacebook"]
if adbdetails.get("strTwitter"):
details["twitter"] = adbdetails["strTwitter"]
if adbdetails.get("strGender"):
details["gender"] = adbdetails["strGender"]
if adbdetails.get("intMembers"):
details["members"] = adbdetails["intMembers"]
if adbdetails.get("strCountry"):
details["country"] = adbdetails["strCountry"].split(", ")
return details
def album_info(self, album_id):
"""get album metadata by musicbrainz id"""
details = {"art": {}}
data = self.get_data("/album-mb.php", {'i': album_id})
if data and data.get("album"):
adbdetails = data["album"][0]
if adbdetails.get("strAlbumThumb") and xbmcvfs.exists(adbdetails.get("strAlbumThumb")):
details["art"]["thumb"] = adbdetails.get("strAlbumThumb")
details["art"]["thumbs"] = [adbdetails.get("strAlbumThumb")]
if adbdetails.get("strAlbumCDart") and xbmcvfs.exists(adbdetails.get("strAlbumCDart")):
details["art"]["discart"] = adbdetails.get("strAlbumCDart")
details["art"]["discarts"] = [adbdetails.get("strAlbumCDart")]
if adbdetails.get("strAlbumSpine") and xbmcvfs.exists(adbdetails.get("strAlbumSpine")):
details["art"]["spine"] = adbdetails.get("strAlbumSpine")
if adbdetails.get("strAlbumThumbBack") and xbmcvfs.exists(adbdetails.get("strAlbumThumbBack")):
details["art"]["thumbback"] = adbdetails.get("strAlbumThumbBack")
if adbdetails.get("strDescription%s" % KODI_LANGUAGE.upper()):
details["plot"] = adbdetails.get("strDescription%s" % KODI_LANGUAGE.upper())
if not details.get("plot") and adbdetails.get("strDescriptionEN"):
details["plot"] = adbdetails.get("strDescriptionEN")
if details.get("plot"):
details["plot"] = strip_newlines(details["plot"])
if adbdetails.get("strGenre"):
details["genre"] = adbdetails["strGenre"].split("/")
if adbdetails.get("strStyle"):
details["style"] = adbdetails["strStyle"].split("/")
if adbdetails.get("strMood"):
details["mood"] = adbdetails["strMood"].split("/")
if adbdetails.get("intYearReleased"):
details["year"] = adbdetails["intYearReleased"]
if adbdetails.get("intScore"):
details["rating"] = adbdetails["intScore"]
if adbdetails.get("strAlbum"):
details["title"] = adbdetails["strAlbum"]
return details
@use_cache(60)
def get_data(self, endpoint, params):
"""helper method to get data from theaudiodb json API"""
endpoint = 'http://www.theaudiodb.com/api/v1/json/%s/%s' % (self.api_key, endpoint)
        data = get_json(endpoint, params)
        return data if data else {}
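
# Illustrative usage (needs a Kodi runtime for xbmcvfs/simplecache plus network
# access; the artist and album strings are only examples):
if __name__ == "__main__":
    adb = TheAudioDb()
    artist_id, album_id = adb.search("coldplay", "parachutes", "")
    if artist_id:
        print(adb.artist_info(artist_id).get("genre"))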
| 53.35503
| 107
| 0.594433
|
4a197415b48bec360d540b8b3d1f721ce870b7bf
| 4,395
|
py
|
Python
|
moai/engine/lightning/test/tester.py
|
ai-in-motion/moai
|
e38cac046c059d2e2331ef4883bbabc5a500a5cf
|
[
"Apache-2.0"
] | 10
|
2021-04-02T11:21:33.000Z
|
2022-01-18T18:32:32.000Z
|
moai/engine/lightning/test/tester.py
|
ai-in-motion/moai
|
e38cac046c059d2e2331ef4883bbabc5a500a5cf
|
[
"Apache-2.0"
] | 1
|
2022-03-22T20:10:55.000Z
|
2022-03-24T13:11:02.000Z
|
moai/engine/lightning/test/tester.py
|
ai-in-motion/moai
|
e38cac046c059d2e2331ef4883bbabc5a500a5cf
|
[
"Apache-2.0"
] | 3
|
2021-05-16T20:47:40.000Z
|
2021-12-01T21:15:36.000Z
|
import moai.checkpoint.lightning as mickpt
import moai.log.lightning as milog
import pytorch_lightning
import hydra.utils as hyu
import omegaconf.omegaconf
import typing
__all__ = ["LightningTester"]
class LightningTester(pytorch_lightning.Trainer):
def __init__(self,
logging: omegaconf.DictConfig=None,
callbacks: omegaconf.DictConfig=None,
default_root_dir: typing.Optional[str]=None,
process_position: int=0,
num_nodes: int=1,
num_processes: int=1,
gpus: typing.Optional[typing.Union[typing.List[int], str, int]]=None,
tpu_cores: typing.Optional[typing.Union[typing.List[int], str, int]]=None,
log_gpu_memory: typing.Optional[str]=None,
max_steps: typing.Optional[int]=None,
min_steps: typing.Optional[int]=None,
limit_test_batches: typing.Union[int, float]=1.0,
accelerator: typing.Optional[typing.Union[str, pytorch_lightning.accelerators.Accelerator]]=None,
sync_batchnorm: bool=False,
precision: int=32,
weights_summary: typing.Optional[str]='full',
weights_save_path: typing.Optional[str]=None,
truncated_bptt_steps: typing.Optional[int]=None,
resume_from_checkpoint: typing.Optional[str]=None,
profiler: typing.Optional[typing.Union[pytorch_lightning.profiler.BaseProfiler, bool, str]]=None,
benchmark: bool=False,
deterministic: bool=True,
replace_sampler_ddp: bool=True,
prepare_data_per_node: bool=True,
plugins: typing.Optional[list]=None,
amp_backend: str='native',
amp_level: str='O2',
distributed_backend: typing.Optional[str]=None,
**kwargs
):
logger = hyu.instantiate(logging)\
if logging is not None else milog.NoOp()
pytl_callbacks = [hyu.instantiate(c) for c in callbacks.values()]\
if callbacks is not None else []
super(LightningTester, self).__init__(
logger=logger,
checkpoint_callback=False,
callbacks=pytl_callbacks,
default_root_dir=None if not default_root_dir else default_root_dir,
gradient_clip_val=0.0,
process_position=process_position,
num_nodes=num_nodes,
gpus=gpus,
auto_select_gpus=False,
tpu_cores=tpu_cores,
log_gpu_memory=log_gpu_memory,
progress_bar_refresh_rate=1,
overfit_batches=0.0,
track_grad_norm=-1,
check_val_every_n_epoch=1,
fast_dev_run=False,
accumulate_grad_batches=1,
max_epochs=1,
min_epochs=1,
max_steps=max_steps,
min_steps=min_steps,
limit_train_batches=1.0,
limit_val_batches=1.0,
limit_test_batches=limit_test_batches,
val_check_interval=1,
flush_logs_every_n_steps=1,
log_every_n_steps=1,
accelerator=accelerator,
sync_batchnorm=sync_batchnorm,
precision=precision,
weights_summary=weights_summary,
weights_save_path=weights_save_path,
num_sanity_val_steps=0,
truncated_bptt_steps=truncated_bptt_steps,
resume_from_checkpoint=resume_from_checkpoint,
profiler=profiler,
benchmark=benchmark,
deterministic=deterministic,
reload_dataloaders_every_epoch=False,
auto_lr_find=False,
replace_sampler_ddp=replace_sampler_ddp,
terminate_on_nan=False,
auto_scale_batch_size=False,
prepare_data_per_node=prepare_data_per_node,
plugins=plugins,
amp_backend=amp_backend,
distributed_backend=distributed_backend,
amp_level=amp_level,
automatic_optimization=False,
**kwargs
)
def run(self, model):
return self.test(model, verbose=False)
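
# Illustrative sketch (assumes a Hydra/omegaconf config supplying the `logging` and
# `callbacks` nodes, and `model` being an existing pytorch_lightning.LightningModule):
#
#     tester = LightningTester(logging=None, callbacks=None, gpus=1)
#     results = tester.run(model)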
| 43.514851
| 123
| 0.586348
|
4a1974b8d3c398edbb170c203d02cd75fd9d56b7
| 1,405
|
py
|
Python
|
src/APIs/SpeechAPI.py
|
dinispeixoto/Kaydara
|
5a22be3f9e931a00f3f3c9bcd1dbda8e1cce0b4d
|
[
"MIT"
] | null | null | null |
src/APIs/SpeechAPI.py
|
dinispeixoto/Kaydara
|
5a22be3f9e931a00f3f3c9bcd1dbda8e1cce0b4d
|
[
"MIT"
] | 3
|
2021-02-08T20:22:41.000Z
|
2022-03-25T14:38:24.000Z
|
src/APIs/SpeechAPI.py
|
dinispeixoto/Kaydara
|
5a22be3f9e931a00f3f3c9bcd1dbda8e1cce0b4d
|
[
"MIT"
] | null | null | null |
from pydub import AudioSegment
import os, json, requests, urllib
# Environment variables on heroku
USERNAME = os.environ['SPEECH_USERNAME']
PASSWORD = os.environ['SPEECH_PASSWORD']
# send audio file and return the transcript
def send_audio(audio_url):
headers = {'Content-Type': 'audio/flac',}
params = {'model': 'en-US_NarrowbandModel',}
audio_file = __download(audio_url)
__convert(audio_file)
    with open(audio_file + '.flac', 'rb') as flac_file:
        data = flac_file.read()
response = requests.post('https://stream.watsonplatform.net/speech-to-text/api/v1/recognize',
headers=headers, data=data, params=params, auth=(USERNAME, PASSWORD))
response_decoded = response.content.decode("utf-8")
dict_response = json.loads(response_decoded)
print(dict_response)
os.remove(audio_file)
os.remove(audio_file + '.flac')
if dict_response['results']:
return dict_response['results'][0]['alternatives'][0]['transcript']
else:
return 'Nothing'
def __download(audio_url):
    webFile = urllib.request.urlopen(audio_url)
    fileName = audio_url.split('/')[-1]
    with open(fileName, 'wb') as localFile:
        localFile.write(webFile.read())
    webFile.close()
    return fileName
def __convert(audio):
AudioSegment.from_file(audio).export(audio + '.flac', format='flac')
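
# Illustrative usage (hypothetical URL; requires the SPEECH_USERNAME/SPEECH_PASSWORD
# environment variables and ffmpeg for pydub's flac export):
if __name__ == "__main__":
    print(send_audio("https://example.com/sample.mp3"))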
| 31.931818
| 100
| 0.659786
|
4a1974d3c2258d2ac81762c5df24f08cb6446aa6
| 2,393
|
py
|
Python
|
pakage/endreader.py
|
Cubestudio001/cEncrypter
|
3514c692616ce02af0002dee36f6432f9d9023d2
|
[
"Apache-2.0"
] | null | null | null |
pakage/endreader.py
|
Cubestudio001/cEncrypter
|
3514c692616ce02af0002dee36f6432f9d9023d2
|
[
"Apache-2.0"
] | null | null | null |
pakage/endreader.py
|
Cubestudio001/cEncrypter
|
3514c692616ce02af0002dee36f6432f9d9023d2
|
[
"Apache-2.0"
] | null | null | null |
import getpass
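
# Token format implied by the decoders below: fields are separated by "$"; a token
# found in the ens mapping (by default "dt" -> ".", "sq" -> ",", "lr" -> "/",
# "rl" -> "\", "ff" -> ";", "sg" -> "'", "db" -> '"') is replaced by the mapped
# character, and any other token is treated as a decimal code point and decoded
# with chr(int(token)).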
def decode_string(index,ens="$infile:<red ascii>$.:dt$,:sq$/:lr$\:rl$;:ff$':sg$\":db",createfile=False):
'''
    This function still requires you to verify the legitimacy of the input value,
    but an illegal input won't lead to a fatal error.
    index -> input string
    ens -> encoding standard to use
    createfile -> Whether to create a file on PATH
'''
#此版本后ens采用已读取的str形式传入
#分割加密字符串,去除乱码
index = index.split("$")
for i in range(len(index)):
if index[i] == '':
index.pop(i)
index.pop(0)
index.pop(len(index)-1)
__return = ""
    # Read the standard (mapping) definition
std = ens.split("$")
    # Split
    # Remove invalid entries
for s in range(len(std)):
if std[s-1] == '':
std.pop(s)
__ens = {}
for i in std:
b = i.split(':')
if b[0] != 'infile':
try:
__ens[b[1]] = b[0]
except:
continue
for s in index:
try:
__return = __return + __ens[s]
except:
__return = __return + str(chr(int(s)))
return __return
def decode_file(filename,ens="$infile:<red ascii>$.:dt$,:sq$/:lr$\:rl$;:ff$':sg$\":db",createfile=False):
    '''
    This function still requires you to verify the legitimacy of the input value;
    an illegal input will lead to a fatal error.
    filename -> input file name
    ens -> path of the standard file to use
    createfile -> whether to create a file on PATH
    '''
    std = open(ens, 'r', encoding="utf-8")
    # Split the encrypted string and drop empty/garbled pieces
    index_file = open(filename, 'r', encoding='utf-8')
    index = index_file.read()
    index_file.close()
    index = index.split("$")
for i in range(len(index)):
if index[i] == '':
index.pop(i)
index.pop(0)
index.pop(len(index)-1)
__return = ""
    # Read the standard (mapping) definition
std1 = std.read()
std.close()
std = std1.split("$")
    # Split
    # Remove invalid entries
for s in range(len(std)):
if std[s-1] == '':
std.pop(s)
__ens = {}
for i in std:
b = i.split(':')
if b[0] != 'infile':
try:
__ens[b[1]] = b[0]
except:
continue
for s in index:
try:
__return = __return + __ens[s]
except:
__return = __return + str(chr(int(s)))
return __return
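# Rough usage sketch (file names are hypothetical; the inputs are assumed to
# follow the "$"-delimited formats produced by the matching encoder in this package):
#   plain_text = decode_file('message.cenc', ens='standard.txt')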
| 23.460784
| 106
| 0.475972
|
4a1975a713ac9f120da3d21b1240bd84da2327aa
| 8,937
|
py
|
Python
|
cptest.py
|
outofmbufs/imagetools
|
a4d35744f7e67a4b824026762b758257fb2d3994
|
[
"MIT"
] | null | null | null |
cptest.py
|
outofmbufs/imagetools
|
a4d35744f7e67a4b824026762b758257fb2d3994
|
[
"MIT"
] | null | null | null |
cptest.py
|
outofmbufs/imagetools
|
a4d35744f7e67a4b824026762b758257fb2d3994
|
[
"MIT"
] | null | null | null |
# tests for croppan
import json
import os
import tempfile
import unittest
from croppan import expand_pans, gen_panspecs, PanSpec
from contextlib import contextmanager
class TestMethods(unittest.TestCase):
NDN = 200 # number of dummy names.
DN = None # the actual dummy names
@classmethod
def makedummynames(cls):
if cls.NDN < 10:
raise ValueError(f"NDN ({cls.NDN}) too small. Minimum is 10.")
# find a directory name that does not exist, starting with "/X"
# (almost always sufficient) and adding additional X's as necessary
d = "/X"
while True:
try:
with open(d) as _:
pass
except FileNotFoundError:
break
except IsADirectoryError:
pass
d += "X"
# make the dummynames with the non-existent directory prefix
cls.DN = [d + f"/F{i:03d}" for i in range(cls.NDN)]
def setUp(self):
if self.DN is None:
self.makedummynames()
@staticmethod
    def cbstr(cropbox):
        """Return a string suitable for gen_panspecs from a crop box"""
return f"{cropbox[0]},{cropbox[1]},{cropbox[2]},{cropbox[3]}"
def checkallnames(self, cropspecs):
self.assertEqual(len(cropspecs), len(self.DN))
for i, t in enumerate(cropspecs):
self.assertEqual(t[0], self.DN[i])
def test0(self):
# test basic interpolation
crop_A = [0, 10, 200, 210]
crop_B = [2, 12, 202, 212]
crop_M = [1, 11, 201, 211] # hand-calculated midpoint
p = PanSpec(image0=self.DN[0], crop0=crop_A,
image1=self.DN[2], crop1=crop_B)
crops = [crop_A, crop_M, crop_B]
for i, t in enumerate(expand_pans(self.DN, [p])):
self.assertEqual(t[0], self.DN[i])
if i < len(crops):
self.assertEqual(t[1], crops[i])
else:
self.assertEqual(t[1], crop_B)
def test1(self):
# like test0, just more, and using JSON input format
halfNDN = self.NDN // 2
crop_A = [0, 10, 200, 210]
crop_B = [x + halfNDN for x in crop_A]
pans = list(gen_panspecs(json.dumps(
[{'image0': self.DN[0], 'crop0': self.cbstr(crop_A),
'image1': self.DN[halfNDN], 'crop1': self.cbstr(crop_B)}])))
xp = list(expand_pans(self.DN, pans))
# all the file names should be in the resulting expansion
self.checkallnames(xp)
# the crop box should have been interpolated one unit at a time
for i in range(halfNDN):
self.assertEqual(xp[i][1], [x + i for x in crop_A])
# and the rest should be all the last one
for i in range(halfNDN+1, self.NDN):
self.assertEqual(xp[i][1], crop_B)
def test2(self):
# test basic interpolation not starting at the first file
offset = 3
npan = 4
crop_A = [0, 10, 200, 210]
crop_B = [x + npan for x in crop_A]
pans = [PanSpec(image0=self.DN[offset], crop0=crop_A,
image1=self.DN[offset+npan], crop1=crop_B)]
xp = list(expand_pans(self.DN, pans))
# all the file names should be in the resulting expansion
self.checkallnames(xp)
# the initial images, including the start of the pan, should all
# be crop_A (inferred initial crop)
for i in range(offset+1):
with self.subTest(i=i):
self.assertEqual(xp[i][1], crop_A)
# the next npan should all increase by 1 (based on how crop_B was made)
for i in range(npan):
self.assertEqual(xp[i+offset][1],
[crop_A[k] + i for k in (0, 1, 2, 3)])
def test3(self):
# like test1 but go up to a midpoint and then back down
# want an even number of test cases
if (self.NDN // 2) * 2 != self.NDN:
ntests = self.NDN - 1
else:
ntests = self.NDN
halfNDN = ntests // 2
crop_A = [0, 10, 200, 210]
crop_B = [x + halfNDN - 1 for x in crop_A]
crop_C = crop_A
pans = list(gen_panspecs(json.dumps(
[{'image0': self.DN[0], 'crop0': self.cbstr(crop_A),
'image1': self.DN[halfNDN-1], 'crop1': self.cbstr(crop_B)},
{'image0': self.DN[halfNDN], 'crop0': None,
'image1': self.DN[ntests-1], 'crop1': self.cbstr(crop_C)}])))
xp = list(expand_pans(self.DN, pans))
self.checkallnames(xp)
# the way up...
for i in range(halfNDN):
with self.subTest(i=i):
self.assertEqual(xp[i][1], [x + i for x in crop_A])
# and the way back down...
for i in range(halfNDN, ntests):
with self.subTest(i=i):
self.assertEqual(xp[i][1],
[x - (i - halfNDN) for x in crop_B])
def test4(self):
# test the repeat 'n' times single file form
repeater = 20
crop0 = [0, 10, 200, 210]
crop1 = [x + repeater - 1 for x in crop0]
pans = [PanSpec(image0=self.DN[0],
crop0=crop0, crop1=crop1, n=repeater),
PanSpec(image0=self.DN[1], crop0=crop1)]
xp = list(expand_pans(self.DN, pans))
# the result should have 'repeater' copies of the
# first file name and then the rest of them. Based on that,
# this construction should pass checkallnames():
self.checkallnames([xp[0]] + xp[repeater:])
# The results should pan 1 unit at a time
# over those first 'repeater' copies of the image and
# then be at crop1 for the remainder
for i, t in enumerate(xp):
with self.subTest(i=i, t=t):
if i < repeater:
self.assertEqual(t[1], [x + i for x in crop0])
self.assertEqual(t[0], self.DN[0])
else:
self.assertEqual(t[1], crop1)
self.assertEqual(t[0], self.DN[i + 1 - repeater])
def test5(self):
# Three edge cases - just one pan at:
# - the very beginning
# - the very end
# - somewhere in the middle
crop0 = [0, 10, 200, 210]
for nth in (0, 5, self.NDN-1):
with self.subTest(nth=nth):
pans = [PanSpec(image0=self.DN[nth], crop0=crop0)]
xp = list(expand_pans(self.DN, pans))
self.checkallnames(xp)
# every cropbox should just be crop0
for t in xp:
self.assertEqual(t[1], crop0)
def test6(self):
# every file has an individual crop box
crop_A = [0, 10, 200, 210]
pans = [PanSpec(image0=fn, crop0=[x + k for x in crop_A])
for k, fn in enumerate(self.DN)]
xp = list(expand_pans(self.DN, pans))
self.checkallnames(xp)
for i, t in enumerate(xp):
self.assertEqual(t[1], [x + i for x in crop_A])
def test7(self):
# every file has an individual crop box using the None form
# for all but the first (it's illegal on the first)
crop_A = [0, 10, 200, 210]
pans = [PanSpec(image0=self.DN[0], crop0=crop_A)]
pans += [PanSpec(image0=fn, crop0=None) for fn in self.DN[1:]]
xp = list(expand_pans(self.DN, pans))
self.checkallnames(xp)
for i, t in enumerate(xp):
self.assertEqual(t[0], self.DN[i])
self.assertEqual(t[1], crop_A)
# context manager encapsulates "delete the temp file when done"
@contextmanager
def _tempfile(self):
tfd, tfname = tempfile.mkstemp(text=True)
try:
yield tfd, tfname
finally:
# Close the file descriptor, but don't bomb if the file was
# closed already (which the test does Because Reasons)
try:
os.close(tfd)
except OSError:
pass
os.remove(tfname)
def test8(self):
# test the file form of gen_panspecs
with self._tempfile() as (tfd, tfname):
os.write(tfd, b'{"image0": "abc", "crop0": "0,1,2,3"}')
os.close(tfd)
pans = list(gen_panspecs(tfname))
self.assertEqual(len(pans), 1)
with self._tempfile() as (tfd, tfname):
os.write(tfd, b'[{"image0": "abc", "crop0": "0,1,2,3"},'
b'{"image0": "def", "crop0": "3,2,1,0"}]')
os.close(tfd)
pans = list(gen_panspecs(tfname))
self.assertEqual(len(pans), 2)
j1, j2 = pans
self.assertEqual(j1.image0, "abc")
self.assertEqual(j1.crop0, (0, 1, 2, 3))
self.assertEqual(j2.image0, "def")
self.assertEqual(j2.crop0, (3, 2, 1, 0))
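# Running this file directly executes the whole suite, e.g. "python cptest.py -v".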
if __name__ == '__main__':
    unittest.main()
| 35.324111
| 79
| 0.540897
|
4a1975b2b4071b40477bc9bfebd8b957c0f78fa6
| 18,000
|
py
|
Python
|
test/functional/rpc_rawtransaction.py
|
JSKitty/dogecash
|
99b07b15c396da2a8fa5852655bf193016ee270a
|
[
"MIT"
] | 1
|
2021-12-16T01:12:10.000Z
|
2021-12-16T01:12:10.000Z
|
test/functional/rpc_rawtransaction.py
|
JSKitty/dogecash
|
99b07b15c396da2a8fa5852655bf193016ee270a
|
[
"MIT"
] | null | null | null |
test/functional/rpc_rawtransaction.py
|
JSKitty/dogecash
|
99b07b15c396da2a8fa5852655bf193016ee270a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransaction
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes,0,2)
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
#assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid DogeCash address", self.nodes[0].createrawtransaction, [], {'foo': 0})
#assert_raises_rpc_error(-3, "Amount is not a number", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
addr = self.nodes[0].getnewaddress("")
addrinfo = self.nodes[0].validateaddress(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info')
# Test `signrawtransaction` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransaction(rawtx, [prevtx])
assert succ["complete"]
del prevtx["amount"]
succ = self.nodes[0].signrawtransaction(rawtx, [prevtx])
assert succ["complete"]
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransaction, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransaction, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransaction, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-1, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
# createmultisig can only take public keys
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
assert_raises_rpc_error(-1, "no full public key for address", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        #THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT IN THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('250.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransaction(rawTx2, inputs)
self.log.info(rawTxPartialSigned1)
        assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransaction(rawTx2, inputs)
self.log.info(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxSignedComplete = self.nodes[2].signrawtransaction(rawTxPartialSigned1['hex'], inputs)
self.log.info(rawTxSignedComplete)
assert_equal(rawTxSignedComplete['complete'], True)
self.nodes[2].sendrawtransaction(rawTxSignedComplete['hex'])
rawTx2 = self.nodes[0].decoderawtransaction(rawTxSignedComplete['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('250.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["txid"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
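        # 11. valid parameters - sequence number within range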
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| 51.873199
| 174
| 0.653
|
4a197644e6ae91fca7341bd3eb45b12b7ca04c9e
| 8,879
|
py
|
Python
|
retarded.py
|
velo223/Velo-s-Spam-Scripts
|
286943dee2734b0a412602794026b91b210565bc
|
[
"CC0-1.0"
] | null | null | null |
retarded.py
|
velo223/Velo-s-Spam-Scripts
|
286943dee2734b0a412602794026b91b210565bc
|
[
"CC0-1.0"
] | null | null | null |
retarded.py
|
velo223/Velo-s-Spam-Scripts
|
286943dee2734b0a412602794026b91b210565bc
|
[
"CC0-1.0"
] | null | null | null |
# Copypastas by BACVelocifaptor#6969, he#9999 and u/GerardDG on Reddit. Script written by BACVelocifaptor#6969. If you kang, you big gei.
opt = {
"v":"""
I always knew humanity cannot be the only cognizant species in the galaxy. The fact that we are less than type 1 on the Kardashev scale always bothered me, as did the hubris of humans who believed only in the power of money and sex, only on this plane of existence. I was always open to the existence of higher civilizations, higher-dimensional beings. What I did not really subscribe to was the notion that this life could be a simulation created by these higher-dimensional beings for their amusement. So I was rudely shocked while aboard an airplane I was kidnapped by a giant tentacled creature, its body apparently so massive that its tentacles extended into hyperspace. Through the jetstream, it picked me up, and led me to another world through some watery ether. And then I saw what I thought was God in all his glory: a giant Patrick the starfish angel. But he was just an archangel, who announced the coming of the one true God. And that is when I beheld his beautiful personage, a four-headed SpongeBob SquarePants adonis, his name written on the giant scarlet popcorn boxes distributed to all his devotees, who previously appeared as animalistic cartoons in a very realistic animation. Yes, it was him, the Being-Best-Friends-With-A-Woman-Is-Better-Than-Getting-Laid-Once God. As I chomped on the divine, giant popcorn, which was like Cheetos Flamin' Hot Popcorn but with KFC India Hot Wings masala instead, I saw the light and sought out my best friend, as I leaned my head on her shoulder, contemplating the very meaning of existence and the nature of the universe.
""","c":'''
It’s late, you’ve been in bed sleeping for a few hours. I crawl under the covers. I always sleep naked, so the cool sheets give my whole body goosebumps. I snuggle on my side into the blankets and shiver.\n\nSuddenly I feel you behind me, pressing your warm body to mine, wrapping your arms tight around me. You rub my skin firmly, warming me. Your hand makes its way to my hip and pulls me close. Your cock is hard and throbbing; pulsing between my ass cheeks. I push back, wiggling my hips. Your other hand moves up my body and moves around my neck. You gently squeeze and tilt my head back. You start to grip my neck tighter. You begin to whisper sweet and dirty things in my ear; telling me how you can’t wait to fill me, how badly you want me, how your cock will fit perfectly in my tight pussy.\n \nYour hand on my hip slides down between my legs. You touch my wet pussy, but I can tell you’re not satisfied. You know how to fix that though. You grip my throat tight and whisper:\n\n“Baby…make Daddy proud”\n\nI can feel my pussy soak your fingers when you say that. You know I love it when you’re my Daddy. You chuckle and moan your approval.\n\n“Mmm. My baby girl needs her Daddy to fill her pussy. Are you going to be good and take your Daddy’s big cock?”\n\nAs you say that your fingers slide deep into my pussy. I try to scream, but your hand on my throat muffles the sound. I grind back to try to find release. You’re right - I am so turned on when you take control of my body. I push back on your fingers and start to thrust. I’m desperate to cum for you; to make my Daddy proud. I can feel you smile as I wantonly fuck your fingers.\n\n“That’s right. Be a good girl and cum for your Daddy.”\n\nI let out a moan. My whole body tenses up and my back arches as I gush on your fingers. My pussy clenches and spasms, trying to find what it desperately needs: my Daddy’s hard cock to stuff it full of cum. You sense my need and chuckle in my ear.\n\n“Come on my greedy little slut. Show your Daddy how bad you need it.”\n\nI roll towards you on my other side and wrap my leg around your waist. My hand goes between our legs and grips your hard cock. I can feel precum oozing from the tip. I play with your cock and guide it between my legs where my spread, swollen pussy is waiting for you. I thrust forward so that the tip of your cock is touching my clit. I guide you around, smearing my clit with your precum. Your hand moves to my ass and forcefully spreads me apart, causing the tip of your cock to slip into my wet pussy. I clench around you, trying to draw you in deeper. The penetration is enough to drive me wild. I’m so close to cumming for you, but I need more.\n\nWithout warning you wrap your arms tight around me, pin me to your chest, and roll on to your back. You grip my hair and kiss me deeply. The tip of your cock is teasing my pussy. I’m trying to grind my hips to take you in. You move your hands down to my ass and give me a sharp swat. I yelp in pain and surprise. You use your hands to spread my ass wide as you tell me,\n\n“Baby, put it in. Bounce on your Daddy’s cock.”\n\nI don’t waste a moment. I slide down your thick shaft and stay there a moment, savoring the feeling of being full of your cock. It’s short-lived as I quickly feel the sting of your hand on my ass.\n\n“Bounce baby girl. Make Daddy drain his balls in your hungry little pussy.”\n\nI immediately start sliding up and down your thick cock. I can feel my cum dripping down your shaft. 
I feel my pussy lips spread and stretch around your cock as I start to fuck you. My pussy is so wet I can hear it each time I thrust your cock deep. My tits bounce as I ride you. Your fingers dig into my hips. I can feel your cock twitch and throb. You’re so close. I need your cum.\n\nI stop a moment to put my feet under me and raise myself up in a squatting position. I know this is what you really wanted all along; it will make you cum so hard. I start with shallow thrusts that tease your cockhead. I move down with deep, slower thrusts and feel the tip of your cock swell as I do. I smile. With my final thrusts I take all of your cock. I clench my pussy as I lift myself back to the tip and that’s when I feel the spurts of your thick, hot cum. I lower myself back down as you twitch, pushing your cum deep. You hold me there so you can pump me full. I grind to make sure your balls are totally empty. I groan your name as I cum for the last time.\n\nI shift back to my knees and bend forward to collapse on your chest. Your breathing is heavy but starting to slow. You wrap your arms around me and start giving me little kisses all over my neck. You whisper in my ear how much you love me, how you love it when I get so turned on when you talk dirty to me. You gently roll me on my back and start rubbing my sore thighs. You kiss me lovingly, bringing me down as I start to relax and drift to sleep.\n\nAs I start to feel your cum drip from my pussy I think, “I can’t wait until my Daddy fucks me again.”
''',"d":"""
Zwemmen in Bacardi Lemon is eigenlijk helemaal niet chill. Denk maar na. Ten eerste is het zoete plakzooi. Bacardi is 300 percent suiker weet je, het is gewoon vloeibare kauwgom. Als je daarin gaat zwemmen gaat dat enorm kleven.\n\nTen tweede ben je waarschijnlijk dronken voordat je je eerste baantje hebt gezwommen. De alcohol krijg je niet alleen binnen door het door te slikken maar ook via je gehemelte enzo. Het doet pijn aan je ogen en ik gok dat het brandt in je neus. Dronken worden is op zich lachen, maar het belemmert je zwemvermogen. Plus je kan moeilijk bijhouden hoeveel je binnenkrijgt. Dus de kans bestaat dat je jezelf in een coma zuipt en/of dat je in de Bacardi Lemon verdrinkt. Een ignobele dood als je het mij vraagt.\n\nMaar wat dus wel een goeie oplossing is, is als je een Bacardi Lemon sauna maakt. Dan heb je wel de alcohol, maar niet de zoete teringzooi (want die blijft achter na distillatie en de kans op comazuipen is kleiner).
""", "o":'''
There was once an unfortunate soul known as Onestone. As is probably evident from his name, he only had one testicle. He was always ostracized and made fun of as a kid. One day, he was drinking at a bar, when he met this cute girl, Beverly Bird, who was a childhood acquaintance. She found his name and condition very quaint, but he felt offended by it, so he got her drunk, took her home, and fucked her till she died. He dumped her body in a vat of acid. Two weeks later, Beverly's cousin, Penny Bird, who had no idea of the ill fate that had befallen her sister, saw Onestone at the same bar. "Hey Onestone. How's it hanging?" she asked. Onestone, offended by this, got her drunk and took her home. But no matter how brutally he'd fuck her, she just wouldn't die. Why? Because you can't kill two Birds with Onestone.
'''
}
userinput = input("""
Choose your option:
[V]elo
[C]at
[D]utch
[O]nestone
""")
with open('retarded.txt','w') as f:
f.write(opt[userinput.lower()])
print("Output saved to file. Now force-feed all your friends that sweet, sweet pasta until they hate you.")
| 355.16
| 5,006
| 0.7636
|
4a1978bf25138ad6f72eab4fa7e482ecabc9c326
| 3,419
|
py
|
Python
|
toontown/building/DistributedTrophyMgrAI.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
toontown/building/DistributedTrophyMgrAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/building/DistributedTrophyMgrAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 2
|
2020-11-08T03:38:35.000Z
|
2021-09-02T07:03:47.000Z
|
from direct.directnotify.DirectNotifyGlobal import *
from direct.distributed.DistributedObjectAI import DistributedObjectAI
MAX_LISTING = 10
AV_ID_INDEX = 0
NAME_INDEX = 1
SCORE_INDEX = 2
class DistributedTrophyMgrAI(DistributedObjectAI):
notify = directNotify.newCategory('DistributedTrophyMgrAI')
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
# Load the leaderboard backup for this shard:
self.leaderInfo, self.trophyScores = simbase.backups.load(
'trophy-mgr', (simbase.air.districtId,), default=(([], [], []), {}))
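        # self.leaderInfo holds three parallel lists (av IDs, names, and trophy
        # scores), indexed by AV_ID_INDEX / NAME_INDEX / SCORE_INDEX above.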
def requestTrophyScore(self):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av is not None:
av.d_setTrophyScore(self.trophyScores.get(avId, 0))
def addTrophy(self, avId, name, numFloors):
if avId not in self.trophyScores:
self.trophyScores[avId] = 0
trophyScore = self.trophyScores[avId] + numFloors
self.updateTrophyScore(avId, trophyScore)
def removeTrophy(self, avId, numFloors):
if avId in self.trophyScores:
trophyScore = self.trophyScores[avId] - numFloors
self.updateTrophyScore(avId, trophyScore)
def updateTrophyScore(self, avId, trophyScore):
av = self.air.doId2do.get(avId)
if trophyScore <= 0:
# Take the player off the listing:
if avId in self.trophyScores:
del self.trophyScores[avId]
if avId in self.leaderInfo[AV_ID_INDEX]:
scoreIndex = self.leaderInfo[AV_ID_INDEX].index(avId)
del self.leaderInfo[AV_ID_INDEX][scoreIndex]
del self.leaderInfo[NAME_INDEX][scoreIndex]
del self.leaderInfo[SCORE_INDEX][scoreIndex]
else:
# Add the player to the listing if they haven't been. Otherwise,
# update their current trophy score:
self.trophyScores[avId] = trophyScore
if avId not in self.leaderInfo[AV_ID_INDEX]:
if av is None:
return
self.leaderInfo[AV_ID_INDEX].append(avId)
self.leaderInfo[NAME_INDEX].append(av.getName())
self.leaderInfo[SCORE_INDEX].append(trophyScore)
else:
scoreIndex = self.leaderInfo[AV_ID_INDEX].index(avId)
self.leaderInfo[SCORE_INDEX][scoreIndex] = trophyScore
# Truncate and reorganize the listing:
self.reorganize()
# Update the listing in the various Toon HQs:
messenger.send('leaderboardChanged')
messenger.send('leaderboardFlush')
if av is not None:
av.d_setTrophyScore(trophyScore)
simbase.backups.save('trophy-mgr', (simbase.air.districtId,),
(self.leaderInfo, self.trophyScores))
def reorganize(self):
        # Sort the leader info, highest trophy score first (sorted() also
        # handles zip() returning an iterator on Python 3):
        leaderInfo = sorted(zip(*reversed(self.leaderInfo)), reverse=True)
# Construct the new, truncated leader info:
self.leaderInfo = [[], [], []]
for trophyScore, name, avId in leaderInfo[:MAX_LISTING]:
self.leaderInfo[AV_ID_INDEX].append(avId)
self.leaderInfo[NAME_INDEX].append(name)
self.leaderInfo[SCORE_INDEX].append(trophyScore)
def getLeaderInfo(self):
return self.leaderInfo
| 37.163043
| 80
| 0.632641
|
4a1978d263afd7a127c82febc6bff217f6447c5d
| 849
|
py
|
Python
|
naeval/ner/datasets/wikiner.py
|
sdspieg/naeval
|
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
|
[
"MIT"
] | 36
|
2020-03-22T09:37:10.000Z
|
2022-01-17T14:49:30.000Z
|
naeval/ner/datasets/wikiner.py
|
sdspieg/naeval
|
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
|
[
"MIT"
] | 11
|
2020-03-25T09:39:45.000Z
|
2020-08-16T05:37:02.000Z
|
naeval/ner/datasets/wikiner.py
|
sdspieg/naeval
|
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
|
[
"MIT"
] | 6
|
2020-05-16T05:52:04.000Z
|
2022-01-16T06:45:29.000Z
|
from corus import load_wikiner as load_wikiner_
from naeval.tokenizer import Token
from ..bio import bio_spans
from ..adapt import adapt_wikiner
from ..markup import Markup
class WikinerMarkup(Markup):
@property
def adapted(self):
return adapt_wikiner(self)
def chunk_tokens(chunks, sep=1):
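    # Assign character offsets to the chunks as if they were joined by a
    # `sep`-wide gap, e.g. ['ab', 'cd'] -> Token(0, 2, 'ab'), Token(3, 5, 'cd').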
start = 0
for chunk in chunks:
stop = start + len(chunk)
yield Token(start, stop, chunk)
start = stop + sep
def parse_wikiner(record):
chunks, tags = [], []
for chunk, pos, tag in record.tokens:
chunks.append(chunk)
tags.append(tag)
text = ' '.join(chunks)
tokens = chunk_tokens(chunks)
spans = list(bio_spans(tokens, tags))
return WikinerMarkup(text, spans)
def load_wikiner(path):
for record in load_wikiner_(path):
yield parse_wikiner(record)
| 21.225
| 47
| 0.669022
|
4a19792c39952b5f8ec395436a23056744ad007b
| 5,481
|
py
|
Python
|
spio/models/put_pages_page_id_components.py
|
bsneade/statuspageio-python
|
30526a2984251885381e781b12b5070d46063537
|
[
"Apache-2.0"
] | 2
|
2020-03-02T20:32:32.000Z
|
2020-05-20T16:54:58.000Z
|
spio/models/put_pages_page_id_components.py
|
bsneade/statuspageio-python
|
30526a2984251885381e781b12b5070d46063537
|
[
"Apache-2.0"
] | null | null | null |
spio/models/put_pages_page_id_components.py
|
bsneade/statuspageio-python
|
30526a2984251885381e781b12b5070d46063537
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Statuspage API
# Code of Conduct Please don't abuse the API, and please report all feature requests and issues to https://help.statuspage.io/help/contact-us-30 # Rate Limiting Each API token is limited to 1 request / second as measured on a 60 second rolling window. To get this limit increased or lifted, please contact us at https://help.statuspage.io/help/contact-us-30 # Basics ## HTTPS It's required ## URL Prefix In order to maintain version integrity into the future, the API is versioned. All calls currently begin with the following prefix: https://api.statuspage.io/v1/ ## RESTful Interface Wherever possible, the API seeks to implement repeatable patterns with logical, representative URLs and descriptive HTTP verbs. Below are some examples and conventions you will see throughout the documentation. * Collections are buckets: https://api.statuspage.io/v1/pages/asdf123/incidents.json * Elements have unique IDs: https://api.statuspage.io/v1/pages/asdf123/incidents/jklm456.json * GET will retrieve information about a collection/element * POST will create an element in a collection * PATCH will update a single element * PUT will replace a single element in a collection (rarely used) * DELETE will destroy a single element ## Sending Data Information can be sent in the body as form urlencoded or JSON, but make sure the Content-Type header matches the body structure or the server gremlins will be angry. All examples are provided in JSON format, however they can easily be converted to form encoding if required. Some examples of how to convert things are below: // JSON { \"incident\": { \"name\": \"test incident\", \"components\": [\"8kbf7d35c070\", \"vtnh60py4yd7\"] } } // Form Encoded (using curl as an example): curl -X POST https://api.statuspage.io/v1/example \\ -d \"incident[name]=test incident\" \\ -d \"incident[components][]=8kbf7d35c070\" \\ -d \"incident[components][]=vtnh60py4yd7\" # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from spio.configuration import Configuration
class PutPagesPageIdComponents(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'component': 'PostPagesPageIdComponentsComponent'
}
attribute_map = {
'component': 'component'
}
def __init__(self, component=None, local_vars_configuration=None): # noqa: E501
"""PutPagesPageIdComponents - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._component = None
self.discriminator = None
if component is not None:
self.component = component
@property
def component(self):
"""Gets the component of this PutPagesPageIdComponents. # noqa: E501
:return: The component of this PutPagesPageIdComponents. # noqa: E501
:rtype: PostPagesPageIdComponentsComponent
"""
return self._component
@component.setter
def component(self, component):
"""Sets the component of this PutPagesPageIdComponents.
:param component: The component of this PutPagesPageIdComponents. # noqa: E501
:type: PostPagesPageIdComponentsComponent
"""
self._component = component
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PutPagesPageIdComponents):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PutPagesPageIdComponents):
return True
return self.to_dict() != other.to_dict()
| 45.297521
| 2,064
| 0.647874
|
4a19792fc90160824cb71ad52afbfd0ed876585e
| 8,468
|
py
|
Python
|
google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
|
tmatsuo/python-pubsub
|
bfe37ddce4d421344068aa45454ee2176c1c06c8
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
|
tmatsuo/python-pubsub
|
bfe37ddce4d421344068aa45454ee2176c1c06c8
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
|
tmatsuo/python-pubsub
|
bfe37ddce4d421344068aa45454ee2176c1c06c8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import copy
import logging
import random
import threading
import time
import six
from google.cloud.pubsub_v1.subscriber._protocol import requests
_LOGGER = logging.getLogger(__name__)
_LEASE_WORKER_NAME = "Thread-LeaseMaintainer"
_LeasedMessage = collections.namedtuple(
"_LeasedMessage", ["sent_time", "size", "ordering_key"]
)
class Leaser(object):
def __init__(self, manager):
self._thread = None
self._manager = manager
# a lock used for start/stop operations, protecting the _thread attribute
self._operational_lock = threading.Lock()
# A lock ensuring that add/remove operations are atomic and cannot be
# intertwined. Protects the _leased_messages and _bytes attributes.
self._add_remove_lock = threading.Lock()
        # Dict of ack_id -> _LeasedMessage
        self._leased_messages = {}
        """dict[str, _LeasedMessage]: A mapping of ack IDs to metadata about the
        leased message (sent time, size in bytes, and ordering key)."""
self._bytes = 0
"""int: The total number of bytes consumed by leased messages."""
self._stop_event = threading.Event()
@property
def message_count(self):
"""int: The number of leased messages."""
return len(self._leased_messages)
@property
def ack_ids(self):
"""Sequence[str]: The ack IDs of all leased messages."""
return self._leased_messages.keys()
@property
def bytes(self):
"""int: The total size, in bytes, of all leased messages."""
return self._bytes
def add(self, items):
"""Add messages to be managed by the leaser."""
with self._add_remove_lock:
for item in items:
# Add the ack ID to the set of managed ack IDs, and increment
# the size counter.
if item.ack_id not in self._leased_messages:
self._leased_messages[item.ack_id] = _LeasedMessage(
sent_time=float("inf"),
size=item.byte_size,
ordering_key=item.ordering_key,
)
self._bytes += item.byte_size
else:
_LOGGER.debug("Message %s is already lease managed", item.ack_id)
    def start_lease_expiry_timer(self, ack_ids):
        """Start the lease expiry timer for the given ack IDs.
        Args:
            ack_ids (Sequence[str]): Sequence of ack IDs for which to start
                lease expiry timers.
        """
with self._add_remove_lock:
for ack_id in ack_ids:
lease_info = self._leased_messages.get(ack_id)
# Lease info might not exist for this ack_id because it has already
# been removed by remove().
if lease_info:
self._leased_messages[ack_id] = lease_info._replace(
sent_time=time.time()
)
def remove(self, items):
"""Remove messages from lease management."""
with self._add_remove_lock:
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug("Item %s was not managed.", item.ack_id)
if self._bytes < 0:
_LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes)
self._bytes = 0
def maintain_leases(self):
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
deadline = self._manager.ack_deadline
_LOGGER.debug("The current deadline value is %d seconds.", deadline)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are beyond the max lease time. This ensures
# that in the event of a badly behaving actor, we can drop messages
# and allow the Pub/Sub server to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size, item.ordering_key)
for ack_id, item in six.iteritems(leased_messages)
if item.sent_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a streaming pull request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
self._manager.dispatcher.modify_ack_deadline(
[requests.ModAckRequest(ack_id, deadline) for ack_id in ack_ids]
)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
snooze = random.uniform(0.0, deadline * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
def start(self):
with self._operational_lock:
if self._thread is not None:
raise ValueError("Leaser is already running.")
# Create and start the helper thread.
self._stop_event.clear()
thread = threading.Thread(
name=_LEASE_WORKER_NAME, target=self.maintain_leases
)
thread.daemon = True
thread.start()
_LOGGER.debug("Started helper thread %s", thread.name)
self._thread = thread
def stop(self):
with self._operational_lock:
self._stop_event.set()
if self._thread is not None:
# The thread should automatically exit when the consumer is
# inactive.
self._thread.join()
self._thread = None
| 39.203704
| 88
| 0.602858
|
4a1979f443b7dd8b5614278b43b107112373d18f
| 8,234
|
py
|
Python
|
hypebeast/hypebeast/spiders/hypebeast_sneaker.py
|
suntian123/sneaker_crawler
|
accde7b5331abb1b6d5780a30ad74c7018bfbf71
|
[
"MIT"
] | 4
|
2019-05-05T06:26:29.000Z
|
2019-06-25T03:37:38.000Z
|
hypebeast/hypebeast/spiders/hypebeast_sneaker.py
|
suntian123/sneaker_crawler
|
accde7b5331abb1b6d5780a30ad74c7018bfbf71
|
[
"MIT"
] | null | null | null |
hypebeast/hypebeast/spiders/hypebeast_sneaker.py
|
suntian123/sneaker_crawler
|
accde7b5331abb1b6d5780a30ad74c7018bfbf71
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import os
from items import HypebeastItem
import datetime
import re
from sneakers import sneaker
class HypebeastSneakerSpider(scrapy.Spider):
name = 'hypebeast_sneaker'
page_crawled = 0
# allowed_domains = ['https://hypebeast.com/footwear']
start_urls = ['https://hypebeast.com/footwear/page/']
urls = [('hypebeast','https://hypebeast.com/footwear/page/'), ('snkr_news','https://sneakernews.com/page/')]
last_ids = {'hype': '-1', 'snkr': '-1','where2b': '-1'}
last_ids_reached = {'hype': 0, 'snkr': 0, 'where2b': 0}
most_ids = {'hype': '-1', 'snkr': '-1', 'where2b': '-1'}
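    # last_ids holds, per site, the newest article URL seen on the previous run
    # (persisted in last_doc.txt); crawling stops once that URL is reached again.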
_Debug = False
def parse(self, response):
self.last_ids = self.get_last_crawled()
for (site, url) in self.urls:
for page_num in range(4,0,-1):
crawling_url = url + str(page_num)
if(site == 'hypebeast'):
pass
#if not(self.last_ids_reached['hype']):
#yield scrapy.Request(crawling_url, callback=self.parse_hype_pages)
else:
if not (self.last_ids_reached['snkr']):
yield scrapy.Request(crawling_url, callback=self.parse_snkr_pages)
def parse_hype_pages(self, response):
if(self.last_ids_reached['hype']):
return
item = HypebeastItem()
selector = scrapy.Selector(response)
        articles = selector.xpath('//div[@class="post-box-content-container"]')  # find the div for each article
for article in articles:
title_div = article.xpath('div[@class="post-box-content-title"]')
artical_mata = article.xpath('div[@class="post-box-content-meta"]')
artical_url = title_div.xpath('a/@href').extract()[0]
if str(artical_url) == self.last_ids['hype'].strip('\n'):
self.last_ids_reached['hype'] = 1
return
if self.most_ids['hype'] == '-1':
self.most_ids['hype'] = artical_url
title_text = title_div.xpath('a/@title').extract()[0]
artical_mata = artical_mata.xpath('div[@class="post-box-content-meta-time-info"]')[0]
datetime = artical_mata.xpath('span[@class="time"]/time/text()').extract()[0]
views = artical_mata.xpath('div[@class="post-box-stats"]/hype-count/span[@class="hype-count"]/text()').extract()[0]
views = views.replace("\n", "")
views = views.replace(" ", "")
views = views.replace(",", "")
views = views.strip('Hypes')
views = int(views)
self.page_crawled += 1
item['url'] = artical_url
item['title'] = title_text
item['views'] = views
item['time'] = self.convert_hype_time(datetime)
yield item
def parse_snkr_pages(self, response):
if(self.last_ids_reached['snkr']):
return
selector = scrapy.Selector(response)
articles = selector.xpath('//div[contains(@class, "-post-box")]')
title_div = articles.xpath('//div[contains(@class, "post-content")]')
post_box = title_div[0]
big_artical_url = post_box.xpath('//h2/a/@href').extract()
article_url = big_artical_url + post_box.xpath('//h4/a/@href').extract()
for url in article_url:
if url == self.last_ids['snkr'].strip('\n'):
self.last_ids_reached['snkr'] = 1
return
yield scrapy.Request(url, callback=self.process_snkr_page)
def process_snkr_page(self, response):
        if str(response.url) == self.last_ids['snkr'].strip('\n'):
            self.last_ids_reached['snkr'] = 1
return
if (self.last_ids_reached['snkr']):
return
print("\n================================Getting SNKRS page:{}==========================\n".format(response.url))
item = HypebeastItem()
snkrs = ""
time = response.url[24:34]
if (self.is_today(time) and self.most_ids['snkr'] == '-1'):
self.most_ids['snkr'] = response.url
self.write_last_crawled(self.most_ids)
selector = scrapy.Selector(response)
title = selector.xpath('/html/body/div[1]/div[2]/div/div[1]/div[1]/h1/text()').extract_first()
votes = selector.xpath('//div[@class = "vote-box"]/div[@class="post-ratings"]/span/i/text()').extract_first().strip(" ")
votes = votes.strip("VOTES")
print('-----------Titel = {}-----------\n'.format(title))
print('-----------Votes = {}-----------\n'.format(votes))
print('-----------Time = {}-----------\n'.format(time))
release_divs = selector.xpath('//blockquote[@class = "releases"]/p')
print('-----------Found {} Release Divs:{}\n'.format(len(release_divs), release_divs))
for release_div in release_divs:
snkr = sneaker()
prize = "".join(release_div.xpath('text()').extract()).strip("\n$").strip(' ')
print('-----------Prize = {}-----------\n'.format(prize))
info = release_div.xpath('strong')
if len(info) > 1:
date = info[1].xpath('text()').extract()[0].strip("Release Date: ")
else:
date = "unknown"
name = info[0].xpath('text()').extract()[0]
info = release_div.xpath('//small/text()').extract()
color = '-'
code = '-'
while(len(info)>0):
if("Color: "in info[0]):
color = info[0].strip("Color: ")
elif("Style Code: " in info[0]):
code = info[0].strip("Style Code: ")
del(info[0])
snkr.name(name)
snkr.color(color)
snkr.prize(prize)
snkr.release(date)
snkr.id(code)
print(str(snkr))
snkrs += snkr.snkr_name
snkrs += ", "
item['url'] = response.url
item['time'] = time
item['votes'] = votes
item['title'] = title
item['sneaker'] = str(snkrs)
self.page_crawled += 1
yield item
def get_last_crawled(self):
last_doc = open("last_doc.txt",'r')
result = {'hype': '', 'snkr': '', 'where2b': ''}
if(os.stat('last_doc.txt').st_size==0):
last_doc.close()
return result
for line in last_doc:
id_entry = line.split(',')
if id_entry[0] == 'where2b':
result['where2b'] = id_entry[-1]
elif id_entry[0] == 'snkr':
result['snkr'] = id_entry[-1]
elif id_entry[0] == 'hype':
result['hype'] = id_entry[-1]
last_doc.close()
return result
def write_last_crawled(self, last_crawled):
last_doc = open("last_doc.txt",'w')
for site,url in last_crawled.items():
last_doc.write('{},{}\n'.format(site,url))
last_doc.close()
def is_today(self, date):
'''
Check if the input date is the current date
:param date: string e.g.: 2018/07/23
:return: bool
'''
if(datetime.datetime.now().strftime("%Y/%m/%d") == date):
return True
else:
return False
def convert_hype_time(self, text_time):
result = ''
if("ago" in text_time):
reobject = re.match(r'^(\d+) ([\w]+) ago', text_time)
if('Hr' in reobject.group(2) or 'Min' in reobject.group(2)):
result = datetime.datetime.now().strftime("%Y/%m/%d")
if ('day' in reobject.group(2)):
time_obj = datetime.datetime.today() - datetime.timedelta(days=int(reobject.group(1)))
result = time_obj.strftime("%Y/%m/%d")
else:
time_obj = datetime.datetime.strptime(text_time, '%b %d, %Y')
result = time_obj.strftime("%Y/%m/%d")
return result
def get_inpage_sneaker(self, url):
print("\n================================Getting Request From({})==========================\n".format(url))
return scrapy.Request(url, callback=self.process_snkr_page)
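# --- Editorial note (not part of the original spider source) ---
# last_doc.txt, read by get_last_crawled() and written by write_last_crawled(),
# is a plain "site,url" bookmark file with one entry per section, e.g.
# (placeholder URLs, not real crawl data):
#   hype,https://hypebeast.com/2018/7/example-article
#   snkr,https://hypebeast.com/2018/7/example-snkr-post
#   where2b,https://example.com/example-post
# convert_hype_time() normalises both relative and absolute timestamps, e.g.
#   "3 Hrs ago"    -> today's date as "YYYY/MM/DD"
#   "2 days ago"   -> today minus two days
#   "Jul 23, 2018" -> "2018/07/23"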
| 39.397129
| 128
| 0.527083
|
4a197b78d70afc84d61a3ad46a0ee564bbeab103
| 5,227
|
py
|
Python
|
template_profiler_panel/panels/template.py
|
Penagwin/django-debug-toolbar-template-profiler
|
d8a908a5c36c4d40dfeb975ff04e016da6cacc8e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
template_profiler_panel/panels/template.py
|
Penagwin/django-debug-toolbar-template-profiler
|
d8a908a5c36c4d40dfeb975ff04e016da6cacc8e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
template_profiler_panel/panels/template.py
|
Penagwin/django-debug-toolbar-template-profiler
|
d8a908a5c36c4d40dfeb975ff04e016da6cacc8e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import inspect
from collections import defaultdict
from time import time
import wrapt
from django.dispatch import Signal
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.utils import contrasting_color_generator
template_rendered = Signal(providing_args=['instance', 'start', 'end', 'level'])
class TemplateProfilerPanel(Panel):
'''
Displays template rendering times on the request timeline
'''
template = 'template_profiler_panel/template.html'
def __init__(self, *args, **kwargs):
self.colors = {}
self.templates = []
self.color_generator = contrasting_color_generator()
self.t_min = 0
self.t_max = 0
self.total = 0
self.monkey_patch_template_classes()
self.is_enabled = False
template_rendered.connect(self.record)
super(TemplateProfilerPanel, self).__init__(*args, **kwargs)
have_monkey_patched_template_classes = False
@classmethod
def monkey_patch_template_classes(cls):
if cls.have_monkey_patched_template_classes:
return
from django.template import Template as DjangoTemplate
template_classes = [DjangoTemplate]
try:
from jinja2 import Template as Jinja2Template
except ImportError:
pass
else:
template_classes.append(Jinja2Template)
@wrapt.decorator
def render_wrapper(wrapped, instance, args, kwargs):
start = time()
result = wrapped(*args, **kwargs)
end = time()
stack_depth = 1
current_frame = inspect.currentframe()
while True:
current_frame = current_frame.f_back
if current_frame is None:
break
stack_depth += 1
template_rendered.send(
sender=instance.__class__,
instance=instance,
start=start,
end=end,
level=stack_depth,
)
return result
for template_class in template_classes:
template_class.render = render_wrapper(template_class.render)
cls.have_monkey_patched_template_classes = True
@property
def nav_title(self):
return _('Template Profiler')
@property
def nav_subtitle(self):
return _('{} calls in {:.2f} ms').format(
self.total, (self.t_max - self.t_min) * 1000.0)
@property
def title(self):
return _('Template Rendering Time')
def _get_color(self, level):
return self.colors.setdefault(level, next(self.color_generator))
def record(self, instance, start, end, level, **kwargs):
if not self.enabled:
return
template_name = instance.name
# Logic copied from django-debug-toolbar:
# https://github.com/jazzband/django-debug-toolbar/blob/5d095f66fde8f10b45a93c0b35be0a85762b0458/debug_toolbar/panels/templates/panel.py#L77
is_skipped_template = isinstance(template_name, str) and (
template_name.startswith("debug_toolbar/")
or template_name.startswith(
tuple(self.toolbar.config["SKIP_TEMPLATE_PREFIXES"])
)
)
if is_skipped_template:
return
bg = self._get_color(level)
text = '#ffffff' if int(bg[1:], 16) < 0x8fffff else '#000000'
color = {'bg': bg, 'text': text}
self.templates.append({
'start': start,
'end': end,
'time': (end - start) * 1000.0,
'level': level,
'name': template_name,
'color': color,
})
def enable_instrumentation(self):
self.is_enabled = True
def disable_instrumentation(self):
self.is_enabled = False
def _calc_p(self, part, whole):
return (part / whole) * 100.0
def _calc_timeline(self, start, end):
result = {}
result['offset_p'] = self._calc_p(
start - self.t_min, self.t_max - self.t_min)
result['duration_p'] = self._calc_p(
end - start, self.t_max - self.t_min)
result['rel_duration_p'] = self._calc_p(
result['duration_p'], 100 - result['offset_p'])
return result
def generate_stats(self, request, response):
summary = defaultdict(float)
# Collect stats
for template in self.templates:
if self.t_min == 0:
self.t_min = template['start']
elif template['start'] < self.t_min:
self.t_min = template['start']
if template['end'] > self.t_max:
self.t_max = template['end']
summary[template['name']] += template['time']
# Calc timelines
for template in self.templates:
template.update(
self._calc_timeline(template['start'], template['end']))
self.total = len(self.templates)
self.record_stats(
{'templates': sorted(self.templates, key=lambda d: d['start']),
'summary': sorted(summary.items(), key=lambda t: -t[1])})
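# --- Editorial configuration sketch (assumed settings.py snippet, not part of this module) ---
# The panel would typically be activated by adding its dotted path to the
# debug toolbar's panel list:
#
#   DEBUG_TOOLBAR_PANELS = [
#       # ... the default panels ...
#       "template_profiler_panel.panels.template.TemplateProfilerPanel",
#   ]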
| 30.389535
| 148
| 0.598623
|
4a197b906b5597cf233ce958fc7fb56937a6dee3
| 101
|
py
|
Python
|
examples/cors_per_route.py
|
izi-global/izir
|
d1a4bfb5c082c3de1956402ef0280564014a3bd8
|
[
"MIT"
] | null | null | null |
examples/cors_per_route.py
|
izi-global/izir
|
d1a4bfb5c082c3de1956402ef0280564014a3bd8
|
[
"MIT"
] | 5
|
2021-03-18T21:01:05.000Z
|
2022-03-11T23:29:48.000Z
|
examples/cors_per_route.py
|
izi-global/izir
|
d1a4bfb5c082c3de1956402ef0280564014a3bd8
|
[
"MIT"
] | null | null | null |
import izi
@izi.get()
def cors_supported(cors: izi.directives.cors="*"):
return "Hello world!"
| 14.428571
| 50
| 0.683168
|
4a197c5d5eb4a31d4a2e698d001a1ac85fb34bed
| 7,944
|
py
|
Python
|
src/practice_problem2.py
|
bednartc/Exam2Practice
|
3856b9a899d8144a7356e225e161e5fb100ddb4a
|
[
"MIT"
] | null | null | null |
src/practice_problem2.py
|
bednartc/Exam2Practice
|
3856b9a899d8144a7356e225e161e5fb100ddb4a
|
[
"MIT"
] | null | null | null |
src/practice_problem2.py
|
bednartc/Exam2Practice
|
3856b9a899d8144a7356e225e161e5fb100ddb4a
|
[
"MIT"
] | null | null | null |
"""
PRACTICE Test 2, practice_problem 2.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Travis Bednarek.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
########################################################################
import simple_testing as st
def main():
""" Calls the TEST functions in this module. """
run_test_practice_problem2a()
run_test_practice_problem2b()
# ----------------------------------------------------------------------
# Students: Some of the testing code below uses SimpleTestCase objects,
# from the imported simple_testing (st) module.
# ----------------------------------------------------------------------
def run_test_practice_problem2a():
""" Tests the practice_problem2a function. """
# ------------------------------------------------------------------
# Done: 2. Implement this TEST function.
# It TESTS the practice_problem2a function defined below.
# Include at least ** 4 reasonable ** tests.
#
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 3
# TIME ESTIMATE: 5 minutes.
####################################################################
print()
print('--------------------------------------------------')
print('Testing the practice_problem2a function:')
print('--------------------------------------------------')
sequence = [2,10,5,-20,8]
delta = 6
expected = [8,16,11,-14,14]
print('expected:', expected)
print('actual:', practice_problem2a(sequence, delta))
def practice_problem2a(sequence, delta):
"""
What comes in:
-- A sequence of integers, e.g. ([2, 10, 5, -20, 8])
-- A number delta
What goes out:
-- Returns a new list that is the same as the given list,
but with each number in the list having had the given
delta
added to it (see example below)
Side effects: None.
Example:
Given the list [2, 10, 5, -20, 8] and the number 6,
this problem returns [8, 16, 11, -14, 14]
Type hints:
:type sequence: [int]
:type delta: int
"""
####################################################################
# Done: 3. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 5 minutes.
####################################################################
    # Build a new list so the caller's sequence is left unchanged,
    # as promised by the docstring ("Returns a new list", "Side effects: None").
    result = []
    for k in range(len(sequence)):
        result.append(sequence[k] + delta)
    return result
def run_test_practice_problem2b():
""" Tests the practice_problem2b function. """
# ------------------------------------------------------------------
# 4 tests, plus a 5th after these.
# They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem2b(('hello', 'Bye', 'ok joe'))
# and compare the returned value against 'hBo' (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem2b,
[('hello', 'Bye', 'ok joe')],
'hBo'),
st.SimpleTestCase(practice_problem2b,
[('Alice', 'Bob', 'Carson', 'Devi')],
'ABCD'),
st.SimpleTestCase(practice_problem2b,
[('', 'tricky', '', 'one, no?', '!')],
'to!'),
st.SimpleTestCase(practice_problem2b,
[('my very long string', 'ok', 'mmmm')],
'mom'),
]
jokes = """
Q: What is it called when a cat wins a dog show?
A: A CAT-HAS-TROPHY!
Q: What do you call a pile of kittens?
A: a meowntain
Q: Why don't cats like online shopping?
A: They prefer a cat-alogue.
Q: What did the cat say when he lost all his money?
A: I'm paw!
Q: Did you hear about the cat who swallowed a ball of yarn?
A: She had a litter of mittens.
Q: What do you call a lion who has eaten your mother's sister?
A: An aunt-eater!
Q. How do you know when your cat's done cleaning herself?
A. She's smoking a cigarette.
source: http://www.jokes4us.com/animaljokes/catjokes.html
"""
# 5th test: Split jokes at spaces to get a list of strings.
sequence = jokes.split()
answer = ('QWiicwacwadsAACQWdycapokAamQWdclosAT' +
'pacQWdtcswhlahmAIpQDyhatcwsaboyAShalom' +
'QWdycalwheymsAAaQHdykwycdchASsacsh')
tests.append(st.SimpleTestCase(practice_problem2b,
[sequence],
answer))
# ------------------------------------------------------------------
# Run the 5 tests in the tests list constructed above.
# ------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem2b', tests)
def practice_problem2b(sequence):
"""
What comes in:
-- A sequence of strings, e.g. ('hello', 'Bye', 'ok joe')
What goes out:
-- Returns the string that contains the first letter in
each of the strings in the given sequence,
in the order in which they appear in the sequence.
(So 'hBo' for the example sequence above).
Side effects: None.
Examples:
Given ['hello', 'Bye', 'ok joe'] returns 'hBo'.
      Given ('Alice', 'Bob', 'Carson', 'Devi') returns 'ABCD'.
Given ('', 'tricky', '', 'one, no?', '!') returns 'to!'
Given [] returns ''
Given ('my very long string', 'ok', 'mmmm') returns 'mom'
Type hints:
:type sequence [str]
"""
####################################################################
# Done: 4. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 10 minutes.
####################################################################
word = ''
for k in range (len(sequence)):
length = len(sequence[k])
if length < 1:
word = word
else:
word = word + sequence[k][0]
return word
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 38.941176
| 72
| 0.482125
|
4a197d3f60d0e0063c636ba18cafb29af21167cd
| 3,750
|
py
|
Python
|
src/gen_data.py
|
Atharva-Gundawar/Not-So-Deep-Face
|
b4721078317ae67fa4f18e0a875329f90be2c112
|
[
"MIT"
] | null | null | null |
src/gen_data.py
|
Atharva-Gundawar/Not-So-Deep-Face
|
b4721078317ae67fa4f18e0a875329f90be2c112
|
[
"MIT"
] | null | null | null |
src/gen_data.py
|
Atharva-Gundawar/Not-So-Deep-Face
|
b4721078317ae67fa4f18e0a875329f90be2c112
|
[
"MIT"
] | null | null | null |
import os
import cv2
import dlib
import time
import argparse
import numpy as np
from imutils import video
DOWNSAMPLE_RATIO = 4
def reshape_for_polyline(array):
return np.array(array, np.int32).reshape((-1, 1, 2))
def main():
os.makedirs('original', exist_ok=True)
os.makedirs('landmarks', exist_ok=True)
cap = cv2.VideoCapture(args.filename)
fps = video.FPS().start()
count = 0
while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # stop cleanly when the capture runs out of frames
            break
        frame_resize = cv2.resize(frame, None, fx=1 / DOWNSAMPLE_RATIO, fy=1 / DOWNSAMPLE_RATIO)
gray = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2GRAY)
faces = detector(gray, 1)
black_image = np.zeros(frame.shape, np.uint8)
t = time.time()
# Perform if there is a face detected
if len(faces) == 1:
for face in faces:
detected_landmarks = predictor(gray, face).parts()
landmarks = [[p.x * DOWNSAMPLE_RATIO, p.y * DOWNSAMPLE_RATIO] for p in detected_landmarks]
jaw = reshape_for_polyline(landmarks[0:17])
left_eyebrow = reshape_for_polyline(landmarks[22:27])
right_eyebrow = reshape_for_polyline(landmarks[17:22])
nose_bridge = reshape_for_polyline(landmarks[27:31])
lower_nose = reshape_for_polyline(landmarks[30:35])
left_eye = reshape_for_polyline(landmarks[42:48])
right_eye = reshape_for_polyline(landmarks[36:42])
outer_lip = reshape_for_polyline(landmarks[48:60])
inner_lip = reshape_for_polyline(landmarks[60:68])
color = (255, 255, 255)
thickness = 3
cv2.polylines(black_image, [jaw], False, color, thickness)
cv2.polylines(black_image, [left_eyebrow], False, color, thickness)
cv2.polylines(black_image, [right_eyebrow], False, color, thickness)
cv2.polylines(black_image, [nose_bridge], False, color, thickness)
cv2.polylines(black_image, [lower_nose], True, color, thickness)
cv2.polylines(black_image, [left_eye], True, color, thickness)
cv2.polylines(black_image, [right_eye], True, color, thickness)
cv2.polylines(black_image, [outer_lip], True, color, thickness)
cv2.polylines(black_image, [inner_lip], True, color, thickness)
# Display the resulting frame
count += 1
print(count)
cv2.imwrite("original/{}.png".format(count), frame)
cv2.imwrite("landmarks/{}.png".format(count), black_image)
fps.update()
print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
            if count == args.number:  # stop once the requested number of frames has been captured
break
elif cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
print("No face detected")
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', dest='filename', type=str, help='Name of the video file.')
parser.add_argument('--num', dest='number', type=int, help='Number of train data to be created.')
parser.add_argument('--landmark-model', dest='face_landmark_shape_file', type=str, help='Face landmark model file.')
args = parser.parse_args()
    # Create the face detector and landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args.face_landmark_shape_file)
main()
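# --- Editorial usage sketch (assumed invocation, not from the original repo) ---
# The script expects a video file, a frame count and a dlib landmark model, e.g.:
#
#   python gen_data.py --file input.mp4 --num 400 \
#       --landmark-model shape_predictor_68_face_landmarks.dat
#
# shape_predictor_68_face_landmarks.dat is the standard 68-point dlib model;
# any compatible shape-predictor file should work.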
| 38.265306
| 120
| 0.617333
|
4a197d50de68ef762da231759d8a463f76c24561
| 68
|
py
|
Python
|
lightautoml/addons/interpretation/__init__.py
|
Zhurik/LightAutoML
|
506d0602f40eca79ed0e5d58e7d4f71aeb1d8059
|
[
"Apache-2.0"
] | null | null | null |
lightautoml/addons/interpretation/__init__.py
|
Zhurik/LightAutoML
|
506d0602f40eca79ed0e5d58e7d4f71aeb1d8059
|
[
"Apache-2.0"
] | null | null | null |
lightautoml/addons/interpretation/__init__.py
|
Zhurik/LightAutoML
|
506d0602f40eca79ed0e5d58e7d4f71aeb1d8059
|
[
"Apache-2.0"
] | 1
|
2021-12-08T13:52:45.000Z
|
2021-12-08T13:52:45.000Z
|
from .lime import LimeTextExplainer
__all__ = ['LimeTextExplainer']
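# Editorial usage note (import path follows this package layout; illustration only):
#   from lightautoml.addons.interpretation import LimeTextExplainer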
| 22.666667
| 35
| 0.808824
|
4a197da24bc1970966bf66e0c43b72ad5bfee80a
| 2,032
|
py
|
Python
|
Course_Material/MyCustomEnv.py
|
rshnn/Practical-RL
|
f7688e224a342c7f67478f2c4cd6bb7b1a122205
|
[
"MIT"
] | 3
|
2022-02-14T17:59:56.000Z
|
2022-02-15T10:08:43.000Z
|
Course_Material/MyCustomEnv.py
|
rshnn/Practical-RL
|
f7688e224a342c7f67478f2c4cd6bb7b1a122205
|
[
"MIT"
] | 21
|
2021-11-02T21:35:26.000Z
|
2022-01-17T18:50:42.000Z
|
Course_Material/MyCustomEnv.py
|
rshnn/Practical-RL
|
f7688e224a342c7f67478f2c4cd6bb7b1a122205
|
[
"MIT"
] | 2
|
2021-11-24T15:25:17.000Z
|
2022-02-14T19:04:56.000Z
|
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
class MyCustomEnvClass(gym.Env):
def __init__(self):
self.action_space = gym.spaces.box.Box(
low=np.array([-40.0, -40.0], dtype=np.float32),
high=np.array([40.0, 40.0], dtype=np.float32))
self.observation_space = gym.spaces.box.Box(
low=np.array([-100, -100, -100, -100, 0], dtype=np.float32),
high=np.array([100, 100, 100, 100, 360], dtype=np.float32))
#alternative way of writing the obs space if all high/low are the same: gym.spaces.box.Box(low=-100,high=100, shape=(4,), dtype=np.float32)
self.init_x = 30
self.init_y = 30
self.heading = 0
self.timestep = 0.5
self.reset()
def step(self, action):
self.heading += action[0]
self.heading = self.heading%360
throttle = action[1]/40
angle_to_move = self.heading * np.pi / 180.0
old_distance = np.linalg.norm([self.target_x - self.x, self.target_y - self.y])
self.x += throttle*self.timestep*np.cos(angle_to_move)
self.y += throttle*self.timestep*np.sin(angle_to_move)
new_distance = np.linalg.norm([self.target_x - self.x, self.target_y - self.y])
reward = float(old_distance - new_distance)
self.state = np.array([self.target_x, self.target_y, self.x, self.y, self.heading], dtype=np.float32)
done = self.done
info = {}
return self.state, reward, done, info
def render(self):
pass
def reset(self):
self.target_x = self.init_x
self.target_y = self.init_y
self.x = 0
self.y = 0
self.done = False
self.state = np.array([self.target_x, self.target_y, self.x, self.y, self.heading], dtype=np.float32)
return self.state
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
return [seed]
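# --- Editorial usage sketch (not part of the original exercise file) ---
# A minimal random-action rollout against the environment defined above:
if __name__ == "__main__":
    env = MyCustomEnvClass()
    obs = env.reset()
    for _ in range(5):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print(obs, reward)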
| 36.285714
| 147
| 0.587598
|
4a197ec9a91261201d8b87a0986a3a8b0851ad07
| 42
|
py
|
Python
|
dhlmex/version.py
|
cuenca-mx/dhlmex-python
|
3b09e172e33c56023bf702a8bb2f2d9ccf15b758
|
[
"MIT"
] | 1
|
2020-11-02T21:14:43.000Z
|
2020-11-02T21:14:43.000Z
|
dhlmex/version.py
|
cuenca-mx/dhlmex-python
|
3b09e172e33c56023bf702a8bb2f2d9ccf15b758
|
[
"MIT"
] | 5
|
2020-01-15T10:54:41.000Z
|
2021-02-26T04:11:28.000Z
|
dhlmex/version.py
|
cuenca-mx/dhlmex-python
|
3b09e172e33c56023bf702a8bb2f2d9ccf15b758
|
[
"MIT"
] | 1
|
2020-11-02T21:14:35.000Z
|
2020-11-02T21:14:35.000Z
|
__version__ = '0.0.5' # pragma: no cover
| 21
| 41
| 0.642857
|
4a197f6d318d674dd81f1ad37bfd114d1bddf696
| 5,961
|
py
|
Python
|
power_outage_notify/settings.py
|
MakingL/power_outage_notify
|
a1d18e5a2ca6fab32ef7cac241dac135aef06709
|
[
"MIT"
] | 6
|
2019-10-24T09:22:27.000Z
|
2020-12-11T02:12:35.000Z
|
power_outage_notify/settings.py
|
MakingL/power_outage_notify
|
a1d18e5a2ca6fab32ef7cac241dac135aef06709
|
[
"MIT"
] | null | null | null |
power_outage_notify/settings.py
|
MakingL/power_outage_notify
|
a1d18e5a2ca6fab32ef7cac241dac135aef06709
|
[
"MIT"
] | null | null | null |
"""
Django settings for power_outage_notify project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2$k5oiw00n1fb@rs=_&@=7#-5ugnt!ykg&+zmlld+02@3ctu$m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["*", ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_crontab',
'notify.apps.NotifyConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'power_outage_notify.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'power_outage_notify.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Email Setting
# Outgoing mail settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# SMTP server address
EMAIL_HOST = 'smtp.126.com'
# SMTP port
EMAIL_PORT = 25
# TODO: configure the mailbox used for sending mail: set your own address and its authorization code
# Your own email address
EMAIL_HOST_USER = 'xxx@126.com'
# The mailbox authorization code (not the login password)
EMAIL_HOST_PASSWORD = 'xxxx'
EMAIL_SUBJECT_PREFIX = '[Power Outage Notify]'
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# Whether to use a TLS (secure) connection when talking to the SMTP server; defaults to False
EMAIL_USE_TLS = False
# logging configurations
log_save_path = {
'server': 'log/server/service.log',
}
for log_path in log_save_path.values():
log_dir = os.path.dirname(log_path)
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
LOG_FILE_BACKUP_DAYS = 5
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '[%(asctime)s] %(levelname)s [%(funcName)s: %(filename)s, %(lineno)d] %(message)s'
},
'thread_simple': {
'format': '[%(asctime)s] %(levelname)s [thread: %(threadName)s] %(message)s'
},
'thread': {
'format': '[%(asctime)s] %(levelname)s [thread: %(threadName)s] [%(funcName)s: %(filename)s, %(lineno)d] %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'default',
},
'file_backup': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': log_save_path['server'],
'formatter': 'default',
'maxBytes': 1024 * 1024, # 1M
'backupCount': LOG_FILE_BACKUP_DAYS,
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': log_save_path['server'],
'formatter': 'default',
'when': 'midnight',
'interval': 1,
'backupCount': LOG_FILE_BACKUP_DAYS,
},
},
'loggers': {
'': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'file': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'console': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# ==== Scheduled tasks ====
# Crawl the power-outage notices once a day at 19:30
CRONJOBS = (
('30 19 * * *', 'notify.task.task_crawl_power_outage_info'),
)
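# --- Editorial note (assumed django-crontab workflow, not part of this settings file) ---
# With django_crontab in INSTALLED_APPS (as above), the job is registered and
# inspected from the command line, e.g.:
#   python manage.py crontab add
#   python manage.py crontab show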
| 27.985915
| 129
| 0.607784
|
4a197fec74450d9eab3e1711db2840fd62021959
| 240
|
py
|
Python
|
example/simul_sort.py
|
jackd/numba-neighbors
|
613fcc9be3a4050f23eb1fa319ea16b6848dc754
|
[
"MIT"
] | 5
|
2020-04-07T08:11:13.000Z
|
2022-03-01T21:43:27.000Z
|
example/simul_sort.py
|
jackd/numba-neighbors
|
613fcc9be3a4050f23eb1fa319ea16b6848dc754
|
[
"MIT"
] | 3
|
2020-06-14T22:13:51.000Z
|
2021-09-08T01:33:01.000Z
|
example/simul_sort.py
|
jackd/numba-neighbors
|
613fcc9be3a4050f23eb1fa319ea16b6848dc754
|
[
"MIT"
] | 1
|
2020-06-14T19:50:05.000Z
|
2020-06-14T19:50:05.000Z
|
import numpy as np
import sklearn.neighbors
k = 10
dist = np.random.uniform(size=(1, k,), high=100)
idx = np.random.uniform(size=(1, k,), high=1000).astype(np.int64)
sklearn.neighbors._kd_tree.simultaneous_sort(dist, idx)
print(dist[0])
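# --- Editorial note (illustration only) ---
# simultaneous_sort() reorders dist and idx in place, row by row, so that each row
# of dist is ascending and idx follows the same permutation. A NumPy-only sketch of
# the same operation for the single row used here:
#   order = np.argsort(dist[0])
#   dist[0], idx[0] = dist[0][order], idx[0][order]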
| 21.818182
| 65
| 0.729167
|
4a19801a1f86a588fd8ac13655d3e91bf4f48fda
| 4,444
|
py
|
Python
|
tensorflow/python/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras text vectorization preprocessing layer's adapt method."""
import collections
import itertools
import random
import string
import time
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
v2_compat.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and upper).
# The number of unique strings is ~2,700 (52 ** 2 == 2,704 two-letter combinations).
def word_gen():
for _ in itertools.count(1):
yield "".join(random.choice(string.ascii_letters) for i in range(2))
def get_top_k(dataset, k):
"""Python implementation of vocabulary building using a defaultdict."""
counts = collections.defaultdict(int)
for tensor in dataset:
data = tensor.numpy()
for element in data:
counts[element] += 1
sorted_vocab = [
k for k, _ in sorted(
counts.items(), key=lambda item: item[1], reverse=True)
]
if len(sorted_vocab) > k:
sorted_vocab = sorted_vocab[:k]
return sorted_vocab
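# Editorial note (illustration only): collections.Counter gives an equivalent,
# more compact way to build the same vocabulary (up to the ordering of ties):
#   counts = collections.Counter(e for t in dataset for e in t.numpy())
#   vocab = [word for word, _ in counts.most_common(k)]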
class BenchmarkAdapt(benchmark.TensorFlowBenchmark):
"""Benchmark adapt."""
def run_numpy_implementation(self, num_elements, batch_size, k):
"""Test the python implementation."""
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
batched_ds = ds.take(num_elements).batch(batch_size)
input_t = keras.Input(shape=(), dtype=dtypes.string)
layer = index_lookup.IndexLookup(
max_tokens=k,
num_oov_indices=0,
mask_token=None,
oov_token="OOV",
dtype=dtypes.string)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
starts.append(time.time())
vocab = get_top_k(batched_ds, k)
layer.set_vocabulary(vocab)
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
return avg_time
def bm_adapt_implementation(self, num_elements, batch_size, k):
"""Test the KPL adapt implementation."""
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
batched_ds = ds.take(num_elements).batch(batch_size)
input_t = keras.Input(shape=(), dtype=dtypes.string)
layer = index_lookup.IndexLookup(
max_tokens=k,
num_oov_indices=0,
mask_token=None,
oov_token="OOV",
dtype=dtypes.string)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
starts.append(time.time())
layer.adapt(batched_ds)
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
name = "index_lookup_adapt|%s_elements|vocab_size_%s|batch_%s" % (
num_elements, k, batch_size)
baseline = self.run_numpy_implementation(num_elements, batch_size, k)
extras = {
"numpy implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
def benchmark_vocab_size_by_batch(self):
for vocab_size in [100, 1000, 10000, 100000, 1000000]:
for batch in [1, 16, 2048]:
self.bm_adapt_implementation(vocab_size, batch, int(vocab_size / 10))
if __name__ == "__main__":
test.main()
| 34.992126
| 80
| 0.683843
|
4a19802b9ef9bd05bfc1ce6736ee9d37052f9048
| 595
|
py
|
Python
|
Physics250-ME27/magneticTorqueofCoilBnotIJK.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME27/magneticTorqueofCoilBnotIJK.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME27/magneticTorqueofCoilBnotIJK.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
def initialtorque():
loops = input("Input number of Loops: ")
radius = input("Input Diameter (cm): ")
current = input("Input current (A): ")
magField = input("Input the magnetic field (T): ")
angle = input("Input the given angle: ")
radius = float(radius)
loops = float(loops)
current = float(current)
magField = float(magField)
angle = float(angle)
    radius = radius / 200  # convert the entered diameter in cm to a radius in meters
torque = loops * pow(radius,2) * math.pi * current * magField * math.cos(math.radians(angle))
print(torque)
initialtorque()
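# --- Editorial worked example (made-up numbers, for illustration only) ---
# The expression above is the coil-torque formula tau = N * I * A * B * cos(theta)
# with A = pi * r**2. For N = 10 loops, a 4 cm diameter (r = 0.02 m), I = 2 A,
# B = 0.5 T and theta = 30 degrees:
#   tau = 10 * pi * 0.02**2 * 2 * 0.5 * cos(30 deg) ~= 1.09e-2 N*m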
| 33.055556
| 98
| 0.628571
|
4a19809a0e67738c7033110ed8c7a03bcddd7ffc
| 939
|
py
|
Python
|
src/204. Count Primes.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/204. Count Primes.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/204. Count Primes.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
class SolutionMine(object):
def countPrimes(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 2: return 0
A = [1] * n
numPrimes = 0
        for p in range(2, n):
if not A[p]: continue
# p is prime! clear from p*2 to p*k ... where p*k <= n
numPrimes += 1
pp = p + p
while pp < n:
A[pp] = 0
pp += p
return numPrimes
class Solution: # good solution!
# @param {integer} n
# @return {integer}
def countPrimes(self, n):
if n < 3:
return 0
primes = [True] * n
primes[0] = primes[1] = False
for i in range(2, int(n ** 0.5) + 1):
if primes[i]:
primes[i * i: n: i] = [False] * len(primes[i * i: n: i])
return sum(primes)
print(Solution().countPrimes(1))
print(Solution().countPrimes(2))
print(Solution().countPrimes(3))
print(Solution().countPrimes(6))
print(Solution().countPrimes(10))
print(Solution().countPrimes(100))
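# Editorial note: the expected outputs of the calls above are 0, 0, 1, 3, 4 and 25,
# i.e. the number of primes strictly below 1, 2, 3, 6, 10 and 100 respectively.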
| 22.902439
| 72
| 0.554846
|
4a1981c6b8c4bf7c94469afaeaa354cc9bfe546b
| 6,488
|
py
|
Python
|
Reference/qpc/examples/workstation/dpp/qview/dpp1.py
|
Harveyhubbell/Paid-RTOS
|
e56a1346cce026428c2bfef05b6a4e6bb2ee7f4e
|
[
"MIT"
] | null | null | null |
Reference/qpc/examples/workstation/dpp/qview/dpp1.py
|
Harveyhubbell/Paid-RTOS
|
e56a1346cce026428c2bfef05b6a4e6bb2ee7f4e
|
[
"MIT"
] | null | null | null |
Reference/qpc/examples/workstation/dpp/qview/dpp1.py
|
Harveyhubbell/Paid-RTOS
|
e56a1346cce026428c2bfef05b6a4e6bb2ee7f4e
|
[
"MIT"
] | null | null | null |
# This is an example of QView customization for a specific application
# (DPP in this case). This example animates the Phil images on the
# QView canvas. Additionally, there is a button in the middle of the screen,
# which, when clicked once pauses the DPP ("forks" are not being served).
# A second click on the button, "un-pauses" the DPP ("forks" are served
# to all hungry Philosophers).
#
# This version of the DPP customization uses the standard QS_QEP_STATE_ENTRY
# packet, which provides information about the current states of the dining
# Philosophers. The example also demonstrates how to intercept the QS
# "dictionary" records QS_OBJ_DICT and QS_FUN_DICT to extract the information
# about the addresses of the Philosopher objects and the states of their
# state machines.
#
class DPP:
def __init__(self):
# add commands to the Custom menu...
QView.custom_menu.add_command(label="Custom command",
command=self.cust_command)
# configure the custom QView.canvas...
QView.show_canvas() # make the canvas visible
QView.canvas.configure(width=400, height=260)
# tuple of activity images (correspond to self._philo_state)
self._act_img = (
PhotoImage(file=HOME_DIR + "/img/thinking.gif"),
PhotoImage(file=HOME_DIR + "/img/hungry.gif"),
PhotoImage(file=HOME_DIR + "/img/eating.gif"),
)
# tuple of philo canvas images (correspond to self._philo_obj)
self._philo_img = (\
QView.canvas.create_image(190, 57, image=self._act_img[0]),
QView.canvas.create_image(273, 100, image=self._act_img[0]),
QView.canvas.create_image(237, 185, image=self._act_img[0]),
QView.canvas.create_image(146, 185, image=self._act_img[0]),
QView.canvas.create_image(107, 100, image=self._act_img[0])
)
# button images for UP and DOWN
self.img_UP = PhotoImage(file=HOME_DIR + "/img/BTN_UP.gif")
self.img_DWN = PhotoImage(file=HOME_DIR + "/img/BTN_DWN.gif")
# images of a button for pause/serve
self.btn = QView.canvas.create_image(200, 120, image=self.img_UP)
QView.canvas.tag_bind(self.btn, "<ButtonPress-1>", self.cust_pause)
# request target reset on startup...
# NOTE: Normally, for an embedded application you would like
# to start with resetting the Target, to start clean with
# Qs dictionaries, etc.
#
        # However, this is a desktop application, which you cannot reset
# (and restart). Therefore, the desktop applications must be started
# *after* the QView is already running.
#reset_target()
# on_reset() callback invoked when Target-reset packet is received
# NOTE: the QS dictionaries are not known at this time yet, so
    # this callback should generally not set filters or current objects
def on_reset(self):
# (re)set the lists
self._philo_obj = [0, 0, 0, 0, 0]
self._philo_state = [0, 0, 0]
# on_run() callback invoked when the QF_RUN packet is received
# NOTE: the QS dictionaries are typically known at this time yet, so
# this callback can set filters or current objects
def on_run(self):
glb_filter("QS_QEP_TRAN")
# NOTE: the names of objects for current_obj() must match
# the QS Object Dictionaries produced by the application.
current_obj(OBJ_AO, "Table_inst")
# turn lists into tuples for better performance
self._philo_obj = tuple(self._philo_obj)
self._philo_state = tuple(self._philo_state)
# example of a custom command
def cust_command(self):
command(1, 12345)
# example of a custom interaction with a canvas object (pause/serve)
def cust_pause(self, event):
if QView.canvas.itemcget(self.btn, "image") != str(self.img_UP):
QView.canvas.itemconfig(self.btn, image=self.img_UP)
post("SERVE_SIG")
QView.print_text("Table SERVING")
else:
QView.canvas.itemconfig(self.btn, image=self.img_DWN)
post("PAUSE_SIG")
QView.print_text("Table PAUSED")
    # intercept the QS_OBJ_DICT standard packet
# this packet has the following structure:
# record-ID, seq-num, Object-ptr, Zero-terminated string
def QS_OBJ_DICT(self, packet):
data = qunpack("xxOZ", packet)
try:
# NOTE: the names of objects must match the QS Object Dictionaries
# produced by the application.
i = ("Philo_inst[0]",
"Philo_inst[1]",
"Philo_inst[2]",
"Philo_inst[3]",
"Philo_inst[4]").index(data[1])
self._philo_obj[i] = data[0]
except:
pass # dictionary for a different object
    # intercept the QS_FUN_DICT standard packet
# this packet has the following structure:
# record-ID, seq-num, Function-ptr, Zero-terminated string
def QS_FUN_DICT(self, packet):
data = qunpack("xxFZ", packet)
try:
# NOTE: the names of states must match the QS Object Dictionaries
# produced by the application.
j = ("Philo_thinking",
"Philo_hungry",
"Philo_eating").index(data[1])
self._philo_state[j] = data[0]
except:
pass # dictionary for a different state
    # intercept the QS_QEP_TRAN standard packet
# this packet has the following structure:
# record-ID, seq-num, Timestamp, Signal, Object-ptr,
# Function-ptr (source state), Function-ptr (new active state)
def QS_QEP_TRAN(self, packet):
data = qunpack("xxTSOFF", packet)
try:
i = self._philo_obj.index(data[2])
j = self._philo_state.index(data[4])
# animate the given philo image according to its activity
QView.canvas.itemconfig(self._philo_img[i],
image=self._act_img[j])
# print a message to the text view
QView.print_text("%010d Philo %d is %s"\
%(data[0], i, ("thinking", "hungry", "eating")[j]))
except:
pass # state-entry in a different object
#=============================================================================
# instantiate the DPP class and set it as the QView customization
QView.customize(DPP())
| 42.405229
| 78
| 0.625617
|
4a19821154f2b2bfbd7d439384a90c7b1a30bc70
| 3,008
|
py
|
Python
|
snips_nlu/cli/dataset/assistant_dataset.py
|
ddorian/snips-nlu
|
0934d386bb138ebb34764446416856cfac664e65
|
[
"Apache-2.0"
] | 1
|
2021-01-03T09:23:55.000Z
|
2021-01-03T09:23:55.000Z
|
snips_nlu/cli/dataset/assistant_dataset.py
|
ddorian/snips-nlu
|
0934d386bb138ebb34764446416856cfac664e65
|
[
"Apache-2.0"
] | null | null | null |
snips_nlu/cli/dataset/assistant_dataset.py
|
ddorian/snips-nlu
|
0934d386bb138ebb34764446416856cfac664e65
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals, print_function
from pathlib import Path
from snips_nlu.cli.dataset.entities import CustomEntity, create_entity
from snips_nlu.cli.dataset.intent_dataset import IntentDataset
class AssistantDataset(object):
"""Dataset of an assistant
Merges a list of :class:`.AssistantDataset` into a single dataset ready to
be used by Snips NLU
Attributes:
language (str): language of the assistant
intents_datasets (list of :class:`.IntentDataset`): data of the
assistant intents
entities (list of :class:`.Entity`): data of the assistant entities
"""
def __init__(self, language, intent_datasets, entities):
self.language = language
self.intents_datasets = intent_datasets
self.entities = entities
@classmethod
def from_files(cls, language, filenames):
"""Creates an :class:`.AssistantDataset` from a language and a list of
intent and entity files
Args:
language (str): language of the assistant
filenames (list of str): Intent and entity files.
The assistant will associate each intent file to an intent,
and each entity file to an entity. For instance, the intent
file 'intent_setTemperature.txt' will correspond to the intent
'setTemperature', and the entity file 'entity_room.txt' will
correspond to the entity 'room'.
"""
intent_filepaths = set()
entity_filepaths = set()
for filename in filenames:
filepath = Path(filename)
stem = filepath.stem
if stem.startswith("intent_"):
intent_filepaths.add(filepath)
elif stem.startswith("entity_"):
entity_filepaths.add(filepath)
else:
raise AssertionError("Filename should start either with "
"'intent_' or 'entity_' but found: %s"
% stem)
intents_datasets = [IntentDataset.from_file(f)
for f in intent_filepaths]
entities = [CustomEntity.from_file(f) for f in entity_filepaths]
entity_names = set(e.name for e in entities)
# Add entities appearing only in the intents data
for intent_data in intents_datasets:
for entity_name in intent_data.entities_names:
if entity_name not in entity_names:
entity_names.add(entity_name)
entities.append(create_entity(entity_name))
return cls(language, intents_datasets, entities)
@property
def json(self):
intents = {intent_data.intent_name: intent_data.json
for intent_data in self.intents_datasets}
entities = {entity.name: entity.json for entity in self.entities}
return dict(language=self.language, intents=intents, entities=entities)
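# --- Editorial usage sketch (file names taken from the docstring above; illustration only) ---
# dataset = AssistantDataset.from_files(
#     "en", ["intent_setTemperature.txt", "entity_room.txt"])
# print(dataset.json)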
| 39.578947
| 79
| 0.631316
|
4a198286d05e3e9e78ec0bbc376a0d05232a778b
| 154
|
py
|
Python
|
mysite/rainbowfood/apps.py
|
ChiaraDM/RainbowFood
|
d62bbd002b4aad7450ffde20a2787d424c18d2f7
|
[
"Apache-2.0"
] | null | null | null |
mysite/rainbowfood/apps.py
|
ChiaraDM/RainbowFood
|
d62bbd002b4aad7450ffde20a2787d424c18d2f7
|
[
"Apache-2.0"
] | null | null | null |
mysite/rainbowfood/apps.py
|
ChiaraDM/RainbowFood
|
d62bbd002b4aad7450ffde20a2787d424c18d2f7
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class RainbowfoodConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'rainbowfood'
| 22
| 56
| 0.772727
|
4a1982e3f5eb9d2a758adeed5e499a650df907ef
| 645
|
py
|
Python
|
profiles/util.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
profiles/util.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
profiles/util.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User, AnonymousUser
def user_authenticated(user):
"""
Checks if a user is authenticated.
:param user: User to check
    :return: True, if the user is verified and accepted general terms and conditions as well as the privacy policy;
False otherwise.
"""
if isinstance(user, User):
return \
user.is_authenticated and \
user.profile.verified and \
user.profile.accepted_general_terms_and_conditions and \
user.profile.accepted_privacy_policy
elif isinstance(user, AnonymousUser):
return False
else:
        return False
| 30.714286
| 114
| 0.669767
|
4a1983517eb0310f5573f6239de4fbec9f939069
| 30,275
|
py
|
Python
|
xarray/core/accessor_str.py
|
DocOtak/xarray
|
01a9baa01b1378cbf3f324ea3c27150a3860d3d1
|
[
"Apache-2.0"
] | null | null | null |
xarray/core/accessor_str.py
|
DocOtak/xarray
|
01a9baa01b1378cbf3f324ea3c27150a3860d3d1
|
[
"Apache-2.0"
] | null | null | null |
xarray/core/accessor_str.py
|
DocOtak/xarray
|
01a9baa01b1378cbf3f324ea3c27150a3860d3d1
|
[
"Apache-2.0"
] | 2
|
2019-08-22T21:07:03.000Z
|
2020-03-30T10:25:00.000Z
|
# The StringAccessor class defined below is an adaptation of the
# pandas string methods source code (see pd.core.strings)
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import re
import textwrap
import numpy as np
from .computation import apply_ufunc
_cpython_optimized_encoders = (
"utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii"
)
_cpython_optimized_decoders = _cpython_optimized_encoders + (
"utf-16", "utf-32"
)
def _is_str_like(x):
return isinstance(x, str) or isinstance(x, bytes)
class StringAccessor:
"""Vectorized string functions for string-like arrays.
Similar to pandas, fields can be accessed through the `.str` attribute
for applicable DataArrays.
>>> da = xr.DataArray(['some', 'text', 'in', 'an', 'array'])
    >>> da.str.len()
<xarray.DataArray (dim_0: 5)>
array([4, 4, 2, 2, 5])
Dimensions without coordinates: dim_0
"""
def __init__(self, obj):
self._obj = obj
def _apply(self, f, dtype=None):
# TODO handling of na values ?
if dtype is None:
dtype = self._obj.dtype
g = np.vectorize(f, otypes=[dtype])
return apply_ufunc(
g, self._obj, dask='parallelized', output_dtypes=[dtype])
def len(self):
'''
Compute the length of each element in the array.
Returns
-------
lengths array : array of int
'''
return self._apply(len, dtype=int)
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def get(self, i):
'''
Extract element from indexable in each element in the array.
Parameters
----------
i : int
Position of element to extract.
default : optional
Value for out-of-range index. If not specified (None) defaults to
an empty string.
Returns
-------
items : array of objects
'''
obj = slice(-1, None) if i == -1 else slice(i, i + 1)
return self._apply(lambda x: x[obj])
def slice(self, start=None, stop=None, step=None):
'''
Slice substrings from each element in the array.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
sliced strings : same type as values
'''
s = slice(start, stop, step)
f = lambda x: x[s]
return self._apply(f)
def slice_replace(self, start=None, stop=None, repl=''):
'''
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified, the sliced region
is replaced with an empty string.
Returns
-------
replaced : same type as values
'''
repl = self._obj.dtype.type(repl)
def f(x):
if len(x[start:stop]) == 0:
local_stop = start
else:
local_stop = stop
y = self._obj.dtype.type('')
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return self._apply(f)
def capitalize(self):
'''
Convert strings in the array to be capitalized.
Returns
-------
capitalized : same type as values
'''
return self._apply(lambda x: x.capitalize())
def lower(self):
'''
Convert strings in the array to lowercase.
Returns
-------
        lowered : same type as values
'''
return self._apply(lambda x: x.lower())
def swapcase(self):
'''
Convert strings in the array to be swapcased.
Returns
-------
swapcased : same type as values
'''
return self._apply(lambda x: x.swapcase())
def title(self):
'''
Convert strings in the array to titlecase.
Returns
-------
titled : same type as values
'''
return self._apply(lambda x: x.title())
def upper(self):
'''
Convert strings in the array to uppercase.
Returns
-------
uppered : same type as values
'''
return self._apply(lambda x: x.upper())
def isalnum(self):
'''
Check whether all characters in each string are alphanumeric.
Returns
-------
isalnum : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isalnum(), dtype=bool)
def isalpha(self):
'''
Check whether all characters in each string are alphabetic.
Returns
-------
isalpha : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isalpha(), dtype=bool)
def isdecimal(self):
'''
Check whether all characters in each string are decimal.
Returns
-------
isdecimal : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isdecimal(), dtype=bool)
def isdigit(self):
'''
Check whether all characters in each string are digits.
Returns
-------
isdigit : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isdigit(), dtype=bool)
def islower(self):
'''
Check whether all characters in each string are lowercase.
Returns
-------
islower : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.islower(), dtype=bool)
def isnumeric(self):
'''
Check whether all characters in each string are numeric.
Returns
-------
isnumeric : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isnumeric(), dtype=bool)
def isspace(self):
'''
Check whether all characters in each string are spaces.
Returns
-------
isspace : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isspace(), dtype=bool)
def istitle(self):
'''
Check whether all characters in each string are titlecase.
Returns
-------
istitle : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.istitle(), dtype=bool)
def isupper(self):
'''
Check whether all characters in each string are uppercase.
Returns
-------
isupper : array of bool
Array of boolean values with the same shape as the original array.
'''
return self._apply(lambda x: x.isupper(), dtype=bool)
def count(self, pat, flags=0):
'''
Count occurrences of pattern in each string of the array.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
        :class:`~xarray.DataArray`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
Returns
-------
counts : array of int
'''
pat = self._obj.dtype.type(pat)
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return self._apply(f, dtype=int)
def startswith(self, pat):
'''
Test if the start of each string element matches a pattern.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
Returns
-------
startswith : array of bool
An array of booleans indicating whether the given pattern matches
the start of each string element.
'''
pat = self._obj.dtype.type(pat)
f = lambda x: x.startswith(pat)
return self._apply(f, dtype=bool)
def endswith(self, pat):
'''
Test if the end of each string element matches a pattern.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
Returns
-------
endswith : array of bool
            An array of booleans indicating whether the given pattern matches
the end of each string element.
'''
pat = self._obj.dtype.type(pat)
f = lambda x: x.endswith(pat)
return self._apply(f, dtype=bool)
def pad(self, width, side='left', fillchar=' '):
'''
Pad strings in the array up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
filled : same type as values
Array with a minimum number of char in each element.
'''
width = int(width)
fillchar = self._obj.dtype.type(fillchar)
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if side == 'left':
f = lambda s: s.rjust(width, fillchar)
elif side == 'right':
f = lambda s: s.ljust(width, fillchar)
elif side == 'both':
f = lambda s: s.center(width, fillchar)
else: # pragma: no cover
raise ValueError('Invalid side')
return self._apply(f)
def center(self, width, fillchar=' '):
'''
Filling left and right side of strings in the array with an
additional character.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : same type as values
'''
return self.pad(width, side='both', fillchar=fillchar)
def ljust(self, width, fillchar=' '):
'''
Filling right side of strings in the array with an additional
character.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : same type as values
'''
return self.pad(width, side='right', fillchar=fillchar)
def rjust(self, width, fillchar=' '):
'''
Filling left side of strings in the array with an additional character.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : same type as values
'''
return self.pad(width, side='left', fillchar=fillchar)
def zfill(self, width):
'''
Pad strings in the array by prepending '0' characters.
Strings in the array are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the array with length greater or equal to `width` are unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
than `width` will be prepended with '0' characters.
Returns
-------
filled : same type as values
'''
return self.pad(width, side='left', fillchar='0')
def contains(self, pat, case=True, flags=0, regex=True):
'''
Test if pattern or regex is contained within a string of the array.
Return boolean array based on whether a given pattern or regex is
contained within a string of the array.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
contains : array of bool
An array of boolean values indicating whether the
given pattern is contained within the string of each element
of the array.
'''
pat = self._obj.dtype.type(pat)
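# Two code paths: with regex=True the pattern is compiled (adding
# re.IGNORECASE when case=False); with regex=False a plain substring test is
# used, and the case-insensitive variant uppercases both pattern and data
# before re-dispatching through contains(regex=False).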
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0: # pragma: no cover
raise ValueError("This pattern has match groups.")
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
uppered = self._obj.str.upper()
return uppered.str.contains(pat.upper(), regex=False)
return self._apply(f, dtype=bool)
def match(self, pat, case=True, flags=0):
'''
Determine if each string matches a regular expression.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
matched : array of bool
'''
if not case:
flags |= re.IGNORECASE
pat = self._obj.dtype.type(pat)
regex = re.compile(pat, flags=flags)
f = lambda x: bool(regex.match(x))
return self._apply(f, dtype=bool)
def strip(self, to_strip=None, side='both'):
'''
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the array from left and/or right sides.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
side : {'left', 'right', 'both'}, default 'both'
Side from which to strip.
Returns
-------
stripped : same type as values
'''
if to_strip is not None:
to_strip = self._obj.dtype.type(to_strip)
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError('Invalid side')
return self._apply(f)
def lstrip(self, to_strip=None):
'''
Remove leading characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the array from the left side.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
stripped : same type as values
'''
return self.strip(to_strip, side='left')
def rstrip(self, to_strip=None):
'''
Remove trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the array from the right side.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
stripped : same type as values
'''
return self.strip(to_strip, side='right')
def wrap(self, width, **kwargs):
'''
Wrap long strings in the array to be formatted in paragraphs with
length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True)
replace_whitespace : bool, optional
If true, each whitespace character (as defined by
string.whitespace) remaining after tab expansion will be replaced
by a single space (default: True)
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True)
break_long_words : bool, optional
If true, then words longer than width will be broken in order to
ensure that no lines are longer than width. If it is false, long
words will not be broken, and some lines may be longer than width.
(default: True)
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right
after hyphens in compound words, as it is customary in English. If
false, only whitespaces will be considered as potentially good
places for line breaks, but you need to set break_long_words to
false if you want truly insecable words. (default: True)
Returns
-------
wrapped : same type as values
'''
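# Forward the TextWrapper options documented above (expand_tabs,
# break_long_words, ...) together with the width.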
tw = textwrap.TextWrapper(width=width, **kwargs)
f = lambda x: '\n'.join(tw.wrap(x))
return self._apply(f)
def translate(self, table):
'''
Map all characters in the string through the given mapping table.
Parameters
----------
table : dict
A mapping of Unicode ordinals to Unicode ordinals, strings,
or None. Unmapped characters are left untouched. Characters mapped
to None are deleted. :meth:`str.maketrans` is a helper function for
making translation tables.
Returns
-------
translated : same type as values
'''
f = lambda x: x.translate(table)
return self._apply(f)
def repeat(self, repeats):
'''
Duplicate each string in the array.
Parameters
----------
repeats : int
Number of repetitions.
Returns
-------
repeated : same type as values
Array of repeated string objects.
'''
f = lambda x: repeats * x
return self._apply(f)
def find(self, sub, start=0, end=None, side='left'):
'''
Return lowest or highest indexes in each string in the array
where the substring is fully contained between [start:end].
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Starting side for search.
Returns
-------
found : array of integer values
'''
sub = self._obj.dtype.type(sub)
if side == 'left':
method = 'find'
elif side == 'right':
method = 'rfind'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return self._apply(f, dtype=int)
def rfind(self, sub, start=0, end=None):
'''
Return highest indexes in each string in the array
where the substring is fully contained between [start:end].
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : array of integer values
'''
return self.find(sub, start=start, end=end, side='right')
def index(self, sub, start=0, end=None, side='left'):
'''
Return lowest or highest indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.find`` except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Starting side for search.
Returns
-------
found : array of integer values
'''
sub = self._obj.dtype.type(sub)
if side == 'left':
method = 'index'
elif side == 'right':
method = 'rindex'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return self._apply(f, dtype=int)
def rindex(self, sub, start=0, end=None):
'''
Return highest indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.rfind`` except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : array of integer values
'''
return self.index(sub, start=start, end=end, side='right')
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
'''
Replace occurrences of pattern/regex in the array with some string.
Parameters
----------
pat : string or compiled regex
String can be a character sequence or regular expression.
repl : string or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
regex : boolean, default True
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
Returns
-------
replaced : same type as values
A copy of the object with all matching occurrences of `pat`
replaced by `repl`.
'''
if not (_is_str_like(repl) or callable(repl)): # pragma: no cover
raise TypeError("repl must be a string or callable")
if _is_str_like(pat):
pat = self._obj.dtype.type(pat)
if _is_str_like(repl):
repl = self._obj.dtype.type(repl)
is_compiled_re = isinstance(pat, type(re.compile('')))
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError("case and flags cannot be set"
" when pat is a compiled regex")
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
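# Fast path: a single-character literal pattern with no flags and a plain
# string replacement can use str.replace; anything else goes through re.sub,
# where n=-1 ("all") is mapped to count=0.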
if is_compiled_re or len(pat) > 1 or flags or callable(repl):
n = n if n >= 0 else 0
compiled = re.compile(pat, flags=flags)
f = lambda x: compiled.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
else:
if is_compiled_re:
raise ValueError("Cannot use a compiled regex as replacement "
"pattern with regex=False")
if callable(repl):
raise ValueError("Cannot use a callable replacement when "
"regex=False")
f = lambda x: x.replace(pat, repl, n)
return self._apply(f)
def decode(self, encoding, errors='strict'):
'''
Decode character string in the array using indicated encoding.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
decoded : same type as values
'''
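# Encodings listed in _cpython_optimized_decoders use the built-in
# bytes.decode fast path; any other encoding goes through the codecs registry.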
if encoding in _cpython_optimized_decoders:
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return self._apply(f, dtype=np.str_)
def encode(self, encoding, errors='strict'):
'''
Encode character string in the array using indicated encoding.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : same type as values
'''
if encoding in _cpython_optimized_encoders:
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return self._apply(f, dtype=np.bytes_)
| 31.602296 | 79 | 0.563567 |
4a1983a9a8eed5a495aca44167b7c2aeeea23f79 | 1,888 | py | Python | mlresearch/__init__.py | joaopfonseca/ml-research | a2a063e341010397bd13df812109f31ce05ac9f7 | ["MIT"] | 1 | 2021-12-13T09:27:06.000Z | 2021-12-13T09:27:06.000Z | mlresearch/__init__.py | joaopfonseca/research | ac4ad6fa05b5985050c63dc9e4e18cd00965e09b | ["MIT"] | 20 | 2021-12-10T11:54:59.000Z | 2022-03-18T17:55:33.000Z | mlresearch/__init__.py | joaopfonseca/research | ac4ad6fa05b5985050c63dc9e4e18cd00965e09b | ["MIT"] | null | null | null |
"""Toolbox to develop research in Machine Learning.
``research`` is a library containing the implementation of various algorithms developed
in Machine Learning research, as well as utilities to facilitate the formatting of pandas
dataframes into LaTeX tables.
Subpackages
-----------
active_learning
Module which contains the code developed for experiments related to Active Learning.
data_augmentation
Module which contains the implementation of variations of oversampling/data
augmentation algorithms, as well as helper classes to use oversampling algorithms as
data augmentation techniques.
datasets
Module which contains code to download, transform and simulate various datasets.
metrics
Module which contains performance metrics/scorers that are not
included in scikit-learn's scorers' dictionary.
utils
Contains a variety of general utility functions and tools used to format and prepare
tables to incorporate into LaTeX code.
"""
import sys
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of mlresearch when
# the binaries are not built
# mypy error: Cannot determine type of '__MLRESEARCH_SETUP__'
__MLRESEARCH_SETUP__ # type: ignore
except NameError:
__MLRESEARCH_SETUP__ = False
if __MLRESEARCH_SETUP__:
sys.stderr.write("Partial import of mlresearch during the build process.\n")
# We are not importing the rest of the package during the build
# process, as it may not be compiled yet
else:
from . import active_learning
from . import data_augmentation
from . import datasets
from . import metrics
from . import utils
from ._version import __version__
__all__ = [
"active_learning",
"data_augmentation",
"datasets",
"metrics",
"utils",
"__version__",
]
| 33.714286 | 89 | 0.741525 |
4a19847884aa1621c530bdb1d2f39b0cf8ce4ee0 | 2,273 | py | Python | FNETR/src/evaluate.py | rajathpatel23/joint-kge-fnet-lm | 413371c96b2a7ffa734a47c03b631233b089d74b | ["MIT"] | 3 | 2021-03-16T22:56:55.000Z | 2022-02-12T20:09:37.000Z | FNETR/src/evaluate.py | rajathpatel23/joint-kge-fnet-lm | 413371c96b2a7ffa734a47c03b631233b089d74b | ["MIT"] | null | null | null | FNETR/src/evaluate.py | rajathpatel23/joint-kge-fnet-lm | 413371c96b2a7ffa734a47c03b631233b089d74b | ["MIT"] | null | null | null |
import sys
def f1(p, r):
if r == 0.:
return 0.
return 2 * p * r / float(p + r)
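# strict: an instance counts as correct only when the predicted label set
# equals the true label set exactly, so precision and recall coincide.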
def strict(true_and_prediction):
num_entities = len(true_and_prediction)
correct_num = 0.
for true_labels, predicted_labels in true_and_prediction:
correct_num += set(true_labels) == set(predicted_labels)
precision = recall = correct_num / num_entities
return precision, recall, f1(precision, recall)
def loose_macro(true_and_prediction):
num_entities = len(true_and_prediction)
p = 0.
r = 0.
for true_labels, predicted_labels in true_and_prediction:
if len(predicted_labels) > 0:
p += len(set(predicted_labels).intersection(set(true_labels))) / float(len(predicted_labels))
if len(true_labels):
r += len(set(predicted_labels).intersection(set(true_labels))) / float(len(true_labels))
precision = p / num_entities
recall = r / num_entities
return precision, recall, f1(precision, recall)
def loose_micro(true_and_prediction):
num_predicted_labels = 0.
num_true_labels = 0.
num_correct_labels = 0.
for true_labels, predicted_labels in true_and_prediction:
num_predicted_labels += len(predicted_labels)
num_true_labels += len(true_labels)
num_correct_labels += len(set(predicted_labels).intersection(set(true_labels)))
precision = num_correct_labels / num_predicted_labels
recall = num_correct_labels / num_true_labels
return precision, recall, f1(precision, recall)
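# Illustrative check on hypothetical data: for a single instance with true
# labels {a, b} and predicted labels {a, c}:
# strict -> (0.0, 0.0, 0.0) because the sets differ
# loose_macro -> (0.5, 0.5, 0.5) 1 of 2 predictions correct, 1 of 2 true labels found
# loose_micro -> (0.5, 0.5, 0.5) same counts aggregated over all instances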
if __name__ == "__main__":
file = open(sys.argv[1])
true_and_prediction = []
for line in file:
temp = line.split("\t")
if len(temp) == 1:
true_labels = temp[0].split()
predicted_labels = []
else:
true_labels, predicted_labels = temp
true_labels = true_labels.split()
predicted_labels = predicted_labels.split()
true_and_prediction.append((true_labels, predicted_labels))
# for each in true_and_prediction:
# print(each)
print(" strict (p,r,f1):", strict(true_and_prediction))
print("loose macro (p,r,f1):", loose_macro(true_and_prediction))
print("loose micro (p,r,f1):", loose_micro(true_and_prediction))
file.close()
| 34.969231 | 105 | 0.672239 |
4a1984917ab0022fa30f645dbf03d1ec43d19cca | 140,175 | py | Python | thwipper.py | spidey711/Thwipper-Bot | 9a847b5b519f5ad0112528ad72d0f1f877e01c58 | ["MIT"] | 2 | 2021-09-04T16:28:23.000Z | 2021-10-03T13:07:12.000Z | thwipper.py | spidey711/Thwipper-bot | 9a847b5b519f5ad0112528ad72d0f1f877e01c58 | ["MIT"] | null | null | null | thwipper.py | spidey711/Thwipper-bot | 9a847b5b519f5ad0112528ad72d0f1f877e01c58 | ["MIT"] | 3 | 2021-07-06T09:04:33.000Z | 2021-10-03T11:38:21.000Z |
# IMPORTS
try:
import discord
from discord.utils import get
from discord.ext import commands, tasks
from links import *
from responses import *
from dotenv import load_dotenv
import mysql.connector as ms
import os
import random
import calendar
import pytz
import datetime
import asyncio
import regex
import praw
import pytube
import imdb
import requests
import aiohttp
import urllib.request
import youtube_dl
import wikipedia
from googlesearch import search
from cryptography.fernet import Fernet
print("All modules and libraries imported...")
except ImportError as ie:
print(ie)
# SETUP
prefixes = ["t!", "_", "|"]
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(
command_prefix=prefixes,
intents=intents,
case_insensitive=True,
)
color = discord.Color.from_rgb(223, 31, 45)
bot.remove_command("help")
# Environment Variables
global auth
load_dotenv(".env")
auth = os.getenv("transformer_auth")
# SNIPE
deleted_messages = {}
# NUMBER OF REQUESTS
num = 0
# MUSIC
server_index = {}
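# FFMPEG_OPTS keeps the stream alive across short network drops (the
# reconnect flags) and drops video (-vn); ydl_op asks youtube_dl for the best
# audio stream and post-processes it to 128 kbps mp3.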
FFMPEG_OPTS = {
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
"options": "-vn",
}
ydl_op = {
"format": "bestaudio/best",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "128",
}
],
}
# DEFAULT TIMEZONE
default_tz = "Asia/Kolkata"
# ENCRYPTER DECRYPTER
key = Fernet.generate_key()
cipher = Fernet(key)
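# Note: a fresh Fernet key is generated on every start-up, so text encrypted
# in a previous session cannot be decrypted after a restart.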
# REDDIT
reddit = praw.Reddit(
client_id=os.getenv("reddit_client_id"),
client_secret=os.getenv("reddit_client_secret"),
user_agent=os.getenv("reddit_user_agent"),
username=os.getenv("reddit_username"),
password=os.getenv("reddit_userpass"),
)
default_topic = {}
# HELP MENU
help_toggle = 0
# QUIPS
dialogue_list = []
# SQL
conn = ms.connect(
user="root", host="localhost", password=os.getenv("sql_pass"), database="discord"
)
cursor = conn.cursor()
# ---------------------------------------------- NON ASYNC FUNCTIONS -----------------------------------------
def help_menu():
global help_toggle
embed_help_menu = discord.Embed(
title="🕸𝗖𝗼𝗺𝗺𝗮𝗻𝗱 𝗠𝗲𝗻𝘂🕸", description="Prefixes: t! _ |", color=color
)
embed_help_menu.set_thumbnail(url=random.choice(url_thumbnails))
embed_help_menu.set_footer(text="New Features Coming Soon 🛠")
if help_toggle == 0 or help_toggle < 0:
help_toggle = 0
embed_help_menu.add_field(
name="𝗦𝘁𝗮𝗻𝗱𝗮𝗿𝗱",
value="`hello` to greet bot\n`help` to get this menu\n`quips` to get a famous dialogue or plot\n`@Thwipper` to get more info about thwipper",
inline=False,
)
embed_help_menu.set_image(url=bot.user.avatar_url)
if help_toggle == 1:
embed_help_menu.add_field(
name="𝗜𝗻𝘁𝗲𝗿𝗻𝗲𝘁",
value="w `topic` for wikipedia\ng `topic` to google\nimdb `movie` to get movie details from IMDb\n reddit `topic` to get reddit memes",
inline=False,
)
embed_help_menu.set_image(url=help_page1)
if help_toggle == 2:
embed_help_menu.add_field(
name="𝗗𝗧𝗖",
value="dt `timezone` to get IST date and time\ncal `year` `month` to get calendar\nNote: The default timezone is set as `Asia/Kolkata`",
inline=False,
)
embed_help_menu.set_image(url=help_page2)
if help_toggle == 3:
embed_help_menu.add_field(
name="𝗦𝗵𝗲𝗹𝗹𝘀",
value="; `query` to use SQL Shell\npy `expression` for python shell\npydoc `function` to get information about that python function\nNote: The functions, when using `pydoc` command, will not be executed. Try without `()`.",
inline=False,
)
embed_help_menu.set_image(url=help_page3)
if help_toggle == 4:
embed_help_menu.add_field(
name="𝗘𝗻𝗰𝗿𝘆𝗽𝘁𝗲𝗿 𝗗𝗲𝗰𝗿𝘆𝗽𝘁𝗲𝗿",
value="hush `en` `text` to encrypt message\nhush `dec` `text` to decrypt message\n",
inline=False,
)
embed_help_menu.set_image(url=help_page4)
if help_toggle == 5:
embed_help_menu.add_field(
name="𝗦𝗽𝗶𝗱𝗲𝗿-𝗣𝘂𝗻𝗸 𝗥𝗮𝗱𝗶𝗼™",
value="🔉 `cn` to get the bot to join voice channel\n🔇 `dc` to remove bot from voice channel\n🎶 p `name` or `index` to play songs\n▶ `res` to resume a song\n⏸ `pause` to pause a song\n⏹ `st` to stop a song\n🔂 `rep` to repeat song\n⏭ `skip` to skip song\n⏮ `prev` for previous song\n*️⃣ `songinfo` to get current song\n🔠 `q` to display queue\n🔼 scroll queue`up`\n🔽scroll queue `down`\n✔ q `name` to add a song to the queue\n❌ rem `index` to remove song from queue\n💥 `cq` to clear queue",
inline=False,
)
embed_help_menu.set_image(url=help_page5)
if help_toggle == 6:
embed_help_menu.add_field(
name="𝗕𝗶𝗿𝘁𝗵𝗱𝗮𝘆𝘀",
value="addbday `mention` `month` `day` to add a user's birthday from DB\n`bday` to get thwipper to wish the members\nrembday `mention` to remove a member's birthday.",
inline=False,
)
embed_help_menu.set_image(url=help_page6)
if help_toggle == 7 or help_toggle > 7:
help_toggle = 7
embed_help_menu.add_field(
name="𝗨𝘁𝗶𝗹𝗶𝘁𝘆",
value="`req` to get number of requests\n`web` to see deleted message\n`ping` to get bot's latency\n`serverinfo` to get server's information\npfp `mention` to get user's profile picture\n`setbit` to set quality of bitrate\n`polls` to see how to conduct a poll",
inline=False,
)
embed_help_menu.set_image(url=help_page7)
return embed_help_menu
def time_converter(seconds):
mins, secs = divmod(seconds, 60)
hours, mins = divmod(mins, 60)
if hours == 0:
return "%02d mins %02d secs" % (mins, secs)
if hours > 0:
return "%d hrs %02d mins %02d secs" % (hours, mins, secs)
def youtube_download(ctx, url):
with youtube_dl.YoutubeDL(ydl_op) as ydl:
URL = ydl.extract_info(url, download=False)["formats"][0]["url"]
return URL
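# requests_query persists the running request counter to the MySQL `requests`
# table; number_of_requests bumps the in-memory counter and writes it through.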
def requests_query():
global cursor
operation = "INSERT INTO requests(number)VALUES({})".format(num)
cursor.execute(operation)
def number_of_requests():
global num # num = 0
num += 1
requests_query()
# ----------------------------------------- EVENTS --------------------------------------
@bot.event
async def on_ready():
print("{0.user} is now online...\nHey Tamonud! How's it going?".format(bot))
stop = 0
# QUIPS
global dialogue_list
site = (
requests.get("https://geektrippers.com/spiderman-quotes/")
.content.decode()
.replace("<br>", "\n")
.replace("<strong>", " ")
.replace("</strong>", " ")
.replace("<em>", " ")
.replace("</em>", " ")
.replace("’", "'")
.replace("”", '"\n\r')
.replace("…", "...")
.replace("“", '"')
.replace(" ", " ")
.replace("–", "-")
.replace("‘", "'")
.replace("]", "]\n")
.replace("[", "\n[")
)
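# Crude HTML scraping: slice out each quote wrapped in the styled
# <p class="has-background"> tags and collect it into dialogue_list
# for the quips command.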
for i in range(0, 1000):
q = site.find(
'<p class="has-background" style="background-color:#dedfe0">', stop
) + len('<p class="has-background" style="background-color:#dedfe0">')
w = site.find("</p>", stop)
stop = w + len("</p>")
dialogues = ""
if not site[q:w]:
continue
else:
dialogues = site[q:w]
dialogue_list += [dialogues]
# STATUSES
@tasks.loop(minutes=10)
async def multiple_statuses():
while True:
for status in status_list:
await asyncio.sleep(300)
await bot.change_presence(
activity=discord.Activity(
type=discord.ActivityType.playing, name=status
)
)
multiple_statuses.start()
# UPDATION
@tasks.loop(seconds=5.0)
async def updation():
# REQUESTS UPDATE
global cursor
global num
op = "SELECT MAX(number) FROM requests"
cursor.execute(op)
req1 = cursor.fetchall()
req2 = str(req1).replace("[(", " ").replace(",)]", " ")
num = int(req2)
conn.commit()
updation.start()
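# Thin async helper: POST the payload to the Hugging Face Inference API
# endpoint and return the parsed JSON response.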
async def transformer(api, header, json):
async with aiohttp.ClientSession() as session:
async with session.post(api, headers=header, json=json) as resp:
return await resp.json()
@bot.event
async def on_message(message):
headeras = {"Authorization": auth}
API_URL = (
"https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill"
)
if message.content.lower().startswith("thwip"):
past_respose = []
generated = []
input_text = message.content.lower().replace("thwip", "")
payload = {
"inputs": {
"past_user_inputs": past_respose,
"generated_responses": generated,
"text": input_text,
},
}
output = await transformer(API_URL, header=headeras, json=payload)
if len(past_respose) < 100:
past_respose.append(input_text)
generated.append(output["generated_text"])
else:
past_respose.pop(0)
generated.pop(0)
past_respose.append(input_text)
generated.append(output["generated_text"])
await message.reply(output["generated_text"])
if f"<@!{bot.user.id}>" == message.content:
number_of_requests()
embed = discord.Embed(
title="About",
description=f"Hi {message.author.name}!\nI am Thwipper. I aim to be a multipurpose bot. From music to memes, I have it all 😎",
color=color,
)
embed.add_field(
name="Made By", value="[Tamonud](https://github.com/spidey711)", inline=True
)
embed.add_field(
name="Source Code",
value="[Thwipper](https://github.com/spidey711/Thwipper-bot)",
inline=True,
)
embed.set_thumbnail(url=bot.user.avatar_url)
# embed.set_image(url="https://txt.1001fonts.net/img/txt/dHRmLjcyLjAwMDAwMC5WRWhYU1ZCUVJWSSwuMA,,/lazenby-computer.liquid.png")
embed.set_footer(
text="Type _help for command menu", icon_url=message.author.avatar_url
)
await message.reply(embed=embed)
else:
await bot.process_commands(message)
async def genpost(api, header, json):
async with aiohttp.ClientSession() as session:
async with session.post(api, headers=header, json=json) as resp:
return await resp.json()
@bot.command()
async def gen(ctx, *, text):
API_URL2 = "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-2.7B"
header2 = {"Authorization": auth}
payload2 = {
"inputs": text,
"parameters": {"max_new_tokens": 250, "return_full_text": True},
}
output = await genpost(API_URL2, header2, payload2)
await ctx.send(
embed=discord.Embed(
title="Generated text", description=output[0]["generated_text"], color=color
)
)
@bot.event
async def on_message_delete(message):
if not message.channel.id in list(deleted_messages.keys()):
deleted_messages[message.channel.id] = []
if len(message.embeds) <= 0:
deleted_messages[message.channel.id].append(
(str(message.author.id), message.content)
)
else:
deleted_messages[message.channel.id].append(
(str(message.author.id), message.embeds[0], True)
)
@bot.event
async def on_reaction_add(reaction, user):
number_of_requests()
if not user.bot:
if reaction.emoji == "🖱":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
try:
sub = reddit.subreddit(
default_topic[str(reaction.message.guild.id)]
).random()
embed = discord.Embed(
description="**Caption:\n**{}".format(sub.title), color=color
)
embed.set_author(
name="Post by: {}".format(sub.author), icon_url=url_reddit_author
)
embed.set_thumbnail(url=url_reddit_thumbnail)
embed.set_image(url=sub.url)
embed.set_footer(
text="🔺: {} 🔻: {} 💬: {}".format(
sub.ups, sub.downs, sub.num_comments
)
)
await reaction.message.edit(embed=embed)
except Exception:
embed = discord.Embed(
description="Default topic is not set", color=color
)
embed.set_author(name="Uh oh...", icon_url=url_reddit_author)
await reaction.message.edit(embed=embed)
global help_toggle
if reaction.emoji == "➡":
help_toggle += 1
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
await reaction.message.edit(embed=help_menu())
if reaction.emoji == "⬅":
help_toggle -= 1
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
await reaction.message.edit(embed=help_menu())
if reaction.emoji == "🕸":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
embed = discord.Embed(
title="🕸Mutual Guilds🕸",
description="\n".join(
[servers.name for servers in user.mutual_guilds]
),
color=color,
)
embed.set_thumbnail(url=random.choice(url_thumbnails))
embed.set_footer(text="New Features Coming Soon 🛠")
await reaction.message.edit(embed=embed)
# MUSIC PLAYER
voice = discord.utils.get(
bot.voice_clients, guild=reaction.message.guild)
voice_client = reaction.message.guild.voice_client
playing = reaction.message.guild.voice_client.is_playing()
pause = reaction.message.guild.voice_client.is_paused()
# SERVER QUEUE
operation_view = "SELECT * FROM music_queue WHERE server={}".format(
str(reaction.message.guild.id)
)
cursor.execute(operation_view)
server_queue = cursor.fetchall()
members_in_vc = [
str(names) for names in reaction.message.guild.voice_client.channel.members
]
string = ""
if reaction.emoji == "🔼":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
index = server_index[str(reaction.message.guild.id)]
try:
index -= 10
for song in server_queue[index: index + 20]:
string += (
str(index)
+ ") "
+ f"{song[0]}\n".replace(" - YouTube", " ")
)
index += 1
embed = discord.Embed(description=string, color=color)
embed.set_author(
name=f"{reaction.message.guild.name}'s Playlist",
icon_url=url_author_music,
)
embed.set_thumbnail(url=random.choice(url_thumbnail_music))
embed.set_footer(
text=f"Number Of Songs: {len(server_queue)}")
await reaction.message.edit(embed=embed)
except KeyError:
embed = discord.Embed(
description=random.choice(default_index), color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "🔽":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
index = server_index[str(reaction.message.guild.id)]
try:
for song in server_queue[index: index + 20]:
string += (
str(index)
+ ") "
+ f"{song[0]}\n".replace(" - YouTube", " ")
)
index += 1
embed = discord.Embed(description=string, color=color)
embed.set_author(
name=f"{reaction.message.guild.name}'s Playlist",
icon_url=url_author_music,
)
embed.set_thumbnail(url=random.choice(url_thumbnail_music))
embed.set_footer(text=f"Number Of Songs: {len(server_queue)}")
index += 10
await reaction.message.edit(embed=embed)
except KeyError:
embed = discord.Embed(
description=random.choice(default_index), color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "🔠":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
try:
index = server_index[str(reaction.message.guild.id)] - 10
if server_index[str(reaction.message.guild.id)] > 10:
for song in server_queue[index: index + 20]:
string += (
str(index)
+ ") "
+ f"{song[0]}\n".replace(" - YouTube", " ")
)
index += 1
embed = discord.Embed(description=string, color=color)
embed.set_author(
name=f"{reaction.message.guild.name}'s Playlist",
icon_url=url_author_music,
)
embed.set_thumbnail(
url=random.choice(url_thumbnail_music))
embed.set_footer(
text=f"Number Of Songs: {len(server_queue)}")
await reaction.message.edit(embed=embed)
else:
index = server_index[str(reaction.message.guild.id)]
for song in server_queue[index: index + 20]:
string += (
str(index)
+ ") "
+ f"{song[0]}\n".replace(" - YouTube", " ")
)
index += 1
embed = discord.Embed(description=string, color=color)
embed.set_author(
name=f"{reaction.message.guild.name}'s Playlist",
icon_url=url_author_music,
)
embed.set_thumbnail(
url=random.choice(url_thumbnail_music))
embed.set_footer(
text=f"Number Of Songs: {len(server_queue)}")
await reaction.message.edit(embed=embed)
except KeyError:
embed = discord.Embed(
description=random.choice(default_index), color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "▶":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
try:
if server_index[str(reaction.message.guild.id)] is not None:
if pause == True:
voice_client.resume()
embed = discord.Embed(
description="Song has resumed playing 🎸",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
else:
if playing == True:
embed = discord.Embed(
description="Song is not paused 🤔", color=color
)
embed.set_author(
name="Spider-Punk Radio™",
icon_url=url_author_music,
)
embed.set_footer(
text=f"Voice Channel Bitrate: {reaction.message.guild.voice_client.channel.bitrate/1000} kbps"
)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description="Nothing is playing right now ❗",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™",
icon_url=url_author_music,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
else:
if playing != True:
voice_client.resume()
embed = discord.Embed(
description="Song has resumed playing ▶",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description="Song is already playing 🎸", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(
name="Error", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
users = set()
message = await reaction.message.channel.fetch_message(
reaction.message.id
)
for reaction in message.reactions:
async for user in reaction.users():
users.add(user)
str1 = ",".join([str(users)])
pre_li = str1.replace("{", "").replace("}", "")
li = list(pre_li.split(",")) # li[-1]
embed = discord.Embed(
description=f"Connect to the voice channel first 🔊", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "⏸":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
try:
if playing == True:
voice_client.pause()
embed = discord.Embed(
description="Song is paused ⏸", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
else:
if pause == True:
embed = discord.Embed(
description="Song is already paused ⏸", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description="No song playing currently ❗",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(
name="Error", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description=f"Connect to the voice channel first 🔊", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "⏮":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
server_index[str(reaction.message.guild.id)] -= 1
if members_in_vc.count(str(user)) > 0:
try:
URL_queue = youtube_download(
reaction.message,
server_queue[server_index[str(reaction.message.guild.id)]][
1
],
)
if playing != True:
embed = discord.Embed(
description="**Song: **{a}\n**Queue Index: **{b}".format(
a=server_queue[
server_index[str(
reaction.message.guild.id)]
][0],
b=server_index[str(
reaction.message.guild.id)],
).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now playing", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{a}\n**Queue Index: **{b}".format(
a=server_queue[
server_index[str(
reaction.message.guild.id)]
][0],
b=server_index[str(
reaction.message.guild.id)],
).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now playing", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
except IndexError:
embed = discord.Embed(
description="Looks like there is no song at this index",
color=color,
)
embed.set_author(
name="Oops...", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description=f"Connect to the voice channel first 🔊", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "⏭":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
server_index[str(reaction.message.guild.id)] += 1
if members_in_vc.count(str(user)) > 0:
try:
URL_queue = youtube_download(
reaction.message,
server_queue[server_index[str(reaction.message.guild.id)]][
1
],
)
if playing != True:
embed = discord.Embed(
description="**Song: **{a}\n**Queue Index: **{b}".format(
a=server_queue[
server_index[str(
reaction.message.guild.id)]
][0],
b=server_index[str(
reaction.message.guild.id)],
).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now Playing", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{a}\n**Queue Index: **{b}".format(
a=server_queue[
server_index[str(
reaction.message.guild.id)]
][0],
b=server_index[str(
reaction.message.guild.id)],
).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now Playing", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
except IndexError:
embed = discord.Embed(
description="Looks like there is no song at this index",
color=color,
)
embed.set_author(
name="Oops...", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description=f"Connect to the voice channel first 🔊", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "⏹":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
try:
if playing == True or pause == True:
voice_client.stop()
embed = discord.Embed(
description="Song has been stopped ⏹", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description="Nothing is playing at the moment ❗",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(
name="Error", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description=f"Connect to the voice channel first 🔊", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "*️⃣":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if len(server_queue) <= 0:
embed = discord.Embed(
description=random.choice(empty_queue), color=color
)
embed.set_author(
name="Uh oh...", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
try:
try:
embed = discord.Embed(
description="**Song: **{a}\n**Index: **{b}\n**Views: **{c}\n**Description: **\n{d}".format(
a=server_queue[
server_index[str(
reaction.message.guild.id)]
][0],
b=server_index[str(reaction.message.guild.id)],
c=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).views,
d=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).description,
),
color=color,
)
embed.set_author(
name="Currently Playing",
url=server_queue[
server_index[str(reaction.message.guild.id)]
][1],
icon_url=url_author_music,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
await reaction.message.edit(embed=embed)
except discord.errors.HTTPException:
embed = discord.Embed(
description="**Song: **{a}\n**Index: **{b}\n**Views: **{c}\n**Description: **\n{d}".format(
a=server_queue[
server_index[str(
reaction.message.guild.id)]
][0],
b=server_index[str(reaction.message.guild.id)],
c=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).views,
d=random.choice(description_embed_errors),
),
color=color,
)
embed.set_author(
name="Currently Playing",
url=server_queue[
server_index[str(reaction.message.guild.id)]
][1],
icon_url=url_author_music,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
await reaction.message.edit(embed=embed)
except KeyError:
embed = discord.Embed(
description="Looks like you weren't playing anything before this so there is no current song. Play song from queue to set a current song",
color=color,
)
embed.set_author(
name="Uh oh...", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
if reaction.emoji == "🔂":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
try:
URL_queue = youtube_download(
reaction.message,
server_queue[server_index[str(reaction.message.guild.id)]][
1
],
)
if reaction.message.guild.voice_client.is_playing() != True:
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[
server_index[str(
reaction.message.guild.id)]
][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(
name="Repeating Song", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[
server_index[str(
reaction.message.guild.id)]
][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(
name="Repeating Song", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[
server_index[str(
reaction.message.guild.id)]
][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
reaction.message.guild.voice_client.channel.bitrate
/ 1000
)
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(
name="Error", icon_url=url_author_music)
await reaction.message.edit(embed=embed)
else:
embed = discord.Embed(
description=f"Connect to the voice channel first 🔊", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
if reaction.emoji == "🔀":
if str(user) != str(bot.user) and reaction.message.author == bot.user:
await reaction.remove(user)
if members_in_vc.count(str(user)) > 0:
random_song = random.choice(server_queue)
queue_index = server_index[str(reaction.message.guild.id)]
for index in range(len(server_queue)):
if random_song == server_queue[index]:
queue_index = int(index)
server_index[str(reaction.message.guild.id)] = queue_index
URL_shuffle = youtube_download(
reaction.message, random_song[1])
if reaction.message.guild.voice_client.is_playing() == False:
embed = discord.Embed(
description=f"**Song: **{random_song[0]}\n**Queue Index: **{queue_index}".replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(name="Shuffle Play",
icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=random_song[1]).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(url=random_song[1]).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(url=random_song[1]).length
),
inline=True,
)
embed.set_footer(
text=f"Voice Channel Bitrate: {reaction.message.guild.voice_client.channel.bitrate/1000} kbps"
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_shuffle, **FFMPEG_OPTS))
else:
voice.stop()
embed = discord.Embed(
description=f"**Song: **{random_song[0]}\n**Queue Index: **{queue_index}".replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(name="Shuffle Play",
icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=random_song[1]).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(url=random_song[1]).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(url=random_song[1]).length
),
inline=True,
)
embed.set_footer(
text=f"Voice Channel Bitrate: {reaction.message.guild.voice_client.channel.bitrate/1000} kbps"
)
await reaction.message.edit(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_shuffle, **FFMPEG_OPTS))
else:
embed = discord.Embed(
description=f"{reaction.message.author.name}, connect to a voice channel first 🔊",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await reaction.message.edit(embed=embed)
# ---------------------------------------------- STANDARD ----------------------------------------------------
@bot.command(aliases=["spidey", "spiderman", "webslinger", "webhead", "wallcrawler"])
async def spiderman_signal(ctx):
number_of_requests()
calls = [
f"{ctx.author.name} is calling you!",
f"Your aid has been requested by {ctx.author.name}.",
f"{ctx.author.name} has got something for ya.",
f"{ctx.author.name} requires your assistance.",
f"{ctx.author.name} has called.",
]
embed = discord.Embed(description=random.choice(calls), color=color)
embed.set_image(url=random.choice(hello_urls))
await ctx.send("<@!622497106657148939>")
await ctx.send(embed=embed)
@bot.command(
aliases=["hello", "hi", "hey", "hey there",
"salut", "kon'nichiwa", "hola", "aloha"]
)
async def greet_bot(ctx):
number_of_requests()
greetings = [
f"Hey {ctx.author.name}!",
f"Hi {ctx.author.name}!",
f"How's it going {ctx.author.name}?",
f"What can I do for you {ctx.author.name}?",
f"What's up {ctx.author.name}?",
f"Hello {ctx.author.name}!",
f"So {ctx.author.name}, how's your day going?",
]
embed = discord.Embed(color=color)
embed.set_author(name=random.choice(greetings),
icon_url=ctx.author.avatar_url)
embed.set_image(url=random.choice(hello_urls))
await ctx.send(embed=embed)
@bot.command(aliases=["help", "use"])
async def embed_help(ctx):
number_of_requests()
message = await ctx.send(embed=help_menu())
await message.add_reaction("⬅")
await message.add_reaction("🕸")
await message.add_reaction("➡")
@bot.command(aliases=["quips"])
async def get_quips(ctx):
number_of_requests()
try:
embed = discord.Embed(
title=random.choice(titles),
description=random.choice(dialogue_list),
color=color,
)
embed.set_thumbnail(url=random.choice(url_thumbnails))
embed.set_footer(text=random.choice(footers),
icon_url=bot.user.avatar_url)
await ctx.send(embed=embed)
print("Quip successfully sent!")
except Exception as e:
embed = discord.Embed(title="Error", description=str(e), color=color)
# ----------------------------------------------- INTERNET ---------------------------------------------
@bot.command(aliases=["imdb"])
async def IMDb_movies(ctx, *, movie_name=None):
number_of_requests()
if movie_name is None:
embed = discord.Embed(description=random.choice(
imdb_responses), color=color)
embed.set_author(name="Ahem ahem", icon_url=url_imdb_author)
await ctx.send(embed=embed)
if movie_name is not None:
try:
db = imdb.IMDb()
movie = db.search_movie(movie_name)
title = movie[0]["title"]
movie_summary = (
db.get_movie(movie[0].getID())
.summary()
.replace("=", "")
.replace("Title", "**Title**")
.replace("Movie", "")
.replace("Genres", "**Genres**")
.replace("Director", "**Director**")
.replace("Writer", "**Writer(s)**")
.replace("Cast", "**Cast**")
.replace("Country", "**Country**")
.replace("Language", "**Language**")
.replace("Rating", "**Rating**")
.replace("Plot", "**Plot**")
.replace("Runtime", "**Runtime**")
)
movie_cover = movie[0]["full-size cover url"]
embed = discord.Embed(
title="🎬 {} 🍿".format(title), description=movie_summary, color=color
)
embed.set_thumbnail(url=url_imdb_thumbnail) # 🎥 🎬 📽
embed.set_image(url=movie_cover)
await ctx.send(embed=embed)
except Exception:
embed = discord.Embed(
description="I couldn't find `{}`.\nTry again and make sure you enter the correct movie name.".format(
movie_name
),
color=color,
)
embed.set_author(name="Movie Not Found 💬",
icon_url=url_imdb_author)
await ctx.send(embed=embed)
@bot.command(aliases=["reddit", "rd"])
async def reddit_memes(ctx, *, topic):
number_of_requests()
if str(ctx.guild.id) not in default_topic:
default_topic[str(ctx.guild.id)] = str(topic)
else:
pass
if str(ctx.guild.id) in default_topic:
default_topic[str(ctx.guild.id)] = str(topic)
sub = reddit.subreddit(topic).random()
try:
embed = discord.Embed(
description="**Caption:\n**{}".format(sub.title), color=color
)
embed.set_author(
name="Post by: {}".format(sub.author), icon_url=url_reddit_author
)
embed.set_thumbnail(url=url_reddit_thumbnail)
embed.set_image(url=sub.url)
embed.set_footer(
text="🔺: {} 🔻: {} 💬: {}".format(
sub.ups, sub.downs, sub.num_comments)
)
message = await ctx.send(embed=embed)
await message.add_reaction("🖱")
except Exception:
default_topic[str(ctx.guild.id)] = ""
embed = discord.Embed(
description="Looks like the subreddit is either banned or does not exist 🤔",
color=color,
)
embed.set_author(name="Subreddit Not Found",
icon_url=url_reddit_author)
await ctx.send(embed=embed)
@bot.command(aliases=["wiki", "w"])
async def wikipedia_results(ctx, *, thing_to_search):
number_of_requests()
try:
try:
title = wikipedia.page(thing_to_search)
embed = discord.Embed(
description=wikipedia.summary(thing_to_search), color=color
)
embed.set_author(name=title.title, icon_url=url_wiki)
embed.add_field(
name="Search References",
value=", ".join(
[x for x in wikipedia.search(thing_to_search)][:5]),
inline=False,
)
embed.set_footer(
text="Searched by: {}".format(ctx.author.name),
icon_url=ctx.author.avatar_url,
)
await ctx.send(embed=embed)
print("Results for wikipedia search sent...")
except wikipedia.PageError as pe:
embed = discord.Embed(description=str(pe), color=color)
embed.set_author(name="Error", icon_url=url_wiki)
await ctx.send(embed=embed)
except wikipedia.DisambiguationError as de:
embed = discord.Embed(description=str(de), color=color)
embed.set_author(name="Hmm...", icon_url=url_wiki)
await ctx.send(embed=embed)
@bot.command(aliases=["google", "g"])
async def google_results(ctx, *, thing_to_search):
number_of_requests()
results = " "
for result in search(
thing_to_search,
tld="com",
lang="en",
safe="off",
num=6,
start=0,
stop=10,
pause=1.0,
):
results += result + "\n"
await ctx.send("Search results for: **{}**".format(thing_to_search))
await ctx.send(results)
print("Results for google search sent...")
# ------------------------------------------------- UTILITY -------------------------------------------------
@bot.command(aliases=["delete", "del"])
async def clear(ctx, text, num=10000000000000):
number_of_requests()
await ctx.channel.purge(limit=1)
if str(text) == "WEB":
await ctx.channel.purge(limit=num)
else:
await ctx.send("Incorrect Password")
@bot.command(aliases=["[X]"])
async def stop_program(ctx):
number_of_requests()
msgs = [
f"Bye {ctx.author.name}!",
f"See ya {ctx.author.name}!",
f"Till next time {ctx.author.name}!",
]
if ctx.author.id == 622497106657148939:
try:
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
voice.stop()
await voice.disconnect()
except:
pass
conn.commit()
await ctx.send(random.choice(msgs))
print(random.choice(msgs))
exit()
else:
await ctx.send("Access Denied")
@bot.command(aliases=["say"])
async def replicate_user_text(ctx, *, text):
number_of_requests()
await ctx.channel.purge(limit=1)
await ctx.send(text)
@bot.command(aliases=["polls", "poll"])
async def conduct_poll(ctx, ems=None, title=None, *, description=None):
number_of_requests()
poll_channel = None
for i in ctx.guild.channels:
for j in poll_channels:
if i.name == j:
                    send_to = j
poll_channel = discord.utils.get(
ctx.guild.channels, name=send_to)
if title is not None:
if "_" in title:
title = title.replace("_", " ")
if ems is not None and title is not None and description is not None:
embed = discord.Embed(
title=f"Topic: {title}", description=description, color=color
)
embed.set_footer(
text=f"Conducted by: {ctx.author.name}", icon_url=ctx.author.avatar_url
)
message = await poll_channel.send(embed=embed)
if ems == "y/n" or ems == "yes/no":
await message.add_reaction("✅")
await message.add_reaction("❌")
elif ems == "t/t" or ems == "this/that":
await message.add_reaction("👈🏻")
await message.add_reaction("👉🏻")
else:
emojis = list(ems.split(","))
for emoji in emojis:
await message.add_reaction(emoji)
            if ctx.channel != poll_channel:
await ctx.send(
embed=discord.Embed(
description="Poll Sent Successfully 👍🏻", color=color
)
)
elif title is None and description is None and ems is None:
embed = discord.Embed(
title="Polls",
description="Command: `_polls emojis title description`",
color=color,
)
embed.add_field(
name="Details",
value="`emojis:` enter emojis for the poll and they will be added as reactions\n`title:` give a title to your poll.\n`description:` tell everyone what the poll is about.",
inline=False,
)
embed.add_field(
name="Notes",
value="To add reactions to poll the multiple emojis should be separated by a `,`.\nIf you wish to use default emojis, `y/n` for yes or no and `t/t` for this or that.\nIf the title happens to be more than one word long, use `_` in place of spaces as demonstrated below.\nExample: `The_Ultimate_Choice` will be displayed in the title of poll as `The Ultimate Choice`.",
inline=False,
)
embed.set_thumbnail(url=random.choice(url_thumbnails))
await ctx.send(embed=embed)
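    # Example invocation (hypothetical values), following the help embed above:
    #   _polls 🍕,🍔 Lunch_Poll What should we order today?
    # The comma-separated emojis become reactions on the poll message, and underscores
    # in the title are turned back into spaces before the embed is built.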
@bot.command(aliases=["req", "requests"])
async def total_requests(ctx):
number_of_requests()
operation = "SELECT MAX(number) FROM requests"
cursor.execute(operation)
total = cursor.fetchall()
embed = discord.Embed(
description=f"""**Requests Made:\n**{str(total).replace("[(", " ").replace(",)]", " ")}""",
color=color,
)
await ctx.send(embed=embed)
@bot.command(aliases=["troll"])
async def troll_snipe(ctx):
await ctx.channel.purge(limit=1)
await ctx.send(random.choice(troll_links))
await ctx.channel.purge(limit=1)
@bot.command(aliases=["web"])
async def snipe(ctx):
number_of_requests()
try:
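            # `deleted_messages` (populated elsewhere in the file) appears to map a
            # channel id to a list of tuples: short tuples hold (author_id, content)
            # for plain messages, longer ones carry an embed as the second item.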
message = deleted_messages[ctx.channel.id][-1]
if len(message) < 3:
embed = discord.Embed(
title="Deleted Message", description=message[1], color=color
)
embed.set_footer(
text=f"Sent by: {bot.get_user(int(message[0]))}",
icon_url=bot.get_user(int(message[0])).avatar_url,
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(description="Embed deleted 👇🏻", color=color)
embed.set_author(
name=bot.get_user(int(message[0])),
icon_url=bot.get_user(int(message[0])).avatar_url,
)
await ctx.send(embed=embed)
await ctx.send(embed=message[1])
except KeyError:
await ctx.send(
embed=discord.Embed(
description="There is nothing to web up 🕸", color=color)
)
@bot.command(aliases=["pfp"])
async def user_pfp(ctx, member: discord.Member = None):
number_of_requests()
if member is None:
embed = discord.Embed(
title="Profile Picture : {}".format(ctx.author.name), color=color
)
embed.set_image(url=ctx.author.avatar_url)
else:
embed = discord.Embed(
title="Profile Picture : {}".format(member.name), color=color
)
embed.set_image(url=member.avatar_url)
embed.set_footer(
text=random.choice(compliments),
icon_url="https://i.pinimg.com/236x/9f/9c/11/9f9c11d4eaa3d99bc9a8ece092f5e979.jpg",
)
await ctx.send(embed=embed)
@bot.command(aliases=["ping"])
async def get_ping(ctx):
number_of_requests()
ping = round(bot.latency * 1000)
c1 = "🟢"
c2 = "🟡"
c3 = "🔴"
if ping >= 350:
embed = discord.Embed(description=f"{c3} {ping} ms", color=color)
await ctx.send(embed=embed)
elif ping <= 320:
embed = discord.Embed(description=f"{c1} {ping} ms", color=color)
await ctx.send(embed=embed)
elif ping > 320 and ping < 350:
embed = discord.Embed(description=f"{c2} {ping} ms", color=color)
await ctx.send(embed=embed)
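    # A minimal alternative sketch (not used by the command above): the three ping
    # branches can be collapsed into one helper that returns the indicator emoji.
    # Thresholds mirror the code above: green up to 320 ms, red from 350 ms, yellow
    # in between.
    def latency_indicator(ping_ms):
        if ping_ms >= 350:
            return "🔴"
        return "🟢" if ping_ms <= 320 else "🟡"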
@bot.command(aliases=["serverinfo", "si"])
async def server_information(ctx):
number_of_requests()
name = str(ctx.guild.name)
ID = str(ctx.guild.id)
description = str(ctx.guild.description)
owner = str(ctx.guild.owner)
region = str(ctx.guild.region)
num_mem = str(ctx.guild.member_count)
icon = str(ctx.guild.icon_url)
role_count = len(ctx.guild.roles)
# bots_list = [bot.mention for bot in ctx.guild.members if bot.bot]
embed = discord.Embed(title=f"📚 {name} 📚", color=color)
embed.add_field(name="Owner", value=f"`{owner}`", inline=True)
embed.add_field(name="Member Count", value=f"`{num_mem}`", inline=True)
embed.add_field(name="Role Count", value=f"`{role_count}`", inline=True)
embed.add_field(name="Region", value=f"`{region}`", inline=True)
embed.add_field(name="Server ID", value=f"`{ID}`", inline=False)
embed.add_field(name="Description",
value=f"```{description}```", inline=False)
embed.set_footer(
text=f"Created on {ctx.guild.created_at.__format__('%A, %B %d, %Y @ %H:%M:%S')}",
icon_url=ctx.author.avatar_url,
)
embed.set_image(url=icon)
await ctx.send(embed=embed)
# --------------------------------------- ENCRYPER DECRYPTER ---------------------------------
@bot.command(aliases=["hush"])
async def encrypt_data(ctx, mode, *, message):
number_of_requests()
res = message.encode()
try:
if mode == "en":
embed = discord.Embed(
title="Message Encrpyted",
description="```{}```".format(str(cipher.encrypt(res)))
.replace("b'", "")
.replace("'", ""),
color=color,
)
embed.set_thumbnail(url=url_en_dec)
await ctx.channel.purge(limit=1)
await ctx.send(embed=embed)
if mode == "dec":
embed = discord.Embed(
title="Message Decrypted",
description="```{}```".format(str(cipher.decrypt(res)))
.replace("b'", "")
.replace("'", ""),
color=color,
)
embed.set_thumbnail(url=url_en_dec)
await ctx.channel.purge(limit=1)
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(title="Error", description=str(e), color=color)
embed.set_thumbnail(url=url_en_dec)
await ctx.send(embed=embed)
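    # Hedged note (added): `cipher` is created earlier in the file and behaves like a
    # symmetric token cipher exposing encrypt()/decrypt() over bytes, e.g. Fernet.
    # A minimal stand-in for local testing could look like:
    #
    #     from cryptography.fernet import Fernet
    #     cipher = Fernet(Fernet.generate_key())
    #     token = cipher.encrypt(b"hello")
    #     assert cipher.decrypt(token) == b"hello"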
# ------------------------------------- DATE TIME CALENDAR ---------------------------------------------
@bot.command(aliases=["dt"])
async def date_time_ist(ctx, timezone=None):
number_of_requests()
if timezone is None:
tzinfo = pytz.timezone(default_tz)
dateTime = datetime.datetime.now(tz=tzinfo)
embed = discord.Embed(color=color)
embed.add_field(
name="Date",
value="%s/%s/%s" % (dateTime.day, dateTime.month, dateTime.year),
inline=True,
)
embed.add_field(
name="Time",
value="%s:%s:%s" % (
dateTime.hour, dateTime.minute, dateTime.second),
inline=True,
)
embed.set_footer(text=f"Timezone : {default_tz}")
# embed.set_thumbnail(url=url_dtc)
await ctx.send(embed=embed)
else:
tzinfo = pytz.timezone(timezone)
dateTime = datetime.datetime.now(tz=tzinfo)
embed = discord.Embed(color=color)
embed.add_field(
name="Date",
value="%s/%s/%s" % (dateTime.day, dateTime.month, dateTime.year),
inline=True,
)
embed.add_field(
name="Time",
value="%s:%s:%s" % (
dateTime.hour, dateTime.minute, dateTime.second),
inline=True,
)
# embed.set_thumbnail(url=url_dtc)
embed.set_footer(text=f"Timezone : {timezone}")
await ctx.send(embed=embed)
@bot.command(aliases=["cal"])
async def get_calendar(ctx, year, month):
number_of_requests()
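        # calendar.month(year, month) returns a fixed-width, multi-line text calendar
        # (month/year header plus a Mo-Su week grid); wrapping it in ``` keeps the
        # alignment when Discord renders the embed.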
try:
embed = discord.Embed(
title="Calendar",
description="```{}```".format(
calendar.month(int(year), int(month))),
color=color,
)
embed.set_thumbnail(url=url_dtc)
await ctx.send(embed=embed)
        except (ValueError, IndexError):  # calendar raises IllegalMonthError (a ValueError) for bad months
embed = discord.Embed(
description="{}, this month doesn't exist 📆".format(
ctx.author.name),
color=color,
)
embed.set_author(name="Calendar", icon_url=url_dtc)
await ctx.send(embed=embed)
# ------------------------------------------ SHELLS --------------------------------------------
@bot.command(aliases=[";"])
async def sql_shell(ctx, *, expression):
number_of_requests()
try:
output = ""
cursor.execute(expression)
for item in cursor.fetchall():
output += str(item) + "\n"
conn.commit()
embed = discord.Embed(
title=str(expression), description=str(output), color=color
)
embed.set_author(name="MySQL Shell", icon_url=url_author_sql)
await ctx.send(embed=embed)
except Exception as e:
embed_err = discord.Embed(
title="Error", description=str(e), color=color)
embed_err.set_author(name="MySQL Shell", icon_url=url_author_sql)
await ctx.send(embed=embed_err)
@bot.command(aliases=["py"])
async def python_shell(ctx, *, expression):
number_of_requests()
if expression in denied or denied[-2] in expression or denied[-1] in expression:
embed = discord.Embed(description=random.choice(
denied_responses), color=color)
embed.set_author(name="Access Denied", icon_url=url_author_python)
await ctx.send(embed=embed)
else:
try:
embed_acc = discord.Embed(
title=str(expression), description=str(eval(expression)), color=color
)
embed_acc.set_author(name="Python Shell",
icon_url=url_author_python)
await ctx.send(embed=embed_acc)
except Exception as e:
embed_err = discord.Embed(
title="Error", description=str(e), color=color)
embed_err.set_author(name="Python Shell",
icon_url=url_author_python)
await ctx.send(embed=embed_err)
@bot.command(aliases=["pydoc"])
async def function_info(ctx, func):
number_of_requests()
try:
if "(" in [char for char in func] and ")" in [char for char in func]:
embed = discord.Embed(
description=random.choice(no_functions), color=color)
embed.set_author(name="Access Denied", icon_url=url_author_python)
await ctx.send(embed=embed)
else:
function = eval(func)
embed = discord.Embed(description=function.__doc__, color=color)
embed.set_author(name="Info: {}".format(
func), icon_url=url_author_python)
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_python)
await ctx.send(embed=embed)
# ----------------------------------------------- MUSIC ----------------------------------------------------
@bot.command(aliases=["cn", "connect"])
async def join_vc(ctx):
number_of_requests()
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
try:
if not ctx.message.author.voice:
embed = discord.Embed(
description="{}, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
                await ctx.send(embed=embed)
                return
            if voice is None:
channel = ctx.message.author.voice.channel
await channel.connect()
embed = discord.Embed(
description=f"Connected to {ctx.guild.voice_client.channel.name}",
color=color,
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
embed.set_footer(text=random.choice(connections))
await ctx.send(embed=embed)
            if voice is not None:
embed = discord.Embed(
description="Already connected to a voice channel ✅", color=color
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(description="Error:\n" + str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["dc", "disconnect"])
async def leave_vc(ctx):
number_of_requests()
try:
if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
voice_client = ctx.message.guild.voice_client
try:
if voice_client.is_connected():
embed = discord.Embed(
description=f"Disconnected from {ctx.guild.voice_client.channel.name}",
color=color,
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(text=random.choice(disconnections))
await ctx.send(embed=embed)
await voice_client.disconnect()
except AttributeError:
embed = discord.Embed(
description="I am not connected to a voice channel", color=color
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to the voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
await ctx.send(embed=embed)
except AttributeError:
embed = discord.Embed(
description="I am not connected to a voice channel", color=color
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["setbit", "bit"])
async def set_bitrate(ctx, kbps):
number_of_requests()
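        # ydl_op is assumed to be the youtube_dl options dict defined earlier in the
        # file; its FFmpegExtractAudio post-processor reads "preferredquality" (kbps)
        # when re-encoding the downloaded audio.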
for items in ydl_op["postprocessors"]:
items["preferredquality"] = str(kbps)
embed = discord.Embed(
description="**Bitrate:** {} kbps".format(kbps), color=color
)
embed.set_author(name="Audio Quality", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["queue", "q"])
async def queue_song(ctx, *, name=None):
number_of_requests()
if ctx.author.id not in [
member.id for member in ctx.guild.voice_client.channel.members
]:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
if name is not None:
# WEB SCRAPE
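                # The YouTube search-results page is fetched directly; the first
                # 11-character video id after "watch?v=" gives the video URL, and the
                # watch page's <title> tag is scraped for a human-readable song name.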
name = name.replace(" ", "+")
htm = urllib.request.urlopen(
"https://www.youtube.com/results?search_query=" + name
)
video = regex.findall(r"watch\?v=(\S{11})", htm.read().decode())
url = "https://www.youtube.com/watch?v=" + video[0]
htm_code = str(urllib.request.urlopen(url).read().decode())
starting = htm_code.find("<title>") + len("<title>")
ending = htm_code.find("</title>")
                # Un-escape the HTML entities that can appear in the page <title>.
                name_of_the_song = (
                    htm_code[starting:ending]
                    .replace("&#39;", "'")
                    .replace("&amp;", "&")
                )
# check if song is already queued
operation_check = (
f"SELECT song_url FROM music_queue WHERE server={str(ctx.guild.id)}"
)
cursor.execute(operation_check)
index, check_list, links = None, [], cursor.fetchall()
for link in links:
link = str(link).replace(
"(", "").replace(",)", "").replace("'", "")
check_list.append(link)
if url in check_list:
def song_position():
for position in range(len(check_list)):
if url == check_list[position]:
return position
embed = discord.Embed(
description=f"{random.choice(already_queued)}\nSong Postion: {song_position()}",
color=color,
)
embed.set_author(name="Already Queued",
icon_url=url_author_music)
await ctx.send(embed=embed)
else:
operation_add_song = f"""INSERT INTO music_queue(song_name, song_url, server)VALUES("{name_of_the_song}","{url}","{str(ctx.guild.id)}")"""
cursor.execute(operation_add_song)
embed = discord.Embed(
description=f"{name_of_the_song}".replace(
" - YouTube", " "),
color=color,
)
embed.set_author(name="Song added", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
operation_view = (
"SELECT song_name, song_url FROM music_queue WHERE server={}".format(
str(ctx.guild.id)
)
)
cursor.execute(operation_view)
songs = cursor.fetchall()
if len(songs) > 0:
try:
string = ""
if server_index[str(ctx.guild.id)] > 10:
index = server_index[str(ctx.guild.id)] - 10
for song in songs[index: index + 20]:
string += (
str(index)
+ ") "
+ f"{song[0]}\n".replace(" - YouTube", " ")
)
index += 1
embed = discord.Embed(description=string, color=color)
embed.set_author(
name=f"{ctx.guild.name}'s Playlist",
icon_url=url_author_music,
)
embed.set_thumbnail(
url=random.choice(url_thumbnail_music))
embed.set_footer(text=f"Number Of Songs: {len(songs)}")
player = await ctx.send(embed=embed)
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
else:
index = server_index[str(ctx.guild.id)]
for song in songs[index: index + 20]:
string += (
str(index)
+ ") "
+ f"{song[0]}\n".replace(" - YouTube", " ")
)
index += 1
embed = discord.Embed(description=string, color=color)
embed.set_author(
name=f"{ctx.guild.name}'s Playlist",
icon_url=url_author_music,
)
embed.set_thumbnail(
url=random.choice(url_thumbnail_music))
embed.set_footer(text=f"Number Of Songs: {len(songs)}")
player = await ctx.send(embed=embed)
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
except KeyError:
embed = discord.Embed(
description=random.choice(default_index), color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description=random.choice(empty_queue), color=color
)
embed.set_author(
name=f"{ctx.guild.name}'s Playlist", icon_url=url_author_music
)
embed.set_thumbnail(url=random.choice(url_thumbnail_music))
embed.set_footer(
text="Queue songs by using _q song, t!q song, |q song")
await ctx.send(embed=embed)
@bot.command(aliases=["play", "p"])
async def play_music(ctx, *, char):
number_of_requests()
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
try:
if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
try:
                    if not char.isdigit():
if str(ctx.guild.id) not in server_index:
server_index[str(ctx.guild.id)] = 0
else:
pass
# Web Scrape
char = char.replace(" ", "+")
htm = urllib.request.urlopen(
"https://www.youtube.com/results?search_query=" + char
)
video = regex.findall(
r"watch\?v=(\S{11})", htm.read().decode())
url = "https://www.youtube.com/watch?v=" + video[0]
htm_code = str(urllib.request.urlopen(url).read().decode())
starting = htm_code.find("<title>") + len("<title>")
ending = htm_code.find("</title>")
                        # Un-escape HTML entities in the scraped <title>.
                        name_of_the_song = (
                            htm_code[starting:ending]
                            .replace("&#39;", "'")
                            .replace("&amp;", "&")
                            .replace(" - YouTube", " ")
                        )
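                        # youtube_download() is defined earlier in the file (not shown
                        # here); it presumably resolves the watch URL to a direct audio
                        # stream URL via youtube_dl, which FFmpegPCMAudio then plays
                        # using FFMPEG_OPTS.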
URL_direct = youtube_download(ctx, url)
                        if not ctx.voice_client.is_playing():
embed = discord.Embed(
description="**Song: **{}".format(name_of_the_song).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now playing", url=url, icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(url=url).thumbnail_url)
embed.add_field(
name="Uploader",
value=pytube.YouTube(url=url).author,
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(url=url).length),
inline=True,
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_direct, **FFMPEG_OPTS))
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏹") # stop
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{}".format(name_of_the_song).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now playing", url=url, icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
embed.set_thumbnail(
url=pytube.YouTube(url=url).thumbnail_url)
embed.add_field(
name="Uploader",
value=pytube.YouTube(url=url).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(url=url).length),
inline=True,
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_direct, **FFMPEG_OPTS))
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏹") # stop
                    if char.isdigit():
# Server Specific Queue
operation = (
f"SELECT * FROM music_queue WHERE server={str(ctx.guild.id)}"
)
cursor.execute(operation)
server_queue = cursor.fetchall()
                        # Track the queue position currently selected for this guild.
                        server_index[str(ctx.guild.id)] = int(char)
try:
URL_queue = youtube_download(
ctx, server_queue[int(char)][1])
                            if not ctx.voice_client.is_playing():
embed = discord.Embed(
description="**Song: **{a}\n**Queue Index: **{b}".format(
a=server_queue[int(char)][0], b=char
).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now playing", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[int(char)][1]
).thumbnail_url
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[int(char)][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[int(char)][1]
).length
),
inline=True,
)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
player = await ctx.send(embed=embed)
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{a}\n**Queue Index: **{b}".format(
a=server_queue[int(char)][0], b=char
).replace(
" - YouTube", " "
),
color=color,
)
embed.set_author(
name="Now playing", icon_url=url_author_music
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[int(char)][1]
).thumbnail_url
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[int(char)][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[int(char)][1]
).length
),
inline=True,
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(
URL_queue, **FFMPEG_OPTS))
# previous track
await player.add_reaction("⏮")
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
except IndexError:
embed = discord.Embed(
description="Looks like there is no song at this index",
color=color,
)
embed.set_author(
name="Oops...", icon_url=url_author_music)
await ctx.send(embed=embed)
except AttributeError:
embed = discord.Embed(
description="I am not connected to a voice channel".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Voice", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
await ctx.send(embed=embed)
except AttributeError:
embed = discord.Embed(
description="I am not connected to a voice channel".format(
ctx.author.name),
color=color,
)
embed.set_author(name="Voice", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["songinfo"])
async def fetch_current_song(ctx):
number_of_requests()
global server_index
operation = "SELECT * FROM music_queue WHERE server={}".format(
str(ctx.guild.id))
cursor.execute(operation)
server_queue = cursor.fetchall()
if len(server_queue) <= 0:
embed = discord.Embed(
description="There are no songs in the queue currently 🤔")
embed.set_author(name="Uh oh...", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
try:
embed = discord.Embed(
description="**Song: **{a}\n**Index: **{b}\n**Views: **{c}\n**Description: **\n{d}".format(
a=server_queue[server_index[str(ctx.guild.id)]][0],
b=server_index[str(ctx.guild.id)],
c=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).views,
d=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).description,
),
color=color,
)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
embed.set_author(name="Currently Playing",
icon_url=url_author_music)
player = await ctx.send(embed=embed)
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
except KeyError:
embed = discord.Embed(
description=random.choice(default_index), color=color)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["prev", "previous"])
async def previous_song(ctx):
number_of_requests()
global server_index
server_index[str(ctx.guild.id)] -= 1
operation = "SELECT * FROM music_queue WHERE server={}".format(
str(ctx.guild.id))
cursor.execute(operation)
server_queue = cursor.fetchall()
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
try:
URL_queue = youtube_download(
ctx, server_queue[server_index[str(ctx.guild.id)]][1]
)
                if not ctx.voice_client.is_playing():
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[server_index[str(ctx.guild.id)]][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(name="Now playing", icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[server_index[str(
ctx.guild.id)]][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[server_index[str(ctx.guild.id)]][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(name="Now playing", icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[server_index[str(
ctx.guild.id)]][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
except IndexError:
embed = discord.Embed(
description="Looks like there is no song at this index", color=color
)
embed.set_author(name="Oops...", icon_url=url_author_music)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["rep", "repeat"])
async def repeat_song(ctx):
operation = "SELECT * FROM music_queue WHERE server={}".format(
str(ctx.guild.id))
cursor.execute(operation)
server_queue = cursor.fetchall()
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
try:
URL_queue = youtube_download(
ctx, server_queue[server_index[str(ctx.guild.id)]][1]
)
            if not ctx.voice_client.is_playing():
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[server_index[str(ctx.guild.id)]][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(name="Repeating Song", icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[server_index[str(ctx.guild.id)]][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(name="Repeating Song", icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["skip", "next"])
async def skip_song(ctx):
number_of_requests()
global server_index
server_index[str(ctx.guild.id)] += 1
operation = "SELECT * FROM music_queue WHERE server={}".format(
str(ctx.guild.id))
cursor.execute(operation)
server_queue = cursor.fetchall()
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
try:
URL_queue = youtube_download(
ctx, server_queue[server_index[str(ctx.guild.id)]][1]
)
                if not ctx.voice_client.is_playing():
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[server_index[str(ctx.guild.id)]][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(name="Now Playing", icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[server_index[str(
ctx.guild.id)]][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
else:
voice.stop()
embed = discord.Embed(
description="**Song: **{}".format(
server_queue[server_index[str(ctx.guild.id)]][0]
).replace(" - YouTube", " "),
color=color,
)
embed.set_author(name="Now playing", icon_url=url_author_music)
embed.set_thumbnail(
url=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).thumbnail_url
)
embed.add_field(
name="Uploader",
value=pytube.YouTube(
url=server_queue[server_index[str(ctx.guild.id)]][1]
).author,
inline=True,
)
embed.add_field(
name="Duration",
value=time_converter(
pytube.YouTube(
url=server_queue[server_index[str(
ctx.guild.id)]][1]
).length
),
inline=True,
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
player = await ctx.send(embed=embed)
voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
await player.add_reaction("⏮") # previous track
await player.add_reaction("▶") # resume
await player.add_reaction("⏸") # pause
await player.add_reaction("⏭") # next
await player.add_reaction("🔂") # repeat
await player.add_reaction("⏹") # stop
await player.add_reaction("🔀") # shuffle
await player.add_reaction("*️⃣") # current song
await player.add_reaction("🔠") # display queue
await player.add_reaction("🔼") # scroll
await player.add_reaction("🔽") # scroll
except IndexError:
embed = discord.Embed(
description="Looks like there is no song at this index", color=color
)
embed.set_author(name="Oops...", icon_url=url_author_music)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["pause"])
async def pause_song(ctx):
number_of_requests()
voice_client = ctx.message.guild.voice_client
pause = ctx.voice_client.is_paused()
playing = ctx.voice_client.is_playing()
if ctx.author.id in [mem.id for mem in ctx.voice_client.channel.members]:
try:
                if playing:
voice_client.pause()
message = await ctx.send("Song paused")
await message.add_reaction("⏸")
else:
                    if pause:
embed = discord.Embed(
description="Song is already paused ❗", color=color
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="No song playing currently ❗", color=color
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["resume", "res"])
async def resume_song(ctx):
number_of_requests()
voice_client = ctx.message.guild.voice_client
pause = ctx.voice_client.is_paused()
playing = ctx.voice_client.is_playing()
if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
try:
                if pause:
voice_client.resume()
message = await ctx.send("Song resumed")
await message.add_reaction("▶")
else:
                    if playing:
embed = discord.Embed(
description="Song is not paused 🤔", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="Nothing is playing right now", color=color
)
embed.set_author(
name="Spider-Punk Radio™", icon_url=url_author_music
)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["stop", "st"])
async def stop_song(ctx):
number_of_requests()
voice_client = ctx.message.guild.voice_client
pause = ctx.voice_client.is_paused()
playing = ctx.voice_client.is_playing()
if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
try:
                if playing or pause:
voice_client.stop()
message = await ctx.send("Song stopped")
await message.add_reaction("⏹")
else:
embed = discord.Embed(
description="Nothing is playing right now", color=color
)
embed.set_author(name="Spider-Punk Radio™",
icon_url=url_author_music)
embed.set_footer(
text="Voice Channel Bitrate: {} kbps".format(
ctx.guild.voice_client.channel.bitrate / 1000
)
)
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(description=str(e), color=color)
embed.set_author(name="Error", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
embed = discord.Embed(
description="{}, buddy, connect to a voice channel first 🔊".format(
ctx.author.name
),
color=color,
)
embed.set_author(name="Spider-Punk Radio™", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["rem", "remove"])
async def remove_song(ctx, index):
number_of_requests()
operation_view = 'SELECT * FROM music_queue WHERE server="{}"'.format(
str(ctx.guild.id)
)
cursor.execute(operation_view)
songs = cursor.fetchall()
embed = discord.Embed(description="{}".format(
songs[int(index)][0]), color=color)
embed.set_author(name="Song removed", icon_url=url_author_music)
await ctx.send(embed=embed)
operation_remove = (
"DELETE FROM music_queue WHERE song_url = '{a}' AND server='{b}'".format(
a=songs[int(index)][1], b=str(ctx.guild.id)
)
)
cursor.execute(operation_remove)
@bot.command(aliases=["clear_queue", "cq"])
async def clear_song_queue(ctx):
number_of_requests()
operation_queue = "SELECT * FROM music_queue WHERE server={}".format(
str(ctx.guild.id)
)
cursor.execute(operation_queue)
songs = cursor.fetchall()
if len(songs) > 0:
operation_clear_song = "DELETE FROM music_queue WHERE server={}".format(
str(ctx.guild.id)
)
cursor.execute(operation_clear_song)
message = await ctx.send("Queue Cleared")
await message.add_reaction("✅")
else:
embed_empty = discord.Embed(
description=random.choice(empty_queue), color=color)
embed_empty.set_author(name="Hmm...", icon_url=url_author_music)
await ctx.send(embed=embed_empty)
# -------------------------------------------------- EXTRA ---------------------------------------------------------
@bot.command(aliases=["thwip"])
async def thwipper(ctx):
number_of_requests()
await ctx.send(embed=discord.Embed(title="*Thwip!*", color=color))
@bot.command(aliases=["addbday"])
async def add_user_bday(ctx, member: discord.Member, month, day):
number_of_requests()
op_check = "SELECT mem_id FROM birthdays"
cursor.execute(op_check)
memIDs = cursor.fetchall()
try:
a = str([memID for memID in memIDs]).replace(
"('", "").replace("',)", "")
if str(member.id) not in a:
op_insert = "INSERT INTO birthdays(mem_id, mem_month, mem_day)VALUES('{a}',{b},{c})".format(
a=member.id, b=month, c=day
)
cursor.execute(op_insert)
await ctx.send(
embed=discord.Embed(
description="{}'s birthday added to database".format(
member.display_name
),
color=color,
)
)
else:
await ctx.send(
embed=discord.Embed(
description="{}'s birthday is already added in my database".format(
member.display_name
),
color=color,
)
)
except Exception as e:
await ctx.send(str(e))
@bot.command(aliases=["rembday"])
async def remove_user_bday(ctx, member: discord.Member):
number_of_requests()
op_check = "SELECT mem_id FROM birthdays"
cursor.execute(op_check)
memIDs = cursor.fetchall()
try:
a = str([memID for memID in memIDs]).replace(
"('", "").replace("',)", "")
if str(member.id) in a:
op_insert = "DELETE FROM birthdays WHERE mem_id={}".format(
member.id)
cursor.execute(op_insert)
await ctx.send(
embed=discord.Embed(
description="{}'s birthday removed from database".format(
member.display_name
),
color=color,
)
)
else:
await ctx.send(
embed=discord.Embed(
description="{}'s birthday does not exist in my database".format(
member.display_name
),
color=color,
)
)
except Exception as e:
await ctx.send(str(e))
@bot.command(aliases=["bday"])
async def check_user_bdays_and_wish(ctx):
number_of_requests()
await ctx.channel.purge(limit=1)
op_check = "SELECT * FROM birthdays"
cursor.execute(op_check)
bdays = cursor.fetchall()
channel = None
toggle = 0
for i in ctx.guild.channels:
for j in announcement_channels:
if i.name == j:
                    send_to = j
channel = discord.utils.get(ctx.guild.channels, name=send_to)
        for bday in bdays:  # each row is (mem_id, mem_month, mem_day), as inserted by add_user_bday
if (
datetime.datetime.today().month == bday[1]
and datetime.datetime.today().day == bday[2]
):
name = bot.get_user(int(bday[0])).name
wishes = [
f"🎊 Happy Birthday {name} 🎊",
f"🎉 Happy Birthday {name} 🎉",
f"✨ Happy Birthday {name} ✨",
f"🎇 Happy Birthday {name} 🎇",
]
embed = discord.Embed(
title=random.choice(wishes),
description=random.choice(descriptions),
color=color,
)
embed.set_image(url=random.choice(url_bdays_spiderman))
embed.set_thumbnail(url=bot.get_user(int(bday[0])).avatar_url)
await channel.send(f"<@!{bot.get_user(int(bday[0])).id}>")
message = await channel.send(embed=embed)
await ctx.send(embed=discord.Embed(description="Wish Sent 🥳", color=color))
await message.add_reaction("🎁")
await message.add_reaction("🎈")
await message.add_reaction("🎂")
await message.add_reaction("🎆")
await message.add_reaction("🎉")
toggle = 1
if toggle == 0:
await ctx.send(
embed=discord.Embed(
description=random.choice(none_today), color=color)
)
# --------------------------------------------------------------------------------------------------------------------------------------
| 39.75468
| 498
| 0.445821
|
4a198597472f76d3c5214d8878ae42793b43f57c
| 33
|
py
|
Python
|
optimus/engines/base/cudf/dataframe.py
|
ironmussa/Optimus
|
fbaea6e0957f0bc016280a85ff021904faac20c5
|
[
"Apache-2.0"
] | 1,045
|
2017-07-17T17:59:46.000Z
|
2021-06-15T07:06:48.000Z
|
optimus/engines/base/cudf/dataframe.py
|
ironmussa/Optimus
|
fbaea6e0957f0bc016280a85ff021904faac20c5
|
[
"Apache-2.0"
] | 955
|
2017-07-14T15:47:58.000Z
|
2021-05-27T14:16:24.000Z
|
optimus/engines/base/cudf/dataframe.py
|
ironmussa/Optimus
|
fbaea6e0957f0bc016280a85ff021904faac20c5
|
[
"Apache-2.0"
] | 226
|
2017-08-04T20:41:33.000Z
|
2021-05-21T08:28:33.000Z
|
class CUDFBaseDataFrame:
pass
| 16.5
| 24
| 0.787879
|
4a1986395e0166494f27fce5b7d3e9f9fe3d84ac
| 1,614
|
py
|
Python
|
examples/dogpiling/dogpiling_tornado.py
|
mwek/memoize
|
bd35641d676dbaaf161a61dbf1176742cfa187cc
|
[
"Apache-2.0"
] | null | null | null |
examples/dogpiling/dogpiling_tornado.py
|
mwek/memoize
|
bd35641d676dbaaf161a61dbf1176742cfa187cc
|
[
"Apache-2.0"
] | null | null | null |
examples/dogpiling/dogpiling_tornado.py
|
mwek/memoize
|
bd35641d676dbaaf161a61dbf1176742cfa187cc
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
from tornado import gen
from tornado.ioloop import IOLoop
from memoize.configuration import MutableCacheConfiguration, DefaultInMemoryCacheConfiguration
from memoize.entrybuilder import ProvidedLifeSpanCacheEntryBuilder
from memoize.wrapper import memoize
# scenario configuration
concurrent_requests = 5
request_batches_execution_count = 50
cached_value_ttl_millis = 200
delay_between_request_batches_millis = 70
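# With these numbers: 5 concurrent coroutines hit the memoized function every 70 ms,
# but a result stays fresh for 200 ms, so roughly one backend call per TTL window is
# expected (the "Predicted" figure printed at the end) instead of 5 per batch.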
# results/statistics
unique_calls_under_memoize = 0
@memoize(configuration=MutableCacheConfiguration
.initialized_with(DefaultInMemoryCacheConfiguration())
.set_entry_builder(
ProvidedLifeSpanCacheEntryBuilder(update_after=timedelta(milliseconds=cached_value_ttl_millis))
))
@gen.coroutine
def cached_with_memoize():
global unique_calls_under_memoize
unique_calls_under_memoize += 1
yield gen.sleep(0.01)
return unique_calls_under_memoize
@gen.coroutine
def main():
for i in range(request_batches_execution_count):
res = yield [x() for x in [cached_with_memoize] * concurrent_requests]
print(res)
# yield [x() for x in [cached_with_different_cache] * concurrent_requests]
yield gen.sleep(delay_between_request_batches_millis / 1000)
print("Memoize generated {} unique backend calls".format(unique_calls_under_memoize))
predicted = (delay_between_request_batches_millis * request_batches_execution_count) // cached_value_ttl_millis
print("Predicted (according to TTL) {} unique backend calls".format(predicted))
if __name__ == "__main__":
IOLoop.current().run_sync(main)
| 33.625
| 115
| 0.797398
|
4a19876848d73ec59fa1a20029b91ba0479ff606
| 5,548
|
py
|
Python
|
test_settings.py
|
what-digital/aldryn-newsblog-blog-teaser-size
|
c52cb256fe3b608838f2184de9575b6cbbfd5f8e
|
[
"BSD-3-Clause"
] | null | null | null |
test_settings.py
|
what-digital/aldryn-newsblog-blog-teaser-size
|
c52cb256fe3b608838f2184de9575b6cbbfd5f8e
|
[
"BSD-3-Clause"
] | null | null | null |
test_settings.py
|
what-digital/aldryn-newsblog-blog-teaser-size
|
c52cb256fe3b608838f2184de9575b6cbbfd5f8e
|
[
"BSD-3-Clause"
] | 2
|
2019-10-22T04:30:28.000Z
|
2019-10-22T05:09:16.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils.version import LooseVersion
from django import get_version
from cms import __version__ as cms_string_version
import os
django_version = LooseVersion(get_version())
cms_version = LooseVersion(cms_string_version)
HELPER_SETTINGS = {
'TIME_ZONE': 'Europe/Zurich',
'INSTALLED_APPS': [
'aldryn_apphooks_config',
'aldryn_categories',
'aldryn_people',
'aldryn_reversion',
'aldryn_translation_tools',
'djangocms_text_ckeditor',
'easy_thumbnails',
'filer',
'mptt',
'parler',
'reversion',
'sortedm2m',
'taggit',
],
'TEMPLATE_DIRS': (
os.path.join(
os.path.dirname(__file__),
'aldryn_newsblog', 'tests', 'templates'),
),
'ALDRYN_NEWSBLOG_TEMPLATE_PREFIXES': [('dummy', 'dummy'), ],
'CMS_PERMISSION': True,
'SITE_ID': 1,
'LANGUAGES': (
('en', 'English'),
('de', 'German'),
('fr', 'French'),
),
'CMS_LANGUAGES': {
1: [
{
'code': 'en',
'name': 'English',
'fallbacks': ['de', 'fr', ]
},
{
'code': 'de',
'name': 'Deutsche',
'fallbacks': ['en', ] # FOR TESTING DO NOT ADD 'fr' HERE
},
{
'code': 'fr',
'name': 'Française',
'fallbacks': ['en', ] # FOR TESTING DO NOT ADD 'de' HERE
},
{
'code': 'it',
'name': 'Italiano',
'fallbacks': ['fr', ] # FOR TESTING, LEAVE AS ONLY 'fr'
},
],
'default': {
'redirect_on_fallback': True, # PLEASE DO NOT CHANGE THIS
}
},
# app-specific
'PARLER_LANGUAGES': {
1: [
{
'code': 'en',
'fallbacks': ['de', ],
},
{
'code': 'de',
'fallbacks': ['en', ],
},
],
'default': {
'code': 'en',
'fallbacks': ['en'],
'hide_untranslated': False
}
},
#
# NOTE: The following setting `PARLER_ENABLE_CACHING = False` is required
# for tests to pass.
#
# There appears to be a bug in Parler which leaves translations in Parler's
# cache even after the parent object has been deleted. In production
# environments, this is unlikely to affect anything, because newly created
# objects will have new IDs. In testing, new objects are created with IDs
# that were previously used, which reveals this issue.
#
'PARLER_ENABLE_CACHING': False,
'ALDRYN_SEARCH_DEFAULT_LANGUAGE': 'en',
'HAYSTACK_CONNECTIONS': {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
'de': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
},
'THUMBNAIL_HIGH_RESOLUTION': True,
'THUMBNAIL_PROCESSORS': (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
# 'easy_thumbnails.processors.scale_and_crop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
),
# 'DATABASES': {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': 'mydatabase',
# },
# 'mysql': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'newsblog_test',
# 'USER': 'root',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': '3306',
# },
# 'postgres': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'newsblog_test',
# 'USER': 'test',
# 'PASSWORD': '',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
# }
# This set of MW classes should work for Django 1.6 and 1.7.
'MIDDLEWARE_CLASSES': [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# NOTE: This will actually be removed below in CMS<3.2 installs.
'cms.middleware.utils.ApphookReloadMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
]
}
# If using CMS 3.2+, use the CMS middleware for ApphookReloading, otherwise,
# use aldryn_apphook_reload's.
if cms_version < LooseVersion('3.2.0'):
HELPER_SETTINGS['MIDDLEWARE_CLASSES'].remove(
'cms.middleware.utils.ApphookReloadMiddleware')
HELPER_SETTINGS['MIDDLEWARE_CLASSES'].insert(
0, 'aldryn_apphook_reload.middleware.ApphookReloadMiddleware')
HELPER_SETTINGS['INSTALLED_APPS'].insert(
0, 'aldryn_apphook_reload')
def run():
from djangocms_helper import runner
# --boilerplate option will ensure correct boilerplate settings are
# added to settings
runner.cms('aldryn_newsblog', extra_args=[])
if __name__ == "__main__":
run()
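# Note (added): running this file directly hands HELPER_SETTINGS to djangocms-helper,
# which builds a throwaway Django/django CMS environment for the aldryn_newsblog app
# and runs its tests with the boilerplate settings mentioned above.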
| 31.885057
| 79
| 0.553533
|
4a19877d272916b1371fc488f7f19a6471da2707
| 2,931
|
py
|
Python
|
vortexasdk/endpoints/freight_pricing_result.py
|
V0RT3X4/python-sdk
|
4cffae83b90a58a56f1a534057fa1ca1c8671e05
|
[
"Apache-2.0"
] | 9
|
2019-11-13T17:14:55.000Z
|
2019-11-18T16:06:13.000Z
|
vortexasdk/endpoints/freight_pricing_result.py
|
VorTECHsa/python-sdk
|
d85aabd8d9843e4d04d857360492bea002c2b24b
|
[
"Apache-2.0"
] | 114
|
2020-01-08T11:08:24.000Z
|
2022-03-30T16:42:23.000Z
|
vortexasdk/endpoints/freight_pricing_result.py
|
V0RT3X4/python-sdk
|
4cffae83b90a58a56f1a534057fa1ca1c8671e05
|
[
"Apache-2.0"
] | 6
|
2020-05-28T00:09:02.000Z
|
2022-03-14T03:52:44.000Z
|
import functools
import os
from multiprocessing.pool import Pool
from typing import List
from vortexasdk.api.freight_pricing import FreightPricing
from vortexasdk.api.vessel_availability import VesselAvailability
import pandas as pd
from vortexasdk.api.entity_flattening import convert_to_flat_dict
from vortexasdk.api.search_result import Result
from vortexasdk.result_conversions import create_dataframe, create_list
from vortexasdk.logger import get_logger
logger = get_logger(__name__)
class FreightPricingResult(Result):
"""
Container class holdings search results returns from the freight pricing endpoint.
This class has two methods, `to_list()`, and `to_df()`, allowing search results to be represented as a list
or as a `pd.DataFrame` , respectively.
"""
def to_list(self) -> List[FreightPricing]:
"""Represent availability as a list."""
# noinspection PyTypeChecker
return create_list(super().to_list(), FreightPricing)
def to_df(self, columns=None) -> pd.DataFrame:
"""
Represent freight pricing as a `pd.DataFrame`.
# Arguments
columns: Output columns present in the `pd.DataFrame`.
Enter `columns='all'` to return all available columns.
Enter `columns=None` to use `freight_pricing_result.DEFAULT_COLUMNS`.
# Returns
`pd.DataFrame`, one row per `FreightPricing`.
## Notes
By default, the columns returned are something along the lines of.
```python
DEFAULT_COLUMNS = [
'short_code',
            'rate',
'rate_unit',
'cost',
'cost_unit',
'tce',
'tce_unit'
]
```
        The exact default columns used can be found at `freight_pricing_result.DEFAULT_COLUMNS`
A near complete list of columns is given below
```
[
'id',
'short_code',
            'rate',
            'rate_precision',
            'rate_unit',
            'cost',
            'cost_precision',
'cost_unit',
'tce',
'tce_precision',
'tce_unit',
'source',
'route_prediction'
]
```
"""
if columns is None:
columns = DEFAULT_COLUMNS
logger.debug("Converting each Freight Pricing object to a flat dictionary")
flatten = functools.partial(
convert_to_flat_dict, cols=columns
)
with Pool(os.cpu_count()) as pool:
records = pool.map(flatten, super().to_list())
return create_dataframe(
columns=columns,
default_columns=DEFAULT_COLUMNS,
data=records,
logger_description="FreightPricing",
)
DEFAULT_COLUMNS = [
'short_code',
    'rate',
'rate_unit',
'cost',
'cost_unit',
'tce',
'tce_unit'
]
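# Hedged usage sketch (not part of the original module): a FreightPricingResult is
# normally returned by the SDK's freight-pricing search endpoint, after which the two
# conversion methods documented above apply, e.g.
#
#     result = freight_pricing_endpoint.search(...)    # hypothetical call
#     records = result.to_list()                       # List[FreightPricing]
#     df = result.to_df(columns=DEFAULT_COLUMNS)       # one row per FreightPricing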
| 27.138889
| 111
| 0.604231
|
4a1987855408b3d3916be773268a6f7bba79260d
| 543
|
py
|
Python
|
product/models/saleForm.py
|
puchopsky/pythonPdv
|
3a53212840c83f577be4b6a48774a4399e1bee04
|
[
"MIT"
] | null | null | null |
product/models/saleForm.py
|
puchopsky/pythonPdv
|
3a53212840c83f577be4b6a48774a4399e1bee04
|
[
"MIT"
] | null | null | null |
product/models/saleForm.py
|
puchopsky/pythonPdv
|
3a53212840c83f577be4b6a48774a4399e1bee04
|
[
"MIT"
] | null | null | null |
from django.db import models
import uuid
class SaleForm(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # This field specifies whether the product is sold by box, unit, half box, liters, etc.
form = models.CharField(max_length=50, default='Unit')
    # This field sets an equivalent for how many units need to be discounted from the stock, e.g. if a box contains 12
    # units then 12 units need to be removed from the stock
totalUnitsInFromSale = models.FloatField(default=1.0)
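# Hedged example (added): a "Box" sale form for a product packed 12 to a box would be
# created roughly as SaleForm(form="Box", totalUnitsInFromSale=12.0), so selling one
# box removes 12 units from stock, as described in the field comments above.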
| 36.2
| 117
| 0.745856
|
4a19886be809c305925ec789021a2df520e7edc9
| 4,018
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/manifest.py
|
beda-software/cookiecutter-beda-software-stack
|
f1f0aef4964d43d0720e45866420aa02b25626f8
|
[
"MIT"
] | 2
|
2021-04-16T04:50:25.000Z
|
2021-04-29T07:49:16.000Z
|
{{cookiecutter.project_slug}}/backend/app/manifest.py
|
beda-software/cookiecutter-beda-software-stack
|
f1f0aef4964d43d0720e45866420aa02b25626f8
|
[
"MIT"
] | 5
|
2020-10-08T08:14:16.000Z
|
2020-12-06T08:38:32.000Z
|
{{cookiecutter.project_slug}}/backend/app/manifest.py
|
beda-software/cookiecutter-beda-software-stack
|
f1f0aef4964d43d0720e45866420aa02b25626f8
|
[
"MIT"
] | 1
|
2020-09-21T09:10:20.000Z
|
2020-09-21T09:10:20.000Z
|
import os
from app import config
from app.access_policy import access_policies
from app.contrib.sdk_ext import (
merge_resources,
load_notification_templates,
load_resources,
load_sql_migrations,
)
meta_resources = merge_resources(
{
"Client": {
"SPA": {"secret": "123456", "grant_types": ["password"]},
"google-client": {
"auth": {
"authorization_code": {
"redirect_uri": "{}/google-signin".format(config.frontend_url)
}
},
"first_party": True,
"grant_types": ["authorization_code"],
},
},
{% if cookiecutter.add_google_oauth|lower == 'y' %}
"IdentityProvider": {
"google": {
"type": "google",
"client": {
"id": config.google_oauth_app_id,
"secret": config.google_oauth_app_secret,
},
},
},
{% endif %}
"AidboxConfig": {
"provider": {
"provider": {"console": {"type": "console"}, "default": "console"},
}
},
"AccessPolicy": access_policies,
"NotificationTemplate": {
**load_notification_templates(
os.path.join(config.root_dir, "resources/notificationtemplates/email")
),
},
"SearchParameter": {
# Place custom search parameters here
},
"PGSequence": {
# Don't forget to add new sequence to new migration
# Remove this comment after https://github.com/Aidbox/Issues/issues/167 is solved
},
"Attribute": {
# TODO: remove when Aidbox adds this status
"Notification.status": {
"path": ["status"],
"type": {"resourceType": "Entity", "id": "code"},
"resource": {"resourceType": "Entity", "id": "Notification"},
"module": "auth",
"enum": ["delivered", "error", "failure"],
},
},
},
load_resources(os.path.join(config.root_dir, "resources/entities")),
)
seeds = merge_resources(
{
"User": {
"superadmin": {
"password": config.app_superadmin_password,
"email": config.app_superadmin_email,
"data": {
"givenName": "Super",
"familyName": "Admin",
"superAdmin": {"resourceType": "Practitioner", "id": "superadmin"},
},
},
},
"Practitioner": {
"superadmin": {
"name": [{"given": ["Super"], "family": "Admin"}],
"telecom": [{"system": "email", "value": config.app_superadmin_email,}],
},
},
}
if config.dev_init
else {},
load_resources(os.path.join(config.root_dir, "resources/seeds")),
)
entities = {
{% if cookiecutter.add_push_notifications|lower == 'y' %}
"PushSubscription": {
"attrs": {
"user": {
"type": "Reference",
"isRequired": True,
"search": {"name": "user", "type": "reference",},
},
"session": {
"type": "Reference",
"isRequired": False,
"search": {"name": "session", "type": "reference",},
},
"deviceType": {
"type": "string",
"enum": ["ios", "android"],
"isRequired": True,
"search": {"name": "status", "type": "token",},
},
"deviceToken": {
"type": "string",
"isRequired": True,
"search": {"name": "device-token", "type": "token",},
},
},
},
{% endif %}
}
migrations = load_sql_migrations(os.path.join(config.root_dir, "resources/migrations"))
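# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original manifest): the kind of
# recursive dictionary merge that `merge_resources` from app.contrib.sdk_ext
# is assumed to perform: nested dicts merged key by key, with later sources
# winning on conflicts. The real helper may behave differently.
def _merge_example(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = _merge_example(merged[key], value)
        else:
            merged[key] = value
    return merged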
| 32.144
| 93
| 0.459681
|
4a1989930e62aada0c524f70a7bcac427a0973d9
| 4,828
|
py
|
Python
|
ResultsStats/AddInfo2Grid.py
|
petebunting/DEA_Mangroves_2018
|
791cb5d92c4382a0780c04a3b38c028b35224154
|
[
"MIT"
] | null | null | null |
ResultsStats/AddInfo2Grid.py
|
petebunting/DEA_Mangroves_2018
|
791cb5d92c4382a0780c04a3b38c028b35224154
|
[
"MIT"
] | null | null | null |
ResultsStats/AddInfo2Grid.py
|
petebunting/DEA_Mangroves_2018
|
791cb5d92c4382a0780c04a3b38c028b35224154
|
[
"MIT"
] | null | null | null |
import pandas
import numpy
import os.path
import osgeo.gdal as gdal
import osgeo.ogr as ogr
def calcStats(data, gridID):
maxDiff = 0
maxDiffYear = 0
for i in range(data[gridID][['total']].index.shape[0]):
if i == 1:
maxDiff = abs(data[gridID][['total']].values[i][0] - data[gridID][['total']].values[i-1][0])
maxDiffYear = int(data[gridID][['total']].index[i])
elif i > 1:
diff = abs(data[gridID][['total']].values[i][0] - data[gridID][['total']].values[i-1][0])
if diff > maxDiff:
maxDiff = diff
maxDiffYear = int(data[gridID][['total']].index[i])
StdTotalVal = data[gridID, 'total'].std()/data[gridID, 'total'].mean()
MaxStdTotVal = numpy.max([data[gridID, 'low'].std(), data[gridID, 'mid'].std(), data[gridID, 'high'].std()])/data[gridID, 'total'].mean()
MangAreaVal = data[gridID, 'total'].mean()
diff8716AreaVal = data[gridID, 'total']['2016'] - data[gridID, 'total']['1987']
diff1216AreaVal = data[gridID, 'total']['2016'] - data[gridID, 'total']['2012']
diff9110AreaVal = data[gridID, 'total']['2010'] - data[gridID, 'total']['1991']
diff1016AreaVal = data[gridID, 'total']['2016'] - data[gridID, 'total']['2010']
diff9116AreaVal = data[gridID, 'total']['2016'] - data[gridID, 'total']['1991']
return maxDiffYear, StdTotalVal, MaxStdTotVal, MangAreaVal, diff8716AreaVal, diff1216AreaVal, diff9110AreaVal, diff1016AreaVal, diff9116AreaVal
gridSHP = '/Users/pete/Temp/AustralianMangroves/AustraliaSqGrid_MangroveRegionsV1.shp'
outGridSHP = '/Users/pete/Temp/AustralianMangroves/AustraliaSqGrid_MangroveRegionsV1_ExtraV3Info.shp'
data = pandas.read_pickle("MangChangePVFC_V3.0_1987_to_2016.pkl.gz", compression="gzip")
inDataSet = gdal.OpenEx(gridSHP, gdal.OF_VECTOR )
if inDataSet is None:
raise Exception("Failed to open input shapefile")
inLayer = inDataSet.GetLayer()
# Create shapefile driver
driver = gdal.GetDriverByName( "ESRI Shapefile" )
# create the output layer
if os.path.exists(outGridSHP):
raise Exception('Output shapefile already exists - stopping.')
outDataSet = driver.Create(outGridSHP, 0, 0, 0, gdal.GDT_Unknown )
outLyrName = os.path.splitext(os.path.basename(outGridSHP))[0]
outLayer = outDataSet.CreateLayer(outLyrName, inLayer.GetSpatialRef(), inLayer.GetGeomType() )
inLayerDefn = inLayer.GetLayerDefn()
for i in range(0, inLayerDefn.GetFieldCount()):
fieldDefn = inLayerDefn.GetFieldDefn(i)
outLayer.CreateField(fieldDefn)
yearMaxField = ogr.FieldDefn("YearMax", ogr.OFTInteger)
outLayer.CreateField(yearMaxField)
stdTotalField = ogr.FieldDefn("StdTotal", ogr.OFTReal)
outLayer.CreateField(stdTotalField)
maxStdTotalField = ogr.FieldDefn("MaxStdTot", ogr.OFTReal)
outLayer.CreateField(maxStdTotalField)
meanAreaField = ogr.FieldDefn("MangArea", ogr.OFTInteger)
outLayer.CreateField(meanAreaField)
diff8716AreaField = ogr.FieldDefn("d8716Area", ogr.OFTInteger)
outLayer.CreateField(diff8716AreaField)
diff1216AreaField = ogr.FieldDefn("d1216Area", ogr.OFTInteger)
outLayer.CreateField(diff1216AreaField)
diff9110AreaField = ogr.FieldDefn("d9110Area", ogr.OFTInteger)
outLayer.CreateField(diff9110AreaField)
diff1016AreaField = ogr.FieldDefn("d1016Area", ogr.OFTInteger)
outLayer.CreateField(diff1016AreaField)
diff9116AreaField = ogr.FieldDefn("d9116Area", ogr.OFTInteger)
outLayer.CreateField(diff9116AreaField)
outLayerDefn = outLayer.GetLayerDefn()
# loop through the input features
inFeature = inLayer.GetNextFeature()
while inFeature:
geom = inFeature.GetGeometryRef()
if geom is not None:
gridID = inFeature.GetField('GridID')
print(gridID)
outFeature = ogr.Feature(outLayerDefn)
outFeature.SetGeometry(geom)
for i in range(0, inLayerDefn.GetFieldCount()):
outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
YearMaxVal, StdTotalVal, MaxStdTotVal, MangAreaVal, diff8716AreaVal, diff1216AreaVal, diff9110AreaVal, diff1016AreaVal, diff9116AreaVal = calcStats(data, gridID)
outFeature.SetField("YearMax", YearMaxVal)
outFeature.SetField("StdTotal", StdTotalVal)
outFeature.SetField("MaxStdTot", MaxStdTotVal)
outFeature.SetField("MangArea", MangAreaVal)
outFeature.SetField("d8716Area", float(diff8716AreaVal))
outFeature.SetField("d1216Area", float(diff1216AreaVal))
outFeature.SetField("d9110Area", float(diff9110AreaVal))
outFeature.SetField("d1016Area", float(diff1016AreaVal))
outFeature.SetField("d9116Area", float(diff9116AreaVal))
outLayer.CreateFeature(outFeature)
outFeature = None
inFeature = inLayer.GetNextFeature()
# Save and close the shapefiles
inDataSet = None
outDataSet = None
| 43.107143
| 169
| 0.719345
|
4a1989ba8e28e7e0599e645b21a661dbe91e1e2e
| 1,104
|
py
|
Python
|
modules/users/migrations/0001_initial.py
|
bobjiangps/automation_center
|
970262fe30942e6a9fc236f1ca41f060d3eb9f9d
|
[
"MIT"
] | 8
|
2021-02-05T08:34:49.000Z
|
2022-03-12T09:55:11.000Z
|
modules/users/migrations/0001_initial.py
|
bobjiangps/automation_center
|
970262fe30942e6a9fc236f1ca41f060d3eb9f9d
|
[
"MIT"
] | null | null | null |
modules/users/migrations/0001_initial.py
|
bobjiangps/automation_center
|
970262fe30942e6a9fc236f1ca41f060d3eb9f9d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2020-08-14 10:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0011_update_proxy_permissions'),
('projects', '0007_auto_20200813_1750'),
]
operations = [
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('group', 'project', 'user')},
},
),
]
| 34.5
| 118
| 0.622283
|
4a1989c750d03924231e1abdd598ef113a3e20bc
| 3,782
|
py
|
Python
|
python_pubsub/apps/publish.py
|
kragen/mod_pubsub
|
9abcdb07b02b7979bf4538ac8047783100ecb7bc
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-04-05T14:53:29.000Z
|
2021-04-05T14:53:29.000Z
|
python_pubsub/apps/publish.py
|
kragen/mod_pubsub
|
9abcdb07b02b7979bf4538ac8047783100ecb7bc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
python_pubsub/apps/publish.py
|
kragen/mod_pubsub
|
9abcdb07b02b7979bf4538ac8047783100ecb7bc
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-04-05T14:53:41.000Z
|
2021-04-05T14:53:41.000Z
|
#!/usr/bin/python
"""
publish.py -- Command-line publish.
Arguments are server URL, topic, payload size, and expires.
Defaults are http://127.0.0.1:8000/kn , /what/chat , 1024, and +15.
Example of usage:
./publish.py http://127.0.0.1:8000/kn /what/test "1024*1024" +15
$Id: publish.py,v 1.3 2004/04/19 05:39:15 bsittler Exp $
Contact Information:
http://mod-pubsub.sf.net/
mod-pubsub-developer@lists.sourceforge.net
"""
## Copyright 2000-2004 KnowNow, Inc. All rights reserved.
## @KNOWNOW_LICENSE_START@
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
##
## 3. Neither the name of the KnowNow, Inc., nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## @KNOWNOW_LICENSE_END@
##
# Include standard system libraries:
import sys
# Include local pubsub library:
sys.path = [ "../" ] + sys.path
import pubsublib, scheduler, asyncore
class publish_payload:
def __init__(self,
server_url = "http://127.0.0.1:8000/kn",
topic = "/what/chat",
payload_size = "1024",
expires = "+15"):
ua = pubsublib.HTTPUserAgent()
self.client = pubsublib.SimpleClient(ua, server_url)
print ("\nPublishing message of size " + str(eval(payload_size)) +
" and expires " + expires + ".\n" +
"Note that this creates two HTTPConnections: one for the tunnel" +
" and one for the post.\n")
self.client.publish(topic,
{ "kn_payload" : "X" * eval(payload_size),
"kn_expires" : expires },
self)
self.running = 1
def onStatus(self, event):
print ("\n\nMessage published. Status is " + event['status'] + ".\n" +
event['kn_payload'] + "\n\n")
self.client.disconnect()
asyncore.poll()
self.running = 0
def main(argv):
    # Fall back to the documented defaults for any omitted arguments.
    server_url = "http://127.0.0.1:8000/kn"
    topic = "/what/chat"
    payload_size = "1024"
    expires = "+15"
    if len(argv) > 1:
        server_url = argv[1]
    if len(argv) > 2:
        topic = argv[2]
    if len(argv) > 3:
        payload_size = argv[3]
    if len(argv) > 4:
        expires = argv[4]
    publisher = publish_payload(server_url, topic, payload_size, expires)
while publisher.running:
asyncore.poll(scheduler.timeout())
scheduler.run()
if __name__ == "__main__": main(sys.argv)
# End of publish.py
| 36.019048
| 81
| 0.640137
|
4a1989cfc74de2130e7c84df6b9a1a26795dd873
| 2,235
|
py
|
Python
|
kedro_mlflow/mlflow/kedro_pipeline_model.py
|
akruszewski/kedro-mlflow
|
330cab52642a0993e957740726e7d453282f1588
|
[
"Apache-2.0"
] | null | null | null |
kedro_mlflow/mlflow/kedro_pipeline_model.py
|
akruszewski/kedro-mlflow
|
330cab52642a0993e957740726e7d453282f1588
|
[
"Apache-2.0"
] | null | null | null |
kedro_mlflow/mlflow/kedro_pipeline_model.py
|
akruszewski/kedro-mlflow
|
330cab52642a0993e957740726e7d453282f1588
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
from pathlib import Path
from kedro.io import DataCatalog, MemoryDataSet
from kedro.runner import SequentialRunner
from mlflow.pyfunc import PythonModel
from kedro_mlflow.pipeline.pipeline_ml import PipelineML
class KedroPipelineModel(PythonModel):
def __init__(self, pipeline_ml: PipelineML, catalog: DataCatalog):
self.pipeline_ml = pipeline_ml
self.initial_catalog = pipeline_ml.extract_pipeline_catalog(catalog)
self.loaded_catalog = DataCatalog()
def load_context(self, context):
# a consistency check is made when loading the model
# it would be better to check when saving the model
# but we rely on a mlflow function for saving, and it is unaware of kedro
# pipeline structure
mlflow_artifacts_keys = set(context.artifacts.keys())
kedro_artifacts_keys = set(
self.pipeline_ml.inference.inputs() - {self.pipeline_ml.input_name}
)
if mlflow_artifacts_keys != kedro_artifacts_keys:
in_artifacts_but_not_inference = (
mlflow_artifacts_keys - kedro_artifacts_keys
)
in_inference_but_not_artifacts = (
kedro_artifacts_keys - mlflow_artifacts_keys
)
raise ValueError(
f"Provided artifacts do not match catalog entries:\n- 'artifacts - inference.inputs()' = : {in_artifacts_but_not_inference}'\n- 'inference.inputs() - artifacts' = : {in_inference_but_not_artifacts}'"
)
self.loaded_catalog = deepcopy(self.initial_catalog)
for name, uri in context.artifacts.items():
self.loaded_catalog._data_sets[name]._filepath = Path(uri)
def predict(self, context, model_input):
# TODO : checkout out how to pass extra args in predict
# for instance, to enable parallelization
self.loaded_catalog.add(
data_set_name=self.pipeline_ml.input_name,
data_set=MemoryDataSet(model_input),
replace=True,
)
runner = SequentialRunner()
run_outputs = runner.run(
pipeline=self.pipeline_ml.inference, catalog=self.loaded_catalog
)
return run_outputs
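# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the wrapper
# above could be logged with mlflow. The artifact mapping is an assumption;
# the real kedro-mlflow workflow may resolve artifact locations differently.
def _example_log_model(pipeline_ml: PipelineML, catalog: DataCatalog) -> None:
    import mlflow.pyfunc

    kedro_model = KedroPipelineModel(pipeline_ml=pipeline_ml, catalog=catalog)
    # Map every non-input artifact of the inference pipeline to the filepath
    # recorded in the filtered catalog (mirrors the check in load_context).
    artifacts = {
        name: str(kedro_model.initial_catalog._data_sets[name]._filepath)
        for name in pipeline_ml.inference.inputs() - {pipeline_ml.input_name}
    }
    mlflow.pyfunc.log_model(
        artifact_path="model",
        python_model=kedro_model,
        artifacts=artifacts,
    )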
| 39.210526
| 215
| 0.6783
|
4a1989cfc8242cd22f370aaa1caf0c4cfa907a59
| 2,670
|
py
|
Python
|
config/settings/local.py
|
SantiR38/trello-api
|
a3bbbf6ab07169511819d0d6c1eee11d5f474542
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
SantiR38/trello-api
|
a3bbbf6ab07169511819d0d6c1eee11d5f474542
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
SantiR38/trello-api
|
a3bbbf6ab07169511819d0d6c1eee11d5f474542
|
[
"MIT"
] | null | null | null |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="yQuUgvQmoKLJiqFamg7fOUGIb1d9RRUYwFsnBamFBcyjbyMkSQjEAf2JDjHYbQRo",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
CORS_ORIGIN_ALLOW_ALL = True
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| 40.454545
| 97
| 0.583895
|
4a198a7613a0119b6728c22e0519b16f197e48c7
| 30,244
|
py
|
Python
|
seshat/stock/models.py
|
XecusM/SESHAT
|
34cf989e99e11f645339ce7190d92ff816062243
|
[
"MIT"
] | null | null | null |
seshat/stock/models.py
|
XecusM/SESHAT
|
34cf989e99e11f645339ce7190d92ff816062243
|
[
"MIT"
] | null | null | null |
seshat/stock/models.py
|
XecusM/SESHAT
|
34cf989e99e11f645339ce7190d92ff816062243
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models import Sum
from django.db.models import Q
from django.http import Http404
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
import uuid
import os
# Helper functions
def get_image_path(instance, filename):
'''
Uploading image function
'''
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('items/', filename)
# Create your models here.
class Category(models.Model):
'''
Model for items categories
'''
name = models.CharField(
verbose_name=_('Category name'),
max_length=128,
unique=True,
blank=False,
null=False
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
edited_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Edited by'),
related_name='category_item_user_edit',
on_delete=models.PROTECT,
blank=True,
null=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
created_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Created by'),
related_name='category_item_user_create',
on_delete=models.PROTECT,
blank=True,
null=True
)
def clean(self):
'''
Adjust cleaned data before saving to the database
'''
self.name = self.name.capitalize()
def get_items(self):
'''
Get all items for selected category
'''
return self.item_category.filter(category=self.id)
def edited(self, user):
'''
Edit Category
'''
self.edited_at = timezone.now()
self.edited_by = user
self.save()
def __str__(self):
'''
String representation for the record
'''
return self.name
class Item(models.Model):
'''
Model for the items
'''
code = models.CharField(
verbose_name=_('Code'),
max_length=8,
unique=True,
blank=False,
null=False
)
desciption = models.CharField(
verbose_name=_('Desciption'),
max_length=128,
unique=False,
blank=True,
null=True
)
barcode = models.CharField(
verbose_name=_('Barcode'),
max_length=128,
unique=True,
blank=True,
null=True
)
stock_limit = models.IntegerField(
verbose_name=_('Stock Limit'),
unique=False,
blank=True,
null=True
)
is_assembly = models.BooleanField(
verbose_name=_('Assembled Item'),
default=False
)
category = models.ForeignKey(
'Category',
verbose_name=_('Category'),
related_name='item_category',
on_delete=models.PROTECT,
blank=False,
null=False
)
location = models.ForeignKey(
'SubLocation',
verbose_name=_('Default Location'),
related_name='item_location',
on_delete=models.PROTECT,
blank=False,
null=False
)
price = models.DecimalField(
verbose_name=_('Unit Price'),
max_digits=8,
decimal_places=2,
unique=False,
blank=False,
null=False
)
photo = models.ImageField(
verbose_name=_('Item Picture'),
upload_to=get_image_path,
null=True,
blank=True
)
is_active = models.BooleanField(
verbose_name=_('Enabled'),
default=True
)
note = models.TextField(
verbose_name=_('Notes'),
max_length=255,
unique=False,
blank=True,
null=True
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
edited_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Edited by'),
related_name='item_user_edit',
on_delete=models.PROTECT,
blank=True,
null=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
created_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Created by'),
related_name='item_user_create',
on_delete=models.PROTECT,
blank=True,
null=True
)
def clean(self):
'''
Adjust cleaned data before saving to the database
'''
# let code be upper case only
self.code = self.code.upper()
@property
def quantity(self):
'''
return item quantity from its movements
'''
if self.is_assembly:
assembly_item = self.assembly_item.filter(item=self.id)
quantity = None
if assembly_item:
for item in assembly_item:
if quantity is None:
quantity = int(int(item.sub_item.quantity) \
/ item.quantity)
else:
quantity = min(
quantity,
int(int(item.sub_item.quantity) \
/ item.quantity))
return int(quantity)
else:
raise ValidationError(
_("Can't find items for assemblied item"))
else:
# sum all add movements
sum_add = self.item_move_item.filter(
item=self.id,
type='A').aggregate(Sum('quantity'))
if sum_add['quantity__sum'] is None:
sum_add['quantity__sum'] = 0
# sum all remove movements
sum_remove = self.item_move_item.filter(
item=self.id,
type='R').aggregate(Sum('quantity'))
if sum_remove['quantity__sum'] is None:
sum_remove['quantity__sum'] = 0
# calculate the difference between the add and remove sums
sum_all = sum_add['quantity__sum'] - sum_remove['quantity__sum']
return int(sum_all)
def get_item_moves(self):
'''
Get all item moves
'''
return self.item_move_item.filter(
item=self).order_by('-created_at')
def get_assembly_items(self):
'''
Get all items for the selected assembled item
'''
if self.is_assembly:
return self.assembly_item.filter(item=self.id).order_by('id')
else:
raise ValidationError(_('This is not an assembly item'))
def get_quantity(self, location):
'''
Get available quantity for a sub-location
'''
if self.is_assembly:
return int(self.quantity)
else:
# sum all add movements
sum_add = self.item_move_item.filter(
item=self.id,
location=location,
type='A').aggregate(Sum('quantity'))
if sum_add['quantity__sum'] is None:
sum_add['quantity__sum'] = 0
# sum all remove movements
sum_remove = self.item_move_item.filter(
item=self.id,
location=location,
type='R').aggregate(Sum('quantity'))
if sum_remove['quantity__sum'] is None:
sum_remove['quantity__sum'] = 0
# calculate the difference between the add and remove sums
sum_all = sum_add['quantity__sum'] - sum_remove['quantity__sum']
return int(sum_all)
def get_locations(self):
'''
Get quantities by location
'''
if not self.is_assembly:
all_locations = [{
'name': f"{move.location.location.name} / {move.location.name}",
'id': move.location.id
} for move in self.item_move_item.filter(item=self.id)]
# Remove duplicates
locations = [d for i, d in enumerate(
all_locations) if d not in all_locations[i + 1:]]
for i, location in enumerate(locations):
# sum all add movements
sum_add = self.item_move_item.filter(
item=self.id,
type='A',
location=location['id']
).aggregate(Sum('quantity'))
if sum_add['quantity__sum'] is None:
sum_add['quantity__sum'] = 0
# sum all remove movements
sum_remove = self.item_move_item.filter(
item=self.id,
type='R',
location=location['id']
).aggregate(Sum('quantity'))
if sum_remove['quantity__sum'] is None:
sum_remove['quantity__sum'] = 0
# calculate the difference between the add and remove sums
sum_all = sum_add[
'quantity__sum'] - sum_remove['quantity__sum']
locations[i]['quantity'] = sum_all
return locations
def edited(self, user):
'''
Edit item
'''
self.edited_at = timezone.now()
self.edited_by = user
self.save()
def __str__(self):
'''
String representation for the record
'''
return self.code
class AssemblyItem(models.Model):
'''
link all items to assemblied item
'''
item = models.ForeignKey(
'Item',
verbose_name=_('Item'),
limit_choices_to={'is_assembly': True},
related_name='assembly_item',
on_delete=models.CASCADE,
blank=False,
null=False
)
sub_item = models.ForeignKey(
'Item',
verbose_name=_('Item'),
related_name='assembly_sub_item',
on_delete=models.PROTECT,
blank=False,
null=False
)
quantity = models.IntegerField(
verbose_name=_('Quantity'),
unique=False,
blank=False,
null=False
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
@property
def sub_item_quantity(self):
'''
return assembled item quantity from its sub-items
'''
return self.sub_item.quantity
def __str__(self):
'''
String representation for the record
'''
return f"{self.item.code}/{self.sub_item.code}"
class Location(models.Model):
'''
Model for item locations to store
'''
name = models.CharField(
verbose_name=_('Location name'),
max_length=128,
unique=True,
blank=False,
null=False
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
edited_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Edited by'),
related_name='location_user_edit',
on_delete=models.PROTECT,
blank=True,
null=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
created_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Created by'),
related_name='location_user_create',
on_delete=models.PROTECT,
blank=True,
null=True
)
def clean(self):
'''
Adjust cleaned data before saving to the database
'''
self.name = self.name.upper()
def get_sub_locations(self):
'''
Get all sub-locations for selected location
'''
return self.sub_location.filter(location=self.id)
def edited(self, user):
'''
Edit location
'''
self.edited_at = timezone.now()
self.edited_by = user
self.save()
def __str__(self):
'''
String representation for the record
'''
return self.name
class SubLocation(models.Model):
'''
Model for item sub-locations to store
'''
location = models.ForeignKey(
'Location',
verbose_name=_('Location'),
related_name='sub_location',
on_delete=models.PROTECT,
blank=False,
null=False
)
name = models.CharField(
verbose_name=_('Sub-location name'),
max_length=128,
unique=True,
blank=False,
null=False
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
edited_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Edited by'),
related_name='sublocation_user_edit',
on_delete=models.PROTECT,
blank=True,
null=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
created_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Created by'),
related_name='sublocation_user_create',
on_delete=models.PROTECT,
blank=True,
null=True
)
def clean(self):
'''
Adjust cleaned data before saving to the database
'''
self.name = self.name.upper()
def get_items(self):
'''
return all items in this sub-location
'''
item_moves = self.item_move_location.filter(location=self.id)
return Item.objects.filter(item_move_item__in=item_moves)
def edited(self, user):
'''
Edit sub-location
'''
self.edited_at = timezone.now()
self.edited_by = user
self.save()
def __str__(self):
'''
String representation for the record
'''
return f"{self.location.name} / {self.name}"
class ItemMove(models.Model):
'''
Model for item movements done on the items
'''
#########################################
# Choices
ADD = 'A'
REMOVE = 'R'
type_choices = [
(ADD, _('Add')),
(REMOVE, _('Remove'))
]
PURCHASE = 'P'
SELL = 'S'
ASSEMBLY = 'A'
CUSTOM = 'U'
TRANSFER = 'T'
related_choices = [
(PURCHASE, _('Purchase')),
(SELL, _('Sell')),
(ASSEMBLY, _('Assembly')),
(CUSTOM, _('Custom')),
(TRANSFER, _('Transfer'))
]
#########################################
item = models.ForeignKey(
'Item',
verbose_name=_('Item'),
related_name='item_move_item',
on_delete=models.PROTECT,
blank=False,
null=False
)
location = models.ForeignKey(
'SubLocation',
verbose_name=_('Location'),
related_name='item_move_location',
on_delete=models.PROTECT,
blank=False,
null=False
)
type = models.CharField(
verbose_name=_('Movement type'),
max_length=1,
choices=type_choices,
blank=False,
null=False
)
quantity = models.IntegerField(
verbose_name=_('Quantity'),
validators=[MinValueValidator(1)],
unique=False,
blank=False,
null=False
)
related_to = models.CharField(
verbose_name=_('Related to'),
max_length=1,
choices=related_choices,
default=CUSTOM,
blank=False,
null=False
)
note = models.TextField(
verbose_name=_('Notes'),
max_length=255,
unique=False,
blank=True,
null=True
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
edited_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Edited by'),
related_name='item_move_user_edit',
on_delete=models.PROTECT,
blank=True,
null=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
created_by = models.ForeignKey(
get_user_model(),
verbose_name=_('Created by'),
related_name='item_move_user_create',
on_delete=models.PROTECT,
blank=True,
null=True
)
def edited(self, user):
'''
Edit movement
'''
self.edited_at = timezone.now()
self.edited_by = user
self.save()
def delete(self):
'''
Delete Item
'''
if self.item.is_assembly:
moves_values = self.assembly_move_assembly_move.get(
id=self).item_moves.values_list('id', flat=True)
item_moves = ItemMove.objects.filter(id__in=moves_values)
for item_move in item_moves:
item_move.delete()
super().delete()
def save(self, *args, **kwargs):
'''
Save record method
'''
if self.item.get_quantity(self.location.id) < int(self.quantity) and \
self.type == ItemMove.REMOVE and not self.item.is_assembly:
raise ValidationError(
_("Quantity can't be negative for the selected location"))
else:
super().save(*args, **kwargs)
if self.item.is_assembly:
if AssemblyMove.objects.filter(
assembly_move=self).exists():
moves_values = self.assembly_move_assembly_move.get(
assembly_move=self).item_moves.values_list(
'id', flat=True)
item_moves = ItemMove.objects.filter(id__in=moves_values)
for item_move in item_moves:
# Look up the assembly definition for each sub-item move so the stored
# quantity tracks (assembled quantity * units per assembly item).
sub_item = self.item.get_assembly_items().get(sub_item=item_move.item)
item_move.quantity = self.quantity * sub_item.quantity
item_move.save()
else:
sub_item_list = list()
for sub_item in self.item.get_assembly_items():
sub_item_move = ItemMove.objects.create(
item=sub_item.sub_item,
location=sub_item.sub_item.location,
type=self.type,
related_to=ItemMove.ASSEMBLY,
quantity=self.quantity * sub_item.quantity)
sub_item_list.append(sub_item_move)
assembly_move = AssemblyMove.objects.create(
assembly_move=self)
for sub_item_add in sub_item_list:
assembly_move.item_moves.add(sub_item_add)
assembly_move.save()
def __str__(self):
'''
String representation for the record
'''
return f"{self.type}-{self.id}"
class AssemblyMove(models.Model):
'''
Model for assembled items' related movements
'''
assembly_move = models.OneToOneField(
'ItemMove',
verbose_name=_('Item'),
limit_choices_to=~Q(related_to=ItemMove.ASSEMBLY),
related_name='assembly_move_assembly_move',
on_delete=models.CASCADE,
blank=False,
null=False
)
item_moves = models.ManyToManyField(
'ItemMove',
verbose_name=_("Items' moves"),
limit_choices_to={
'related_to': ItemMove.ASSEMBLY},
related_name='assembly_move_item_move',
blank=True,
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
def edited(self):
'''
Edit assembly movement
'''
self.edited_at = timezone.now()
self.save()
def delete(self):
'''
Delete Item
'''
moves_values = self.item_moves.values_list('id', flat=True)
item_moves = ItemMove.objects.filter(id__in=moves_values)
super().delete()
for item_move in item_moves:
item_move.delete()
def __str__(self):
'''
String representation for the record
'''
return f"{self.assembly_move.item.code}-{self.id}"
class ItemTransfer(models.Model):
'''
Model for transfer item from location to another
'''
item = models.ForeignKey(
'Item',
verbose_name=_('Item'),
related_name='item_transfer_item',
on_delete=models.PROTECT,
blank=False,
null=False
)
old_location = models.ForeignKey(
'SubLocation',
verbose_name=_('Old Location'),
related_name='item_transfer_old_location',
on_delete=models.PROTECT,
blank=False,
null=False
)
new_location = models.ForeignKey(
'SubLocation',
verbose_name=_('New Location'),
related_name='item_transfer_new_location',
on_delete=models.PROTECT,
blank=False,
null=False
)
add_move = models.ForeignKey(
'ItemMove',
verbose_name=_('Add Move'),
limit_choices_to={
'type': ItemMove.ADD,
'related_to': ItemMove.TRANSFER},
related_name='item_move_transfer_add',
on_delete=models.PROTECT,
blank=False,
null=False
)
remove_move = models.ForeignKey(
'ItemMove',
verbose_name=_('Remove Move'),
limit_choices_to={
'type': ItemMove.REMOVE,
'related_to': ItemMove.TRANSFER},
related_name='item_move_transfer_remove',
on_delete=models.PROTECT,
blank=False,
null=False
)
edited_at = models.DateTimeField(
verbose_name=_('Edited at'),
null=True,
blank=True
)
created_at = models.DateTimeField(
verbose_name=_('Created at'),
auto_now_add=True,
blank=False
)
def edited(self):
'''
Edit item transfer
'''
self.edited_at = timezone.now()
self.save()
def __str__(self):
'''
String representation for the record
'''
return f"{self.item.code}-({self.old_location}-{self.new_location})"
# Signals
@receiver(models.signals.post_delete, sender=Item)
def auto_delete_file_on_delete(sender, instance, **kwargs):
'''
Deletes file from filesystem
when corresponding `Item` object is deleted.
'''
if instance.photo and os.path.isfile(instance.photo.path):
os.remove(instance.photo.path)
@receiver(models.signals.pre_save, sender=Item)
def auto_delete_file_on_change(sender, instance, **kwargs):
'''
Deletes old file from filesystem
when corresponding `Item` object is updated
with new file.
'''
if instance.pk:
old_photo = Item.objects.get(pk=instance.pk).photo
new_photo = instance.photo
if not old_photo == new_photo and old_photo and \
os.path.isfile(old_photo.path):
os.remove(old_photo.path)
@receiver(models.signals.pre_save, sender=ItemMove)
def auto_delete_moves_on_change(sender, instance, **kwargs):
'''
Deletes old assembly moves when corresponding `ItemMove` object is updated
with not assmbly item.
'''
if instance.pk:
old_move = ItemMove.objects.get(id=instance.id)
if not instance.item == old_move.item and old_move.item.is_assembly:
moves_values = instance.assembly_move_assembly_move.get(
id=old_move.id).item_moves.values_list(
'id', flat=True)
item_moves = ItemMove.objects.filter(id__in=moves_values)
for item_move in item_moves:
item_move.delete()
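# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the quantity
# bookkeeping above behaves. `category` and `location` are hypothetical
# fixtures (a Category and a SubLocation); in a real project this belongs
# in a test case.
def _example_quantity_bookkeeping(category, location):
    item = Item.objects.create(code='BOLT', category=category,
                               location=location, price=1)
    ItemMove.objects.create(item=item, location=location,
                            type=ItemMove.ADD, quantity=10)
    ItemMove.objects.create(item=item, location=location,
                            type=ItemMove.REMOVE, quantity=3)
    # quantity = sum of ADD moves minus sum of REMOVE moves
    assert item.quantity == 7
    assert item.get_quantity(location.id) == 7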
| 35.085847
| 118
| 0.444716
|
4a198b8fb59210446dac3d6420ce2dcc81b8f503
| 1,041
|
py
|
Python
|
core/plugins/rabbitmq.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | 12
|
2020-06-02T14:22:40.000Z
|
2021-04-07T15:58:09.000Z
|
core/plugins/rabbitmq.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | 72
|
2020-06-09T00:35:19.000Z
|
2021-09-29T11:00:41.000Z
|
core/plugins/rabbitmq.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | 43
|
2020-06-05T15:09:37.000Z
|
2021-09-25T12:28:28.000Z
|
from core.ycheck.events import YEventCheckerBase
from core import (
checks,
plugintools,
)
RMQ_SERVICES_EXPRS = [
r"beam.smp",
r"epmd",
r"rabbitmq-server",
]
RMQ_PACKAGES = [
r"rabbitmq-server",
]
class RabbitMQBase(object):
pass
class RabbitMQChecksBase(RabbitMQBase, plugintools.PluginPartBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.apt_check = checks.APTPackageChecksBase(core_pkgs=RMQ_PACKAGES)
@property
def plugin_runnable(self):
if self.apt_check.core:
return True
return False
class RabbitMQServiceChecksBase(RabbitMQChecksBase, checks.ServiceChecksBase):
def __init__(self, *args, **kwargs):
super().__init__(service_exprs=RMQ_SERVICES_EXPRS,
*args, hint_range=(0, 3), **kwargs)
class RabbitMQEventChecksBase(RabbitMQChecksBase, YEventCheckerBase):
def __call__(self):
ret = self.run_checks()
if ret:
self._output.update(ret)
| 21.6875
| 78
| 0.666667
|
4a199020f2834b9ff173bd0fb96ff46050a1090c
| 1,195
|
py
|
Python
|
src/test/gen_rule_test.py
|
MerlinXYoung/typhoon-blade
|
b1605fac6a2f112f98e2fb8eb4df64c0b4bb5500
|
[
"BSD-3-Clause"
] | 2
|
2019-11-11T04:03:42.000Z
|
2019-11-11T04:03:47.000Z
|
src/test/gen_rule_test.py
|
MerlinXYoung/typhoon-blade
|
b1605fac6a2f112f98e2fb8eb4df64c0b4bb5500
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/gen_rule_test.py
|
MerlinXYoung/typhoon-blade
|
b1605fac6a2f112f98e2fb8eb4df64c0b4bb5500
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the test module for cc_gen_rule target.
"""
import blade_test
class TestGenRule(blade_test.TargetTest):
"""Test gen_rule """
def setUp(self):
"""setup method. """
self.doSetUp('test_gen_rule')
def testGenerateRules(self):
"""Test that rules are generated correctly. """
self.assertTrue(self.dryRun())
com_lower_line = self.findCommand('plowercase.cpp.o -c')
com_upper_line = self.findCommand('puppercase.cpp.o -c')
self.assertCxxFlags(com_lower_line)
self.assertCxxFlags(com_upper_line)
lower_so_index = self.findCommandAndLine(
['-shared', 'liblowercase.so', 'plowercase.cpp.o'])
gen_rule_index = self.findCommandAndLine('echo')
upper_so_index = self.findCommandAndLine(
['-shared', 'libuppercase.so', 'puppercase.cpp.o'])
#self.assertGreater(gen_rule_index, lower_so_index)
#self.assertGreater(upper_so_index, gen_rule_index)
if __name__ == '__main__':
blade_test.run(TestGenRule)
| 27.159091
| 67
| 0.662762
|
4a19921608f2140d22f7399aa1c5a03921d8e610
| 15,732
|
py
|
Python
|
influxdb_client/service/default_service.py
|
Onemind-Services-LLC/influxdb-client-python
|
c902f07acadc07234ee845256bfc60ebdd296d63
|
[
"MIT"
] | null | null | null |
influxdb_client/service/default_service.py
|
Onemind-Services-LLC/influxdb-client-python
|
c902f07acadc07234ee845256bfc60ebdd296d63
|
[
"MIT"
] | null | null | null |
influxdb_client/service/default_service.py
|
Onemind-Services-LLC/influxdb-client-python
|
c902f07acadc07234ee845256bfc60ebdd296d63
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from influxdb_client.api_client import ApiClient
class DefaultService(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None): # noqa: E501,D401,D403
"""DefaultService - a operation defined in OpenAPI."""
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_routes(self, **kwargs): # noqa: E501,D401,D403
"""Map of all top level routes available.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_routes(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: Routes
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_routes_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_routes_with_http_info(**kwargs) # noqa: E501
return data
def get_routes_with_http_info(self, **kwargs): # noqa: E501,D401,D403
"""Map of all top level routes available.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_routes_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: Routes
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_routes" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Routes', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def get_telegraf_plugins(self, **kwargs): # noqa: E501,D401,D403
"""get_telegraf_plugins.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_telegraf_plugins(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str type: The type of plugin desired.
:return: TelegrafPlugins
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_telegraf_plugins_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_telegraf_plugins_with_http_info(**kwargs) # noqa: E501
return data
def get_telegraf_plugins_with_http_info(self, **kwargs): # noqa: E501,D401,D403
"""get_telegraf_plugins.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_telegraf_plugins_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str type: The type of plugin desired.
:return: TelegrafPlugins
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['zap_trace_span', 'type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_telegraf_plugins" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type'])) # noqa: E501
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/telegraf/plugins', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TelegrafPlugins', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def post_signin(self, **kwargs): # noqa: E501,D401,D403
"""Exchange basic auth credentials for session.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_signin(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str authorization: An auth credential for the Basic scheme
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_signin_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.post_signin_with_http_info(**kwargs) # noqa: E501
return data
def post_signin_with_http_info(self, **kwargs): # noqa: E501,D401,D403
"""Exchange basic auth credentials for session.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_signin_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str authorization: An auth credential for the Basic scheme
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['zap_trace_span', 'authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_signin" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['BasicAuth'] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/signin', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def post_signout(self, **kwargs): # noqa: E501,D401,D403
"""Expire the current session.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_signout(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_signout_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.post_signout_with_http_info(**kwargs) # noqa: E501
return data
def post_signout_with_http_info(self, **kwargs): # noqa: E501,D401,D403
"""Expire the current session.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_signout_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_signout" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/signout', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
| 36.757009
| 120
| 0.614989
|
4a1993440effe78e0e557bdc132e949d9e81b842
| 821
|
py
|
Python
|
helpers/erai/erai2icar.py
|
scrasmussen/icar
|
88c59fed7595b176a81127993785fdeb514f28a3
|
[
"MIT"
] | 61
|
2016-03-15T18:57:19.000Z
|
2022-03-30T03:00:55.000Z
|
helpers/erai/erai2icar.py
|
scrasmussen/icar
|
88c59fed7595b176a81127993785fdeb514f28a3
|
[
"MIT"
] | 42
|
2016-03-17T16:10:59.000Z
|
2022-03-23T19:57:09.000Z
|
helpers/erai/erai2icar.py
|
scrasmussen/icar
|
88c59fed7595b176a81127993785fdeb514f28a3
|
[
"MIT"
] | 50
|
2015-12-09T18:13:47.000Z
|
2021-12-09T02:29:35.000Z
|
#!/usr/bin/env python
import os,traceback,sys
import config
import io_routines
import output
import convert
def main(info):
for i in range(info.ntimes):
raw_data=io_routines.load_data(info.times[i],info)
processed_data=convert.era2icar(raw_data)
output.write_file(info.times[i],info,processed_data)
if __name__ == '__main__':
try:
info=config.parse()
config.update_info(info)
exit_code = main(info)
if exit_code is None:
exit_code = 0
sys.exit(exit_code)
except KeyboardInterrupt as e: # Ctrl-C
raise e
except SystemExit as e: # sys.exit()
raise e
except Exception as e:
print('ERROR, UNEXPECTED EXCEPTION')
print(str(e))
traceback.print_exc()
os._exit(1)
| 24.147059
| 60
| 0.62363
|
4a199383a246f7ec0b57c97f9f091af7e8870c2b
| 2,096
|
py
|
Python
|
cheritest/trunk/tests/mem/test_raw_ld.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 36
|
2015-05-29T16:47:19.000Z
|
2022-02-08T21:16:26.000Z
|
cheritest/trunk/tests/mem/test_raw_ld.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 2
|
2020-06-02T13:44:55.000Z
|
2020-06-02T14:06:29.000Z
|
cheritest/trunk/tests/mem/test_raw_ld.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 15
|
2015-06-11T07:10:58.000Z
|
2021-06-18T05:14:54.000Z
|
#-
# Copyright (c) 2011 Steven J. Murdoch
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_raw_ld(BaseBERITestCase):
def test_a0(self):
'''Test load double word instruction'''
self.assertRegisterEqual(self.MIPS.a0, 0xfedcba9876543210, "Double word load failed")
def test_a1(self):
'''Test load positive double word'''
self.assertRegisterEqual(self.MIPS.a1, 0x7fffffffffffffff, "Positive double word load failed")
def test_a2(self):
'''Test load negative double word'''
self.assertRegisterEqual(self.MIPS.a2, 0xffffffffffffffff, "Negative double word load failed")
def test_pos_offset(self):
'''Test double word load at positive offset'''
self.assertRegisterEqual(self.MIPS.a3, 2, "Double word load at positive offset failed")
def test_neg_offset(self):
'''Test double word load at negative offset'''
self.assertRegisterEqual(self.MIPS.a4, 1, "Double word load at negative offset failed")
| 41.098039
| 102
| 0.739504
|