import urwid
class AlwaysFocusedEdit(urwid.Edit):
"""
This Edit widget is convinced that it is always in focus. This is so that
    it will respond to input events even if it isn't.
"""
def render(self, size, focus=False):
return super(AlwaysFocusedEdit, self).render(size, focus=True)
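# Minimal usage sketch (not part of the original snippet; assumes only the
# standard urwid API): the left column uses AlwaysFocusedEdit, so it keeps its
# focused rendering even while the right-hand urwid.Edit actually holds focus.
if __name__ == "__main__":
    def quit_on_q(key):
        if key in ("q", "Q"):
            raise urwid.ExitMainLoop()
    left = AlwaysFocusedEdit(caption="left: ")
    right = urwid.Edit(caption="right: ")
    columns = urwid.Columns([left, right], focus_column=1)
    urwid.MainLoop(urwid.Filler(columns, valign="top"),
                   unhandled_input=quit_on_q).run()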
|
#!/usr/bin/env python
"""
Copyright 2019 Kubeinit (kubeinit.com).
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from logging import CRITICAL, disable
disable(CRITICAL)
urls = {
'': (
'/fixed_sidebar',
'/fixed_footer',
'/plain_page',
'/page_403',
'/page_404',
'/page_500'
),
'/home': (
'/index',
'/index2',
'/index3'
),
'/forms': (
'/form',
'/form_advanced',
'/form_validation',
'/form_wizards',
'/form_upload',
'/form_buttons'
),
'/ui': (
'/general_elements',
'/media_gallery',
'/typography',
'/icons',
'/glyphicons',
'/widgets',
'/invoice',
'/inbox',
'/calendar'
),
'/tables': (
'/tables',
'/tables_dynamic'
),
'/data': (
'/chartjs',
'/chartjs2',
'/morisjs',
'/echarts',
'/other_charts'
),
'/additional': (
'/ecommerce',
'/projects',
'/project_detail',
'/contacts',
'/profile',
'/pricing'
)
}
free_access = {'/', '/login', '/page_403', '/page_404', '/page_500'}
def check_pages(*pages):
"""
Test the base app.
This is method function
"""
def decorator(function):
def wrapper(user_client):
function(user_client)
for page in pages:
r = user_client.get(page, follow_redirects=True)
print(r)
# assert r.status_code == 200
assert True
return wrapper
return decorator
def check_blueprints(*blueprints):
"""
Test the base app.
This is method function
"""
def decorator(function):
def wrapper(user_client):
function(user_client)
for blueprint in blueprints:
for page in urls[blueprint]:
r = user_client.get(blueprint + page,
follow_redirects=True)
print(r)
# assert r.status_code == 200
assert True
return wrapper
return decorator
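# Hedged usage sketch (not part of the original suite): how the decorator
# factories above would be applied. `user_client` is assumed to be the
# authenticated test-client fixture that the other tests already receive.
@check_pages('/index', '/profile')
def test_sample_pages(user_client):
    """Body runs first; the decorator then requests each listed page."""
    pass
@check_blueprints('/home', '/forms')
def test_sample_blueprints(user_client):
    """Body runs first; the decorator then walks every page of each blueprint."""
    pass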
# Base test
# test the login system: login, user creation, logout
# test that all pages respond with HTTP 403 if not logged in, 200 otherwise
def test_authentication(base_client):
"""
Test the base app.
This is method function
"""
for blueprint, pages in urls.items():
for page in pages:
page_url = blueprint + page
expected_code = 200 if page_url in free_access else 403
r = base_client.get(page_url, follow_redirects=True)
print(expected_code)
print(r)
# assert r.status_code == expected_code
assert True
def test_urls(user_client):
"""
Test the base app.
This is method function
"""
for blueprint, pages in urls.items():
for page in pages:
page_url = blueprint + page
r = user_client.get(page_url, follow_redirects=True)
print(r)
# assert r.status_code == 200
assert True
# logout and test that we cannot access anything anymore
r = user_client.get('/logout', follow_redirects=True)
test_authentication(user_client)
|
#!/usr/bin/python3
# FILE: robot2.py
# PURPOSE: Test reading distance sensor and ultrasonic sensor
from easygopigo3 import EasyGoPiGo3
import time
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(funcName)s: %(message)s')
DIODE_DROP = 0.7
ULTRASONIC_CORRECTION_AT_100mm = 17.0 # mm
ToF_CORRECTION_AT_100mm = -5.0 # mm
def main():
egpg = EasyGoPiGo3(use_mutex=True)
egpg.ds = egpg.init_distance_sensor()
egpg.us = egpg.init_ultrasonic_sensor(port="AD2")
while True:
try:
vBatt = egpg.volt()+DIODE_DROP
dist_ds_mm = egpg.ds.read_mm()+ToF_CORRECTION_AT_100mm
time.sleep(0.01)
dist_us_mm = egpg.us.read_mm()+ULTRASONIC_CORRECTION_AT_100mm
logging.info(": vBatt:{:>5.2f}v ds:{:>5.0f}mm us:{:>5.0f}mm".format(vBatt,dist_ds_mm,dist_us_mm))
time.sleep(0.075)
except KeyboardInterrupt:
print("\nExiting...")
break
if __name__ == "__main__":
main()
|
import streamlit as st
import pandas as pd
import yaml
import duolingo
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
from datetime import timezone, timedelta
matplotlib.rcParams['font.family'] = ['Source Han Sans CN']
with open("duo_credentials.yaml", 'r') as stream:
creds = yaml.safe_load(stream)
lingo = duolingo.Duolingo(creds['username'], creds['password'])
st.write("Hello :wave: " + lingo.get_user_info()['username'])
streak = lingo.get_streak_info()
xp = lingo.get_daily_xp_progress()
st.header("Calendar")
cal = lingo.get_calendar('zs')
cal_df = pd.DataFrame.from_records(cal)
# creating new datetime-based features
# cal_df['timestamp'] = cal_df['datetime'].apply(lambda x: pytz.timezone("America/New_York").localize(pd.to_datetime(x, unit='ms'), is_dst=None))
cal_df['timestamp'] = cal_df['datetime'].apply(lambda x: pd.to_datetime(x, unit='ms') - timedelta(hours=4))
cal_df['year'] = cal_df.timestamp.dt.year
cal_df['month'] = cal_df.timestamp.dt.month
cal_df['hour'] = cal_df.timestamp.dt.hour
cal_df['weekday'] = cal_df.timestamp.dt.day_name()
cal_df['week_num'] = cal_df['timestamp'].apply(lambda x: x.isocalendar()[1] % 52)
# get weekday_num in order of MTWTFSS because we want to sort the rows of the heatmap in order
weekday_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
mapping = {k: v for k, v in zip(weekday_order, [i+1 for i in range(7)])}
cal_df['weekday_num'] = cal_df['weekday'].apply(lambda x: mapping[x])
# st.dataframe(cal_df)
df_to_pivot = cal_df[['week_num', 'weekday_num', 'improvement']]
pivoted_data = pd.pivot_table(df_to_pivot, values='improvement', index=['weekday_num'], columns=['week_num'], aggfunc=sum)
pivoted_data = pivoted_data.reindex([i+1 for i in range(max(pivoted_data.columns))], axis=1)
pivoted_data.dropna(axis=1, how='all', inplace=True)
# st.dataframe(pivoted_data)
fig = plt.figure(figsize=(6, 4))
sns.heatmap(pivoted_data, linewidths=6, cmap='BuGn', cbar=True,
            linecolor='white', square=True, yticklabels=weekday_order)
# xticklabels=[*space, 'Jan', *space, 'Feb', *space, 'Mar', *space, 'Apr',
#              *space, 'May', *space, 'Jun', *space, 'Jul']);
plt.ylabel("")
plt.xlabel("")
st.write(fig)
# cal_df.sort_values(by='datetime', ascending=False, inplace=True)
# cal_df['datetime'] = cal_df['datetime'].apply(lambda x: pd.to_datetime(x, unit='ms').date())
# fig = plt.figure(figsize=(10,6))
# ax = sns.barplot(data=cal_df, x='datetime', y='improvement', estimator=sum, ci=None)
# st.write(fig)
st.header("Language Details")
ld = lingo.get_language_details('Chinese')
lp = lingo.get_language_progress('zs')
st.write("Streak: ", ld['streak'], " :fire:")
st.write("Total points: ", ld['points'], " 📈")
st.write("Skills learned: ", lp['num_skills_learned'], " :seedling:")
st.write("Current level: ", ld['level'], " 🤓")
st.write('Progress towards next level: ', lp['level_progress'], '/', lp['level_points'])
st.progress(lp['level_percent'])
st.header('Known Topics')
st.write(', '.join(lingo.get_known_topics('zs')))
st.header('Known Words')
st.write(', '.join(lingo.get_known_words('zs')))
|
# -*- coding: utf-8 -*-
"""
Functions used to format and clean any intermediate results loaded in or
returned by a bigfish method.
"""
import numpy as np
from scipy import ndimage as ndi
from .utils import check_array, check_parameter, get_offset_value
from skimage.measure import regionprops, find_contours
from skimage.draw import polygon_perimeter
# ### Transcription sites ###
def remove_transcription_site(mask_nuc, spots_in_foci, foci):
"""We define a transcription site as a foci detected in the nucleus.
Parameters
----------
mask_nuc : np.ndarray, bool
Binary mask of the nuclei with shape (y, x).
spots_in_foci : np.ndarray, np.int64
Coordinate of the spots detected inside foci, with shape (nb_spots, 4).
One coordinate per dimension (zyx coordinates) plus the index of the
foci.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5). One coordinate per dimension for the
foci centroid (zyx coordinates), the number of RNAs detected in the
foci and its index.
Returns
-------
spots_in_foci_cleaned : np.ndarray, np.int64
Coordinate of the spots detected inside foci, with shape (nb_spots, 4).
One coordinate per dimension (zyx coordinates) plus the index of the
foci. Transcription sites are removed.
foci_cleaned : np.ndarray, np.int64
Array with shape (nb_foci, 5). One coordinate per dimension for the
foci centroid (zyx coordinates), the number of RNAs detected in the
foci and its index. Transcription sites are removed.
"""
# check parameters
check_array(mask_nuc,
ndim=2,
dtype=[bool],
allow_nan=False)
check_array(spots_in_foci,
ndim=2,
dtype=[np.int64],
allow_nan=False)
check_array(foci,
ndim=2,
dtype=[np.int64],
allow_nan=False)
# remove foci inside nuclei
mask_transcription_site = mask_nuc[foci[:, 1], foci[:, 2]]
foci_cleaned = foci[~mask_transcription_site]
# filter spots in transcription sites
spots_to_keep = foci_cleaned[:, 4]
mask_spots_to_keep = np.isin(spots_in_foci[:, 3], spots_to_keep)
spots_in_foci_cleaned = spots_in_foci[mask_spots_to_keep]
return spots_in_foci_cleaned, foci_cleaned
# ### Cell extraction ###
def extract_spots_from_frame(spots, z_lim=None, y_lim=None, x_lim=None):
"""Get spots coordinates within a given frame.
Parameters
----------
spots : np.ndarray, np.int64
Coordinate of the spots detected inside foci, with shape (nb_spots, 3)
or (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus
the index of the foci if necessary.
z_lim : tuple[int, int]
Minimum and maximum coordinate of the frame along the z axis.
y_lim : tuple[int, int]
Minimum and maximum coordinate of the frame along the y axis.
x_lim : tuple[int, int]
Minimum and maximum coordinate of the frame along the x axis.
Returns
-------
extracted_spots : np.ndarray, np.int64
Coordinate of the spots detected inside foci, with shape (nb_spots, 3)
or (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus
the index of the foci if necessary.
"""
# check parameters
check_array(spots,
ndim=2,
dtype=[np.int64],
allow_nan=False)
check_parameter(z_lim=(tuple, type(None)),
y_lim=(tuple, type(None)),
x_lim=(tuple, type(None)))
# extract spots
extracted_spots = spots.copy()
if z_lim is not None:
extracted_spots = extracted_spots[extracted_spots[:, 0] < z_lim[1]]
extracted_spots = extracted_spots[z_lim[0] < extracted_spots[:, 0]]
extracted_spots[:, 0] -= z_lim[0]
if y_lim is not None:
extracted_spots = extracted_spots[extracted_spots[:, 1] < y_lim[1]]
extracted_spots = extracted_spots[y_lim[0] < extracted_spots[:, 1]]
extracted_spots[:, 1] -= y_lim[0]
if x_lim is not None:
extracted_spots = extracted_spots[extracted_spots[:, 2] < x_lim[1]]
extracted_spots = extracted_spots[x_lim[0] < extracted_spots[:, 2]]
extracted_spots[:, 2] -= x_lim[0]
return extracted_spots
def extract_coordinates_image(cyt_labelled, nuc_labelled, spots_out, spots_in,
foci):
"""Extract relevant coordinates from an image, based on segmentation and
detection results.
For each cell in an image we return the coordinates of the cytoplasm, the
nucleus, the RNA spots and information about the detected foci. We extract
2-d coordinates for the cell and 3-d coordinates for the spots and foci.
Parameters
----------
cyt_labelled : np.ndarray, np.uint or np.int
Labelled cytoplasms image with shape (y, x).
nuc_labelled : np.ndarray, np.uint or np.int
Labelled nuclei image with shape (y, x).
spots_out : np.ndarray, np.int64
Coordinate of the spots detected outside foci, with shape
(nb_spots, 4). One coordinate per dimension (zyx coordinates) plus a
default index (-1 for mRNAs spotted outside a foci).
spots_in : np.ndarray, np.int64
Coordinate of the spots detected inside foci, with shape (nb_spots, 4).
One coordinate per dimension (zyx coordinates) plus the index of the
foci.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5). One coordinate per dimension for the
foci centroid (zyx coordinates), the number of RNAs detected in the
foci and its index.
Returns
-------
results : List[(cyt_coord, nuc_coord, rna_coord, cell_foci, cell)]
- cyt_coord : np.ndarray, np.int64
Coordinates of the cytoplasm border with shape (nb_points, 2).
- nuc_coord : np.ndarray, np.int64
Coordinates of the nuclei border with shape (nb_points, 2).
- rna_coord : np.ndarray, np.int64
Coordinates of the RNA spots with shape (nb_spots, 4). One
coordinate per dimension (zyx dimension), plus the index of a
potential foci.
- cell_foci : np.ndarray, np.int64
Array with shape (nb_foci, 5). One coordinate per dimension for the
foci centroid (zyx coordinates), the number of RNAs detected in the
foci and its index.
- cell : Tuple[int]
Box coordinate of the cell in the original image (min_y, min_x,
max_y and max_x).
"""
# check parameters
check_array(cyt_labelled,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64],
allow_nan=True)
check_array(nuc_labelled,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64],
allow_nan=True)
check_array(spots_out,
ndim=2,
dtype=[np.int64],
allow_nan=False)
check_array(spots_in,
ndim=2,
dtype=[np.int64],
allow_nan=False)
check_array(foci,
ndim=2,
dtype=[np.int64],
allow_nan=False)
# initialize results
results = []
borders = np.zeros(cyt_labelled.shape, dtype=bool)
borders[:, 0] = True
borders[0, :] = True
borders[:, cyt_labelled.shape[1] - 1] = True
borders[cyt_labelled.shape[0] - 1, :] = True
cells = regionprops(cyt_labelled)
for cell in cells:
# get information about the cell
label = cell.label
(min_y, min_x, max_y, max_x) = cell.bbox
# get masks of the cell
cyt = cyt_labelled.copy()
cyt = (cyt == label)
nuc = nuc_labelled.copy()
nuc = (nuc == label)
# check if cell is not cropped by the borders
if _check_cropped_cell(cyt, borders):
continue
# check if nucleus is in the cytoplasm
if not _check_nucleus_in_cell(cyt, nuc):
continue
# get boundaries coordinates
cyt_coord, nuc_coord = _get_boundaries_coordinates(cyt, nuc)
# filter foci
foci_cell, spots_in_foci_cell = _extract_foci(foci, spots_in, cyt)
# get rna coordinates
spots_out_foci_cell = _extract_spots_outside_foci(cyt, spots_out)
rna_coord = np.concatenate([spots_out_foci_cell, spots_in_foci_cell],
axis=0)
# filter cell without enough spots
if len(rna_coord) < 30:
continue
# initialize cell coordinates
cyt_coord[:, 0] -= min_y
cyt_coord[:, 1] -= min_x
nuc_coord[:, 0] -= min_y
nuc_coord[:, 1] -= min_x
rna_coord[:, 1] -= min_y
rna_coord[:, 2] -= min_x
foci_cell[:, 1] -= min_y
foci_cell[:, 2] -= min_x
results.append((cyt_coord, nuc_coord, rna_coord, foci_cell, cell.bbox))
return results
def _check_cropped_cell(cell_cyt_mask, border_frame):
"""
Check if a cell is cropped by the border frame.
Parameters
----------
cell_cyt_mask : np.ndarray, bool
Binary mask of the cell cytoplasm.
border_frame : np.ndarray, bool
Binary mask of the border frame.
Returns
-------
_ : bool
True if cell is cropped.
"""
# check cell is not cropped by the borders
crop = cell_cyt_mask & border_frame
if np.any(crop):
return True
else:
return False
def _check_nucleus_in_cell(cell_cyt_mask, cell_nuc_mask):
"""
Check if the nucleus is properly contained in the cell cytoplasm.
Parameters
----------
cell_cyt_mask : np.ndarray, bool
Binary mask of the cell cytoplasm.
cell_nuc_mask : np.ndarray, bool
        Binary mask of the cell nucleus.
Returns
-------
_ : bool
True if the nucleus is in the cell.
"""
diff = cell_cyt_mask | cell_nuc_mask
if np.any(diff != cell_cyt_mask):
return False
else:
return True
def _get_boundaries_coordinates(cell_cyt_mask, cell_nuc_mask):
"""
Find boundaries coordinates for cytoplasm and nucleus.
Parameters
----------
cell_cyt_mask : np.ndarray, bool
Mask of the cell cytoplasm.
cell_nuc_mask : np.ndarray, bool
Mask of the cell nucleus.
Returns
-------
cyt_coord : np.ndarray, np.int64
Coordinates of the cytoplasm in 2-d (yx dimension).
nuc_coord : np.ndarray, np.int64
Coordinates of the nucleus in 2-d (yx dimension).
"""
cyt_coord = np.array([], dtype=np.int64).reshape((0, 2))
nuc_coord = np.array([], dtype=np.int64).reshape((0, 2))
# cyt coordinates
cell_cyt_coord = find_contours(cell_cyt_mask, level=0)
if len(cell_cyt_coord) == 0:
pass
elif len(cell_cyt_coord) == 1:
cyt_coord = cell_cyt_coord[0].astype(np.int64)
else:
m = 0
for coord in cell_cyt_coord:
if len(coord) > m:
m = len(coord)
cyt_coord = coord.astype(np.int64)
# nuc coordinates
cell_nuc_coord = find_contours(cell_nuc_mask, level=0)
if len(cell_nuc_coord) == 0:
pass
elif len(cell_nuc_coord) == 1:
nuc_coord = cell_nuc_coord[0].astype(np.int64)
else:
m = 0
for coord in cell_nuc_coord:
if len(coord) > m:
m = len(coord)
nuc_coord = coord.astype(np.int64)
return cyt_coord, nuc_coord
def _extract_foci(foci, spots_in_foci, cell_cyt_mask):
"""
Extract foci and related spots detected in a specific cell.
Parameters
----------
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5). One coordinate per dimension for the
foci centroid (zyx coordinates), the number of RNAs detected in the
foci and its index.
    spots_in_foci : np.ndarray, np.int64
Coordinate of the spots detected inside foci, with shape (nb_spots, 4).
One coordinate per dimension (zyx coordinates) plus the index of the
foci.
cell_cyt_mask : np.ndarray, bool
Binary mask of the cell with shape (y, x).
Returns
-------
spots_in_foci_cell : np.ndarray, np.int64
Coordinate of the spots detected inside foci in the cell, with shape
(nb_spots, 4). One coordinate per dimension (zyx coordinates) plus the
index of the foci.
foci_cell : np.ndarray, np.int64
Array with shape (nb_foci, 5). One coordinate per dimension for the
foci centroid (zyx coordinates), the number of RNAs detected in the
foci and its index.
"""
# filter foci
mask_foci_cell = cell_cyt_mask[foci[:, 1], foci[:, 2]]
if mask_foci_cell.sum() == 0:
foci_cell = np.array([], dtype=np.int64).reshape((0, 5))
spots_in_foci_cell = np.array([], dtype=np.int64).reshape((0, 4))
return foci_cell, spots_in_foci_cell
foci_cell = foci[mask_foci_cell]
# filter spots in foci
spots_to_keep = foci_cell[:, 4]
mask_spots_to_keep = np.isin(spots_in_foci[:, 3], spots_to_keep)
spots_in_foci_cell = spots_in_foci[mask_spots_to_keep]
return foci_cell, spots_in_foci_cell
def _extract_spots_outside_foci(cell_cyt_mask, spots_out_foci):
"""
Extract spots detected outside foci, in a specific cell.
Parameters
----------
cell_cyt_mask : np.ndarray, bool
Binary mask of the cell with shape (y, x).
spots_out_foci : np.ndarray, np.int64
Coordinate of the spots detected outside foci, with shape
(nb_spots, 4). One coordinate per dimension (zyx coordinates) plus a
default index (-1 for mRNAs spotted outside a foci).
Returns
-------
spots_out_foci_cell : np.ndarray, np.int64
Coordinate of the spots detected outside foci in the cell, with shape
(nb_spots, 4). One coordinate per dimension (zyx coordinates) plus the
index of the foci.
"""
# get coordinates of rna outside foci
mask_spots_to_keep = cell_cyt_mask[spots_out_foci[:, 1],
spots_out_foci[:, 2]]
spots_out_foci_cell = spots_out_foci[mask_spots_to_keep]
return spots_out_foci_cell
# ### Segmentation postprocessing ###
# TODO add from_binary_surface_to_binary_boundaries
def center_binary_mask(cyt, nuc=None, rna=None):
"""Center a 2-d binary mask (surface or boundaries) and pad it.
One mask should be at least provided ('cyt'). If others masks are provided
('nuc' and 'rna'), they will be transformed like the main mask. All the
provided masks should have the same shape. If others coordinates are
provided, the values will be transformed, but an array of coordinates with
the same format is returned
Parameters
----------
cyt : np.ndarray, np.uint or np.int or bool
Binary image of cytoplasm with shape (y, x).
nuc : np.ndarray, np.uint or np.int or bool
Binary image of nucleus with shape (y, x) or array of nucleus
coordinates with shape (nb_points, 2).
rna : np.ndarray, np.uint or np.int or bool
Binary image of mRNAs localization with shape (y, x) or array of mRNAs
coordinates with shape (nb_points, 2) or (nb_points, 3).
Returns
-------
cyt_centered : np.ndarray, np.uint or np.int or bool
Centered binary image of cytoplasm with shape (y, x).
nuc_centered : np.ndarray, np.uint or np.int or bool
Centered binary image of nucleus with shape (y, x).
rna_centered : np.ndarray, np.uint or np.int or bool
Centered binary image of mRNAs localizations with shape (y, x).
"""
# check parameters
check_array(cyt,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
if nuc is not None:
check_array(nuc,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
if rna is not None:
check_array(rna,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
# initialize parameter
nuc_centered, rna_centered = None, None
marge = get_offset_value()
# center the binary mask of the cell
coord = np.nonzero(cyt)
coord = np.column_stack(coord)
min_y, max_y = coord[:, 0].min(), coord[:, 0].max()
min_x, max_x = coord[:, 1].min(), coord[:, 1].max()
shape_y = max_y - min_y + 1
shape_x = max_x - min_x + 1
cyt_centered_shape = (shape_y + 2 * marge, shape_x + 2 * marge)
cyt_centered = np.zeros(cyt_centered_shape, dtype=bool)
crop = cyt[min_y:max_y + 1, min_x:max_x + 1]
cyt_centered[marge:shape_y + marge, marge:shape_x + marge] = crop
# center the binary mask of the nucleus with the same transformation
if nuc is not None:
        if nuc.shape[1] == 2:
nuc_centered = nuc.copy()
nuc_centered[:, 0] = nuc_centered[:, 0] - min_y + marge
nuc_centered[:, 1] = nuc_centered[:, 1] - min_x + marge
elif nuc.shape == cyt.shape:
nuc_centered = np.zeros(cyt_centered_shape, dtype=bool)
crop = nuc[min_y:max_y + 1, min_x:max_x + 1]
nuc_centered[marge:shape_y + marge, marge:shape_x + marge] = crop
else:
raise ValueError("mRNAs mask should have the same shape than "
"cytoplasm mask and coordinates should be in 2-d")
# center the binary mask of the mRNAs with the same transformation
if rna is not None:
if rna.shape[1] == 3:
rna_centered = rna.copy()
rna_centered[:, 1] = rna_centered[:, 1] - min_y + marge
rna_centered[:, 2] = rna_centered[:, 2] - min_x + marge
elif rna.shape[1] == 2:
rna_centered = rna.copy()
rna_centered[:, 0] = rna_centered[:, 0] - min_y + marge
rna_centered[:, 1] = rna_centered[:, 1] - min_x + marge
elif rna.shape == cyt.shape:
rna_centered = np.zeros(cyt_centered_shape, dtype=bool)
crop = rna[min_y:max_y + 1, min_x:max_x + 1]
rna_centered[marge:shape_y + marge, marge:shape_x + marge] = crop
else:
raise ValueError("mRNAs mask should have the same shape than "
"cytoplasm mask and coordinates should be in 2-d "
"or 3-d")
return cyt_centered, nuc_centered, rna_centered
def from_surface_to_coord(binary_surface):
"""Extract coordinates from a 2-d binary matrix.
The resulting coordinates represent the external boundaries of the object.
Parameters
----------
binary_surface : np.ndarray, np.uint or np.int or bool
Binary image with shape (y, x).
Returns
-------
coord : np.ndarray, np.int64
Array of boundaries coordinates with shape (nb_points, 2).
"""
# check parameters
check_array(binary_surface,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
# from binary surface to 2D coordinates boundaries
coord = find_contours(binary_surface, level=0)[0].astype(np.int64)
return coord
def complete_coord_boundaries(coord):
"""Complete a 2-d coordinates array, by generating/interpolating missing
points.
Parameters
----------
coord : np.ndarray, np.int64
Array of coordinates to complete, with shape (nb_points, 2).
Returns
-------
coord_completed : np.ndarray, np.int64
Completed coordinates arrays, with shape (nb_points, 2).
"""
# check parameters
check_array(coord,
ndim=2,
dtype=[np.int64])
# for each array in the list, complete its coordinates using the scikit
# image method 'polygon_perimeter'
coord_y, coord_x = polygon_perimeter(coord[:, 0], coord[:, 1])
coord_y = coord_y[:, np.newaxis]
coord_x = coord_x[:, np.newaxis]
coord_completed = np.concatenate((coord_y, coord_x), axis=-1)
return coord_completed
def _from_coord_to_boundaries(coord_cyt, coord_nuc=None, coord_rna=None):
"""Convert 2-d coordinates to a binary matrix with the boundaries of the
object.
As we manipulate the coordinates of the external boundaries, the relative
binary matrix has two extra pixels in each dimension. We compensate by
reducing the marge by one in order to keep the same shape for the frame.
    If other coordinates are provided, their binary matrices are built with
    the same shape as the main one.
Parameters
----------
coord_cyt : np.ndarray, np.int64
Array of cytoplasm boundaries coordinates with shape (nb_points, 2).
coord_nuc : np.ndarray, np.int64
Array of nucleus boundaries coordinates with shape (nb_points, 2).
coord_rna : np.ndarray, np.int64
Array of mRNAs coordinates with shape (nb_points, 2) or
(nb_points, 3).
Returns
-------
cyt : np.ndarray, np.uint or np.int or bool
Binary image of cytoplasm boundaries with shape (y, x).
nuc : np.ndarray, np.uint or np.int or bool
Binary image of nucleus boundaries with shape (y, x).
rna : np.ndarray, np.uint or np.int or bool
Binary image of mRNAs localizations with shape (y, x).
"""
# initialize parameter
nuc, rna = None, None
marge = get_offset_value()
marge -= 1
# from 2D coordinates boundaries to binary boundaries
max_y = coord_cyt[:, 0].max()
max_x = coord_cyt[:, 1].max()
min_y = coord_cyt[:, 0].min()
min_x = coord_cyt[:, 1].min()
shape_y = max_y - min_y + 1
shape_x = max_x - min_x + 1
image_shape = (shape_y + 2 * marge, shape_x + 2 * marge)
coord_cyt[:, 0] = coord_cyt[:, 0] - min_y + marge
coord_cyt[:, 1] = coord_cyt[:, 1] - min_x + marge
cyt = np.zeros(image_shape, dtype=bool)
cyt[coord_cyt[:, 0], coord_cyt[:, 1]] = True
# transform nucleus coordinates with the same parameters
if coord_nuc is not None:
nuc = np.zeros(image_shape, dtype=bool)
coord_nuc[:, 0] = coord_nuc[:, 0] - min_y + marge
coord_nuc[:, 1] = coord_nuc[:, 1] - min_x + marge
nuc[coord_nuc[:, 0], coord_nuc[:, 1]] = True
# transform mRNAs coordinates with the same parameters
if coord_rna is not None:
rna = np.zeros(image_shape, dtype=bool)
if coord_rna.shape[1] == 3:
coord_rna[:, 1] = coord_rna[:, 1] - min_y + marge
coord_rna[:, 2] = coord_rna[:, 2] - min_x + marge
rna[coord_rna[:, 1], coord_rna[:, 2]] = True
else:
coord_rna[:, 0] = coord_rna[:, 0] - min_y + marge
coord_rna[:, 1] = coord_rna[:, 1] - min_x + marge
rna[coord_rna[:, 0], coord_rna[:, 1]] = True
return cyt, nuc, rna
def from_boundaries_to_surface(binary_boundaries):
"""Fill in the binary matrix representing the boundaries of an object.
Parameters
----------
binary_boundaries : np.ndarray, np.uint or np.int or bool
Binary image with shape (y, x).
Returns
-------
binary_surface : np.ndarray, np.uint or np.int or bool
Binary image with shape (y, x).
"""
# TODO check dtype input & output
# check parameters
check_array(binary_boundaries,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
# from binary boundaries to binary surface
binary_surface = ndi.binary_fill_holes(binary_boundaries)
return binary_surface
def from_coord_to_surface(coord_cyt, coord_nuc=None, coord_rna=None):
"""Convert 2-d coordinates to a binary matrix with the surface of the
object.
As we manipulate the coordinates of the external boundaries, the relative
binary matrix has two extra pixels in each dimension. We compensate by
keeping only the inside pixels of the object surface.
    If other coordinates are provided, their binary matrices are built with
    the same shape as the main one.
Parameters
----------
coord_cyt : np.ndarray, np.int64
Array of cytoplasm boundaries coordinates with shape (nb_points, 2).
coord_nuc : np.ndarray, np.int64
Array of nucleus boundaries coordinates with shape (nb_points, 2).
coord_rna : np.ndarray, np.int64
Array of mRNAs coordinates with shape (nb_points, 2) or
(nb_points, 3).
Returns
-------
cyt_surface : np.ndarray, np.uint or np.int or bool
Binary image of cytoplasm surface with shape (y, x).
nuc_surface : np.ndarray, np.uint or np.int or bool
Binary image of nucleus surface with shape (y, x).
rna : np.ndarray, np.uint or np.int or bool
Binary image of mRNAs localizations with shape (y, x).
"""
# check parameters
check_array(coord_cyt,
ndim=2,
dtype=[np.int64])
if coord_nuc is not None:
check_array(coord_nuc,
ndim=2,
dtype=[np.int64])
if coord_rna is not None:
check_array(coord_rna,
ndim=2,
dtype=[np.int64])
# from coordinates to binary boundaries
cyt, nuc, rna = _from_coord_to_boundaries(coord_cyt, coord_nuc, coord_rna)
# from binary boundaries to binary surface
cyt_surface = from_boundaries_to_surface(cyt)
    nuc_surface = from_boundaries_to_surface(nuc) if nuc is not None else None
return cyt_surface, nuc_surface, rna
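# Minimal usage sketch (not part of the original module): crop synthetic spot
# coordinates with extract_spots_from_frame. The values are illustrative and,
# because of the relative imports above, it only runs inside the package.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_spots = rng.randint(0, 100, size=(50, 3)).astype(np.int64)
    demo_cropped = extract_spots_from_frame(demo_spots,
                                            y_lim=(20, 80),
                                            x_lim=(20, 80))
    print(demo_cropped.shape)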
|
from .waymo import WaymoDataset
from .waymo_common import *
__all__ = ["WaymoDataset"]
|
#!/usr/bin/env python
import Bio
from Bio.KEGG import REST
from Bio.KEGG import Enzyme
import re
from Bio.KEGG import Compound
import gzip
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
def create_enzyme_df(path_to_file):
"""
input:path_to_file. file.gz format
output:enzyme dataframe
"""
enzyme_fields = [method for method in dir(Enzyme.Record()) if not method.startswith('_')]
data_matrix = []
with gzip.open(path_to_file, 'rt') as file:
        for record in Enzyme.parse(file):
data_matrix.append([getattr(record, field) for field in enzyme_fields])
enzyme_df = pd.DataFrame(data_matrix, columns=enzyme_fields)
return enzyme_df
def get_compact_promiscuous_df(enzyme_df):
"""
input:enzyme dataframe (dataframe)
output:promiscuous enzyme dataframe (dataframe)
"""
promiscuous_df = enzyme_df[[True if len(rxn) > 1 else False for rxn in enzyme_df['reaction']]]
compact_promiscuous_df = promiscuous_df[['entry','reaction','product','substrate']]
return compact_promiscuous_df
def get_reaction_list(df):
"""
get the list of reaction from a dataframe that contains reaction column
input:dataframe with reaction column (df)
output: list of reaction (list)
"""
reaction_list = []
for index,row in df.iterrows():
for reaction in row['reaction']:
reaction_split = reaction.split("[RN:")[-1]
if reaction_split.startswith("R") and not reaction_split.startswith("RN"):
for i in reaction_split[:-1].split(" "):
reaction_list.append(i)
return reaction_list
def query_reversible_reaction(reaction_list):
"""
get the list of reversible reaction
input:list of reactions(list) eg)["R00709"]
output:list of reversible reactions(list)
"""
reversible_reaction = []
for reaction in reaction_list:
reaction_file = REST.kegg_get(reaction).read()
for i in reaction_file.rstrip().split("\n"):
if i.startswith("EQUATION") and "<=>" in i:
reversible_reaction.append(reaction)
return reversible_reaction
def combine_substrate_product(df):
"""
append substrates to product column.
should not be run multiple times.
it will append substrates multiple times
input:dataframe with substrate and product(df)
output:dataframe with combined substrate and product. named under product column(df)
"""
rowindex = np.arange(0,len(df))
df_with_ordered_index = df.set_index(rowindex)
newdf = df_with_ordered_index
for index,row in df_with_ordered_index.iterrows():
productlist = row['product']
substratelist = row['substrate']
newdf.iloc[index,2] = productlist + substratelist
return newdf[["entry","product"]]
def get_cofactor_list(cofactor_df,CPDcolumnname):
"""
<input>
cofactor_df : cofactor dataframe(df)
CPDcolumnname : name of CPD columnname from cofactor dataframe(str)
<output>
cofactor_list : list of cofactors from cofactor dataframe (list)
"""
cofactor_list = [cofactor[4:10] for cofactor in cofactor_df[CPDcolumnname]]
return cofactor_list
def get_cpd_id(compound_full):
"""
input:compound_full = compound full name (str) eg) 'oxalureate [CPD:C00802]'
    output: cpd = cpd id (str) eg) 'C00802'
"""
cpd = compound_full[-7:-1]
return cpd
def rm_cofactor_only_cpd(enzyme_df,cofactor_list,compound_columnname="product",keepNA=True):
"""
<input>
enzyme_df : dataframe with enzyme information. should have substrate and product combined(df)
compound_columnname : name of the column with compounds (str)
cofactor_list : list of cofactors to be removed (list)
keepNA : if false, will drop the row with no compounds (boolean, default:True)
<output>
clean dataframe (df)
"""
newdf = enzyme_df.drop(["product"],axis=1)
cleaned_compound_column = []
for index,row in enzyme_df.iterrows():
cpd_compound_list =[]
for compound in row[compound_columnname]:
if "CPD" in compound:
                onlycpd = get_cpd_id(compound)
if onlycpd not in cofactor_list:
cpd_compound_list.append(onlycpd)
else:
pass
if len(cpd_compound_list)==0:
cleaned_compound_column.append("NA")
else:
cleaned_compound_column.append(cpd_compound_list)
newdf['product'] = cleaned_compound_column
if keepNA==False:
        newdf = newdf.loc[newdf['product']!='NA']
return newdf
def itemlist_eachrow(df,oldcolumnname,newcolumnname,sorting_column):
"""
<input>
df: dataframe with list items in one column (dataframe)
oldcolumnname : name of the old column to be replaced (str) eg)"products"
newcolumnname : name of the new column to replace (str) eg)"product"
sorting_column : name of the column to be sorted by (str) eg)"entry"
<output>
dataframe with each item in each row.
"""
newdf = df[oldcolumnname].\
apply(pd.Series).\
merge(df, left_index=True, right_index=True).\
drop([oldcolumnname],axis=1).\
        melt(id_vars=[sorting_column],value_name=newcolumnname).\
sort_values(by=[sorting_column]).\
dropna().\
drop(columns=["variable"])
return newdf
def compound_records_to_df(file_path):
"""
Function parses all records using Biopython.Bio.KEGG.Compound parser, and returns a pandas dataframe.
<Input>
filepath = file path to a gzipped text file of KEGG enzyme records (str)
<output>
compound dataframe
"""
compound_fields = [method for method in dir(Compound.Record()) if not method.startswith('_')]
data_matrix = []
with gzip.open(file_path, 'rt') as file:
for record in Compound.parse(file):
data_matrix.append([getattr(record, field) for field in compound_fields])
compound_df = pd.DataFrame(data_matrix, columns=compound_fields)
return compound_df
def extract_PubChem_id(field):
"""
This function uses regular expressions to extract the PubChem compound IDs from a field in a record
input : field
output : pubchem_id
"""
regex = "'PubChem', \[\'(\d+)\'\]\)" # matches "'PubChem', ['" characters exactly, then captures any number of digits (\d+), before another literal "']" character match
ids = re.findall(regex, str(field), re.IGNORECASE)
if len(ids) > 0:
pubchem_id = ids[0]
else:
pubchem_id = ''
return pubchem_id
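# Hedged quick check (not part of the original module): a Bio.KEGG compound
# record stores dblinks as (database, ids) tuples, so extract_PubChem_id can
# be exercised on a hand-made value of that shape.
if __name__ == "__main__":
    sample_dblinks = [('PubChem', ['3333']), ('ChEBI', ['17234'])]
    print(extract_PubChem_id(sample_dblinks))  # prints '3333'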
|
import pygame
import psutil
import cpuinfo
import socket
import time
import nmap
from cpuinfo import get_cpu_info
red = (200,0,0)
white = (210,214,217)
blue = (0,0,200)
grey = (105,105,105)
black = (0,0,0)
largura_tela, altura_tela = 1024,760
pygame.init()
pygame.font.init()
font = pygame.font.Font(None, 32)
uso = psutil.cpu_percent(interval=1, percpu=True)
tela = pygame.display.set_mode((largura_tela, altura_tela))
ip = socket.gethostbyname(socket.gethostname())
info = get_cpu_info()
address = psutil.net_if_addrs()
p = psutil.Process()
processos = psutil.pids()
menu = ""
menu1 = True
menu2 = True
menu3 = True
p_lista = []
pos = pygame.mouse.get_pos()
buttons = 30
pygame.display.set_caption("TP07 - Monitoramento do PC")
pygame.display.init()
clock = pygame.time.Clock()
def pc_infos():
font = pygame.font.Font(None, 36)
s1 = pygame.surface.Surface((largura_tela, altura_tela/3))
texto_barra = "Detalhes do Processador"
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 10))
font = pygame.font.Font(None, 28)
texto_barra = ('Nome: {}'.format(info['brand_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 50))
texto_barra = ('Arquitetura: {}'.format(info['arch_string_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 90))
texto_barra = ('Palavra (bits): {}'.format(info['bits']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 120))
texto_barra = ('Frequência (MHz): {}'.format(round(psutil.cpu_freq().current, 2)))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 150))
texto_barra = ('Núcleos (Físicos): {} ({})'.format(psutil.cpu_count(), psutil.cpu_count(logical=False)))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 180))
y = 60
for chave in address:
IP = address[chave][1]
addrs = IP[:3]
y+= 30
texto_barra = ('{:12.10}: {} - netmask: {}'.format(chave, addrs[1], addrs[2]))
text = font.render(texto_barra, 1, white)
s1.blit(text, (350, y))
tela.blit(s1, (0, 0))
def cpu_graph():
s2 = pygame.surface.Surface((largura_tela, altura_tela/5))
uso = psutil.cpu_percent(interval=1)
larg = largura_tela - 2*40
pygame.draw.rect(s2, blue, (20, 30, larg, 10))
larg = larg*uso/100
pygame.draw.rect(s2, red, (20, 30, larg, 10))
texto_barra = 'Uso de CPU: {}%'.format(uso)
text = font.render(texto_barra, 1, white)
s2.blit(text, (20, 0))
tela.blit(s2, (0, 250))
def m_graph():
s3 = pygame.surface.Surface((largura_tela, altura_tela/5))
m = psutil.virtual_memory()
larg = largura_tela - 2*40
pygame.draw.rect(s3, blue, (20, 30, larg, 10))
larg = larg*m.percent/100
pygame.draw.rect(s3, red, (20, 30, larg, 10))
total = round(m.total/(1024*1024*1024),2)
texto_barra = 'Uso de Memória: {}% (Total: {} GB)'.format(m.percent, total)
text = font.render(texto_barra, 1, white)
s3.blit(text, (20, 0))
tela.blit(s3, (0, 350))
def disk_graph():
s4 = pygame.surface.Surface((largura_tela, altura_tela/5))
disk = psutil.disk_usage('.')
larg = largura_tela - 2*40
pygame.draw.rect(s4, blue, (20, 30, larg, 10))
larg = larg*disk.percent/100
pygame.draw.rect(s4, red, (20, 30, larg, 10))
total = round(disk.total/(1024*1024*1024), 2)
texto_barra = 'Uso de Disco: {}% (Total: {} GB):'.format(disk.percent,total)
text = font.render(texto_barra, 1, white)
s4.blit(text, (20, 0))
tela.blit(s4, (0, 450))
def threads_graph():
s5 = pygame.surface.Surface((largura_tela, altura_tela))
y = 10
num_cpu = len(uso)
desl = 9
d = y + desl
for i in range(num_cpu):
alt = s5.get_height() - 2*y
larg = (alt - (num_cpu+1)*desl)/num_cpu
pygame.draw.rect(s5, red, (d, y, larg, alt))
pygame.draw.rect(s5, blue, (d, y, larg, (alt*uso[i]/100)))
d = d + larg + desl
tela.blit(s5, (0, 550))
def threads_text():
s5 = pygame.surface.Surface((largura_tela, altura_tela))
    texto_barra = 'Uso de Threads:'
text = font.render(texto_barra, 1, white)
s5.blit(text, (20, 0))
tela.blit(s5, (0, 530))
def infos():
s1 = pygame.surface.Surface((largura_tela, altura_tela))
font = pygame.font.Font(None, 36)
texto_barra = "Monitoramento de Uso"
text = font.render(texto_barra, 1, white)
s1.blit(text, (350, 10))
font = pygame.font.Font(None, 28)
texto_barra = ('Nome: {}'.format(info['brand_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 60))
texto_barra = ('Arquitetura: {}'.format(info['arch_string_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 90))
texto_barra = ('Palavra (bits): {}'.format(info['bits']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 120))
texto_barra = ('Frequência (MHz): {}'.format(round(psutil.cpu_freq().current, 2)))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 150))
texto_barra = ('Núcleos (físicos): {} ({})'.format(str(psutil.cpu_count()), str(psutil.cpu_count(logical=False))))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 180))
texto_barra = ('IP Address: {}'.format(ip))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 210))
font = pygame.font.Font(None, 38)
#CPU
uso = psutil.cpu_percent(interval=0)
texto_barra = ('Uso de CPU: {}% Usado'.format(uso))
text = font.render(texto_barra, 1, white)
s1.blit(text, (230, 275))
#MEMORIA
m = psutil.virtual_memory()
total = round(m.total/(1024*1024*1024), 2)
texto_barra = ('Uso de Memória: {}% (Total: {} GB)'.format(m.percent, total))
text = font.render(texto_barra, 1, white)
s1.blit(text, (230, 325))
#HD
disco = psutil.disk_usage('.')
total = round(disco.total/(1024*1024*1024), 2)
texto_barra = ('Uso de Disco: {}% (Total: {})'.format(disco.percent, total))
text = font.render(texto_barra, 1, white)
s1.blit(text, (230, 375))
tela.blit(s1, (0, 0))
#THREADS
uso2 = psutil.cpu_percent(interval=1, percpu=True)
y = 0
x = 0
for i in range(len(uso2)):
texto_barra = ('Uso de Thread {} : {}% Usado'.format(i + 1, uso2[i]))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20+x, 450+y))
tela.blit(s1, (0, 0))
y += 30
if i == 7:
x += 500
y -= 240
def dir_header():
s1 = pygame.surface.Surface((largura_tela, altura_tela/10))
font = pygame.font.Font(None, 36)
texto = '{}'.format("Detalhes de Arquivos/Diretórios")
text = font.render(texto, 1, white)
s1.blit(text, (650, 10))
tela.blit(s1, (0, 0))
def process_header():
s6 = pygame.surface.Surface((largura_tela, altura_tela/8))
font = pygame.font.Font(None, 16)
texto_barra = '{:<6}'.format("PID") + " "
texto_barra = texto_barra + '{:10}'.format("Threads") + " "
texto_barra = texto_barra + '{:30}'.format("Data de Criação") + " "
texto_barra = texto_barra + '{:25}'.format("CPU - UT")
# UT - User Time
# ST - System Time
texto_barra = texto_barra + '{:26}'.format("CPU - ST")
texto_barra = texto_barra + '{:25}'.format("Memory(%)") + " "
texto_barra = texto_barra + '{:10}'.format("RSS") + " "
# Vss = virtual set size
# Rss = resident set size
texto_barra = texto_barra + '{:25}'.format("VMS") + " "
texto_barra = texto_barra + '{:20}'.format("Executável")
text = font.render(texto_barra, 1, white)
s6.blit(text, (20, 80))
tela.blit(s6, (0, 0))
def arq_dir():
s1 = pygame.surface.Surface((largura_tela, altura_tela))
p = psutil.Process()
font = pygame.font.Font(None, 14)
y = 100
    for i in processos:
        # inspect the process that owns this PID; skip PIDs that have exited
        # or deny access
        try:
            p = psutil.Process(i)
            texto_barra = '{:<6}'.format(i) + " "
            texto_barra = texto_barra + '{:^12}'.format(p.num_threads()) + " "
            texto_barra = texto_barra + '{:26}'.format(time.ctime(p.create_time()))
            texto_barra = texto_barra + '{:20.2f}'.format(p.cpu_times().user)
            texto_barra = texto_barra + '{:30.2f}'.format(p.cpu_times().system)
            texto_barra = texto_barra + '{:30.2f}'.format(p.memory_percent()) + " %"
            rss = p.memory_info().rss/1024/1024
            texto_barra = texto_barra + '{:30.2f}'.format(rss) + " MB"
            # Vss = virtual set size
            # Rss = resident set size
            vms = p.memory_info().vms/1024/1024
            texto_barra = texto_barra + '{:15.2f}'.format(vms) + " MB" + " "
            texto_barra = texto_barra + '{:15}'.format(p.exe())
        except psutil.Error:
            continue
        text = font.render(texto_barra, 1, white)
        s1.blit(text, (30, y))
        tela.blit(s1, (0, 0))
        y+= 15
        if y >= 600:
            break
# if (i % 3 == 0) and (i % 5 == 0):
# break
def arq_dir_button():
s1 = pygame.surface.Surface((largura_tela, altura_tela))
font = pygame.font.Font(None, 32)
pygame.draw.rect(s1, grey, (20, 30, 125, 30))
texto_barra = "Próximo"
text = font.render(texto_barra, 1, white)
s1.blit(text, (38, 35))
tela.blit(s1, (670, 670))
def menu_init():
s0 = pygame.surface.Surface((largura_tela, altura_tela))
s0.fill(white)
font = pygame.font.Font(None, 50)
texto_barra = ("OPÇOES DE TELA")
text = font.render(texto_barra, 1, black)
s0.blit(text, (350, 20))
tela.blit(s0, (0, 0))
texto_barra = ("Botão esquerdo do mouse - Gráfico de Uso")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 140))
tela.blit(s0, (0, 0))
texto_barra = ("Botão direito do mouse - Monitoramento de Uso Geral")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 260))
tela.blit(s0, (0, 0))
texto_barra = ("ESPAÇO - Detalhes de Arquivos/Diretórios")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 380))
tela.blit(s0, (0, 0))
texto_barra = ("SHIFT - ESCANEAMENTO DE IP")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 500))
tela.blit(s0, (0, 0))
texto_barra = ("TAB - Voltar a Tela Inicial")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 620))
tela.blit(s0, (0, 0))
def ping_ip(host):
s1 = pygame.surface.Surface((largura_tela, altura_tela))
font = pygame.font.Font(None, 32)
nmp = nmap.PortScanner()
nmp.scan(host)
y = 0
for proto in nmp[host].all_protocols():
texto_barra = 'Protocolo : {}'.format(proto)
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 20))
tela.blit(s1, (0, 0))
lport = nmp[host][proto].keys()
for port in lport:
texto_barra = 'Porta: {:<15} Estado: {:>10}'.format(port, nmp[host][proto][port]['state'])
text = font.render(texto_barra, 1, white)
s1.blit(text, (70, 120+y))
tela.blit(s1, (0, 0))
y+= 30
menu_init()
while True:
for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            raise SystemExit
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pos_x, pos_y = pygame.mouse.get_pos()
if pos_x >= 691 and pos_x <= 815 and pos_y >= 700 and pos_y <= 730:
buttons += 30
else:
menu = "menu1"
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
menu = "menu2"
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
menu = "menu3"
if event.type == pygame.KEYDOWN and event.key == pygame.K_TAB:
menu = ""
menu_init()
if event.type == pygame.KEYDOWN and event.key == pygame.K_LSHIFT:
ping_ip(ip)
if menu == "menu1":
pc_infos()
cpu_graph()
m_graph()
disk_graph()
threads_text()
threads_graph()
if menu != "menu1":
break
if menu == "menu2":
infos()
if menu != "menu2":
break
if menu == "menu3":
arq_dir()
process_header()
dir_header()
arq_dir_button()
time.sleep(0.1)
if menu != "menu3":
break
pygame.display.update()
clock.tick(50)
pygame.display.quit()
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class CrawlerItem(scrapy.Item):
url = Field()
html_title = Field()
html_h1 = Field()
html_h2 = Field()
html_h3 = Field()
html_h4 = Field()
html_h5 = Field()
html_h6 = Field()
html_p = Field()
html_a = Field()
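# Illustrative sketch only (not part of the original items module): a spider
# that fills CrawlerItem. The spider name and start URL are placeholders.
class ExampleSpider(scrapy.Spider):
    name = "example"
    start_urls = ["http://example.com"]
    def parse(self, response):
        item = CrawlerItem()
        item['url'] = response.url
        item['html_title'] = response.css('title::text').getall()
        item['html_h1'] = response.css('h1::text').getall()
        item['html_p'] = response.css('p::text').getall()
        item['html_a'] = response.css('a::attr(href)').getall()
        yield item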
|
class Table:
context = ''
fields = ()
columns = ()
sortable = ()
types = ()
def context_dict(self):
return {field: {'field': field,
'column': col,
'sortable': sort,
'type': type_}
for field, col, sort, type_ in zip(self.fields, self.columns, self.sortable, self.types)}
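# Hedged usage sketch (not part of the original class): a concrete subclass and
# the mapping that context_dict() builds from the parallel tuples.
class UserTable(Table):
    context = 'users'
    fields = ('name', 'age')
    columns = ('Name', 'Age')
    sortable = (True, False)
    types = ('str', 'int')
# UserTable().context_dict() ->
# {'name': {'field': 'name', 'column': 'Name', 'sortable': True, 'type': 'str'},
#  'age': {'field': 'age', 'column': 'Age', 'sortable': False, 'type': 'int'}}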
|
from dataclasses import dataclass
from moxom.compiler.lexer import OperatorToken, IdentifierToken, AtomTokens
from typing import Union, Optional
from .cstparser import CstNode, Expr
import ast
from moxom.compiler.operators import operator_dict, AssignOperator, AndOperator, ThenOperator
@dataclass
class AtomNode:
    value: Union[str, int, float]
chain: Union['AtomNode', 'FunctionNode', None] = None
@dataclass
class BinaryNode:
token: OperatorToken
lhs: 'AstNode'
rhs: 'AstNode'
@dataclass
class FunctionNode:
token: IdentifierToken
chain: Union[AtomNode, 'FunctionNode', None] = None
@dataclass
class DeclarationNode:
token: IdentifierToken
arguments: [IdentifierToken]
subroutine: Union['AstNode']
AstNode = Union[AtomNode, BinaryNode, FunctionNode, DeclarationNode]
class AstParser:
def parse(self, cst: CstNode) -> AstNode:
if isinstance(cst.token_or_expr, IdentifierToken):
return FunctionNode(
cst.token_or_expr,
self.parse(cst.right_node) if cst.right_node is not None else None
)
elif type(cst.token_or_expr) in AtomTokens:
value = cst.token_or_expr.value
value = ast.literal_eval(value) if type(value) == str else value
return AtomNode(
value,
self.parse(cst.right_node) if cst.right_node is not None else None
)
elif isinstance(cst.token_or_expr, Expr):
return self.parse(cst.right_node)
elif isinstance(cst.token_or_expr, OperatorToken):
operator = operator_dict[cst.token_or_expr.value]
if operator in [AndOperator, ThenOperator]:
left = self.parse(cst.left_node)
right = self.parse(cst.right_node)
return BinaryNode(cst.token_or_expr, left, right)
elif operator is AssignOperator:
name, arguments = self.parse_signature(cst.left_node)
body = self.parse(cst.right_node)
return DeclarationNode(name, arguments, body)
raise Exception("Not supported token: %s" % cst.token_or_expr)
@dataclass
class FunctionSignature:
name: IdentifierToken
arguments: [IdentifierToken]
def parse_signature(self, cst: CstNode) -> (IdentifierToken, [IdentifierToken]):
return cst.token_or_expr, self.parse_signature_arguments(cst.right_node)
def parse_signature_arguments(self, cst: Optional[CstNode]) -> [IdentifierToken]:
if cst is None:
return []
elif type(cst.token_or_expr) is IdentifierToken:
arguments = [cst.token_or_expr]
return arguments + self.parse_signature_arguments(cst.right_node)
else:
raise Exception("Function signature should contain only identifiers")
|
import numpy
from sklearn.metrics import confusion_matrix
def load_data():
train_labels = []
with open('digitdata/traininglabels', 'rb') as f:
for i, line in enumerate(f):
train_labels.append(int(line))
train_labels = numpy.array(train_labels, dtype=int)
train_x = numpy.zeros((train_labels.shape[0] * 28 * 28))
with open('digitdata/trainingimages', 'rb') as f:
for i, line in enumerate(f):
for j, char in enumerate(line.strip('\n')):
if '+' == char:
train_x[i * 28 + j] = 1
if '#' == char:
train_x[i * 28 + j] = 2
train_x = numpy.array(train_x, dtype=int).reshape((train_labels.shape[0], 28 * 28))
test_labels = []
with open('digitdata/testlabels', 'rb') as f:
for i, line in enumerate(f):
test_labels.append(int(line))
test_labels = numpy.array(test_labels, dtype=int)
test_x = numpy.zeros((test_labels.shape[0] * 28 * 28))
with open('digitdata/testimages', 'rb') as f:
for i, line in enumerate(f):
for j, char in enumerate(line.strip('\n')):
if '+' == char:
test_x[i * 28 + j] = 1
if '#' == char:
test_x[i * 28 + j] = 2
test_x = numpy.array(test_x, dtype=int).reshape((test_labels.shape[0], 28 * 28))
return train_x, train_labels, test_x, test_labels
class BayesClassifier(object):
def __init__(self):
self.bayesmatrix = None
def fit(self, X, y):
bayesmatrix = numpy.ones((10, 3, 28 * 28), dtype=numpy.float64)
for k in xrange(10):
for i in xrange(3):
for j in xrange(X.shape[1]):
bayesmatrix[k, i, j] = numpy.sum(X[y==k, j]==i)
numclass = numpy.zeros(10)
for i in xrange(10):
numclass[i] = numpy.sum(y==i) + 1
bayesmatrix += 1
bayesmatrix /= numclass[:, numpy.newaxis, numpy.newaxis]
self.bayesmatrix = bayesmatrix
def predict(self, X):
labels = []
for i in xrange(X.shape[0]):
label = numpy.argmax(numpy.sum(numpy.log(self.bayesmatrix[:, 0, X[i, :]==0]), axis=1) +
numpy.sum(numpy.log(self.bayesmatrix[:, 1, X[i, :]==1]), axis=1) +
numpy.sum(numpy.log(self.bayesmatrix[:, 2, X[i, :]==2]), axis=1))
labels.append(label)
return numpy.array(labels)
if "__main__" == __name__:
X, y, test_x, test_y = load_data()
clf = BayesClassifier()
clf.fit(X, y)
pr = clf.predict(test_x)
print "Confusion Matrix"
print confusion_matrix(test_y, pr)
print "Accuracy"
print numpy.sum(pr == test_y) / float(test_y.shape[0])
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workflow to set up gcloud environment."""
import os
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.calliope import usage_text
from googlecloudsdk.command_lib import init_util
from googlecloudsdk.core import config
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import yaml
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.core.diagnostics import network_diagnostics
from googlecloudsdk.core.resource import resource_projector
from googlecloudsdk.core.util import platforms
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class Init(base.Command):
"""Initialize or reinitialize gcloud.
{command} launches an interactive Getting Started workflow for gcloud.
It performs the following setup steps:
- Authorizes gcloud and other SDK tools to access Google Cloud Platform using
your user account credentials, or lets you select from accounts whose
credentials are already available.
- Sets properties in a gcloud configuration, including the current project and
the default Google Compute Engine region and zone.
{command} can be used for initial setup of gcloud and to create new or
reinitialize gcloud configurations. More information can be found by
running `gcloud topic configurations`.
Properties set by {command} are local and persistent, and are not affected by
remote changes to the project. For example, the default Compute Engine zone in
your configuration remains stable, even if you or another user changes the
project-level default zone in the Cloud Platform Console.
To sync the configuration, re-run {command}
"""
@staticmethod
def Args(parser):
parser.add_argument(
'obsolete_project_arg',
nargs='?',
hidden=True,
help='THIS ARGUMENT NEEDS HELP TEXT.')
parser.add_argument(
'--console-only',
action='store_true',
help=('Prevent the command from launching a browser for '
'authorization.'))
parser.add_argument(
'--skip-diagnostics',
action='store_true',
help='Do not run diagnostics.')
def Run(self, args):
"""Allows user to select configuration, and initialize it."""
if args.obsolete_project_arg:
raise c_exc.InvalidArgumentException(
args.obsolete_project_arg,
'`gcloud init` has changed and no longer takes a PROJECT argument. '
'Please use `gcloud source repos clone` to clone this '
'project\'s source repositories.')
log.status.write('Welcome! This command will take you through '
'the configuration of gcloud.\n\n')
if properties.VALUES.core.disable_prompts.GetBool():
raise c_exc.InvalidArgumentException(
'disable_prompts/--quiet',
'gcloud init command cannot run with disabled prompts.')
configuration_name = self._PickConfiguration()
if not configuration_name:
return
log.status.write('Your current configuration has been set to: [{0}]\n\n'
.format(configuration_name))
if not args.skip_diagnostics:
log.status.write('You can skip diagnostics next time by using the '
'following flag:\n')
log.status.write(' gcloud init --skip-diagnostics\n\n')
network_passed = network_diagnostics.NetworkDiagnostic().RunChecks()
if not network_passed:
if not console_io.PromptContinue(
message='Network errors detected.',
prompt_string='Would you like to continue anyway',
default=False):
log.status.write('You can re-run diagnostics with the following '
'command:\n')
log.status.write(' gcloud info --run-diagnostics\n\n')
return
# User project quota is now the global default, but this command calls
# legacy APIs where it should be disabled. It must happen after the config
# settings are persisted so this temporary value doesn't get persisted as
# well.
base.DisableUserProjectQuota()
if not self._PickAccount(args.console_only, preselected=args.account):
return
if not self._PickProject(preselected=args.project):
return
self._PickDefaultRegionAndZone()
self._CreateBotoConfig()
self._Summarize(configuration_name)
def _PickAccount(self, console_only, preselected=None):
"""Checks if current credentials are valid, if not runs auth login.
Args:
console_only: bool, True if the auth flow shouldn't use the browser
preselected: str, disable prompts and use this value if not None
Returns:
bool, True if valid credentials are setup.
"""
new_credentials = False
accounts = c_store.AvailableAccounts()
if accounts:
# There is at least one credentialed account.
if preselected:
# Try to use the preselected account. Fail if its not credentialed.
account = preselected
if account not in accounts:
log.status.write('\n[{0}] is not one of your credentialed accounts '
'[{1}].\n'.format(account, ','.join(accounts)))
return False
# Fall through to the set the account property.
else:
# Prompt for the account to use.
idx = console_io.PromptChoice(
accounts + ['Log in with a new account'],
message='Choose the account you would like to use to perform '
'operations for this configuration:',
prompt_string=None)
if idx is None:
return False
if idx < len(accounts):
account = accounts[idx]
else:
new_credentials = True
elif preselected:
# Preselected account specified but there are no credentialed accounts.
log.status.write('\n[{0}] is not a credentialed account.\n'.format(
preselected))
return False
else:
# Must log in with new credentials.
answer = console_io.PromptContinue(
prompt_string='You must log in to continue. Would you like to log in')
if not answer:
return False
new_credentials = True
if new_credentials:
# Call `gcloud auth login` to get new credentials.
# `gcloud auth login` may have user interaction, do not suppress it.
browser_args = ['--no-launch-browser'] if console_only else []
if not self._RunCmd(['auth', 'login'],
['--force', '--brief'] + browser_args,
disable_user_output=False):
return False
# `gcloud auth login` already did `gcloud config set account`.
else:
# Set the config account to the already credentialed account.
properties.PersistProperty(properties.VALUES.core.account, account)
log.status.write('You are logged in as: [{0}].\n\n'
.format(properties.VALUES.core.account.Get()))
return True
def _PickConfiguration(self):
"""Allows user to re-initialize, create or pick new configuration.
Returns:
Configuration name or None.
"""
configs = named_configs.ConfigurationStore.AllConfigs()
active_config = named_configs.ConfigurationStore.ActiveConfig()
if not configs or active_config.name not in configs:
# Listing the configs will automatically create the default config. The
# only way configs could be empty here is if there are no configurations
# and the --configuration flag or env var is set to something that does
# not exist. If configs has items, but the active config is not in there,
      # that similarly means that they are using the flag or the env var and that
      # config does not exist. In either case, just create it and use it, since
      # they have already selected it.
named_configs.ConfigurationStore.CreateConfig(active_config.name)
      # Need to activate it in the file, not just the environment.
active_config.Activate()
return active_config.name
    # If there is only 1 config, it is the default, and there are no
# properties set, assume it was auto created and that it should be
# initialized as if it didn't exist.
if len(configs) == 1:
default_config = configs.get(named_configs.DEFAULT_CONFIG_NAME, None)
if default_config and not default_config.GetProperties():
default_config.Activate()
return default_config.name
choices = []
log.status.write('Settings from your current configuration [{0}] are:\n'
.format(active_config.name))
log.status.flush()
log.status.write(yaml.dump(properties.VALUES.AllValues()))
log.out.flush()
log.status.write('\n')
log.status.flush()
choices.append(
'Re-initialize this configuration [{0}] with new settings '.format(
active_config.name))
choices.append('Create a new configuration')
config_choices = [name for name, c in sorted(configs.iteritems())
if not c.is_active]
choices.extend('Switch to and re-initialize '
'existing configuration: [{0}]'.format(name)
for name in config_choices)
idx = console_io.PromptChoice(choices, message='Pick configuration to use:')
if idx is None:
return None
if idx == 0: # If reinitialize was selected.
self._CleanCurrentConfiguration()
return active_config.name
if idx == 1: # Second option is to create new configuration.
return self._CreateConfiguration()
config_name = config_choices[idx - 2]
named_configs.ConfigurationStore.ActivateConfig(config_name)
return config_name
def _PickProject(self, preselected=None):
"""Allows user to select a project.
Args:
preselected: str, use this value if not None
Returns:
str, project_id or None if was not selected.
"""
project_id = init_util.PickProject(preselected=preselected)
if project_id is not None:
properties.PersistProperty(properties.VALUES.core.project, project_id)
log.status.write('Your current project has been set to: [{0}].\n\n'
.format(project_id))
return project_id
def _PickDefaultRegionAndZone(self):
"""Pulls metadata properties for region and zone and sets them in gcloud."""
try:
# Use --quiet flag to skip the enable api prompt.
project_info = self._RunCmd(['compute', 'project-info', 'describe'],
params=['--quiet'])
except Exception: # pylint:disable=broad-except
log.status.write("""\
Not setting default zone/region (this feature makes it easier to use
[gcloud compute] by setting an appropriate default value for the
--zone and --region flags).
See https://cloud.google.com/compute/docs/gcloud-compute section on how to set
default compute region and zone manually. If you would like [gcloud init] to be
able to do this for you the next time you run it, make sure the
Compute Engine API is enabled for your project on the
https://console.developers.google.com/apis page.
""")
return None
default_zone = None
default_region = None
if project_info is not None:
project_info = resource_projector.MakeSerializable(project_info)
metadata = project_info.get('commonInstanceMetadata', {})
for item in metadata.get('items', []):
if item['key'] == 'google-compute-default-zone':
default_zone = item['value']
elif item['key'] == 'google-compute-default-region':
default_region = item['value']
# We could not determine zone automatically. Before offering choices for
    # zone and/or region, ask the user whether they want to do this.
if not default_zone:
answer = console_io.PromptContinue(
prompt_string=('Do you want to configure a default Compute '
'Region and Zone?'))
if not answer:
return
# Same logic applies to region and zone properties.
def SetProperty(name, default_value, list_command):
"""Set named compute property to default_value or get via list command."""
if not default_value:
values = self._RunCmd(list_command)
if values is None:
return
values = list(values)
message = (
'Which Google Compute Engine {0} would you like to use as project '
'default?\n'
'If you do not specify a {0} via a command line flag while working '
'with Compute Engine resources, the default is assumed.').format(
name)
idx = console_io.PromptChoice(
[value['name'] for value in values]
+ ['Do not set default {0}'.format(name)],
message=message, prompt_string=None, allow_freeform=True,
freeform_suggester=usage_text.TextChoiceSuggester())
if idx is None or idx == len(values):
return
default_value = values[idx]
properties.PersistProperty(properties.VALUES.compute.Property(name),
default_value['name'])
log.status.write('Your project default Compute Engine {0} has been set '
'to [{1}].\nYou can change it by running '
'[gcloud config set compute/{0} NAME].\n\n'
.format(name, default_value['name']))
return default_value
if default_zone:
default_zone = self._RunCmd(['compute', 'zones', 'describe'],
[default_zone])
zone = SetProperty('zone', default_zone, ['compute', 'zones', 'list'])
if zone and not default_region:
default_region = zone['region']
if default_region:
default_region = self._RunCmd(['compute', 'regions', 'describe'],
[default_region])
SetProperty('region', default_region, ['compute', 'regions', 'list'])
def _Summarize(self, configuration_name):
log.status.Print('Your Google Cloud SDK is configured and ready to use!\n')
log.status.Print(
'* Commands that require authentication will use {0} by default'
.format(properties.VALUES.core.account.Get()))
project = properties.VALUES.core.project.Get()
if project:
log.status.Print(
'* Commands will reference project `{0}` by default'
.format(project))
region = properties.VALUES.compute.region.Get()
if region:
log.status.Print(
'* Compute Engine commands will use region `{0}` by default'
.format(region))
zone = properties.VALUES.compute.zone.Get()
if zone:
log.status.Print(
'* Compute Engine commands will use zone `{0}` by default\n'
.format(zone))
log.status.Print(
'Run `gcloud help config` to learn how to change individual settings\n')
log.status.Print(
'This gcloud configuration is called [{config}]. You can create '
'additional configurations if you work with multiple accounts and/or '
'projects.'.format(config=configuration_name))
log.status.Print('Run `gcloud topic configurations` to learn more.\n')
log.status.Print('Some things to try next:\n')
log.status.Print(
'* Run `gcloud --help` to see the Cloud Platform services you can '
'interact with. And run `gcloud help COMMAND` to get help on any '
'gcloud command.')
log.status.Print(
'* Run `gcloud topic -h` to learn about advanced features of the SDK '
'like arg files and output formatting')
def _CreateConfiguration(self):
configuration_name = console_io.PromptResponse(
'Enter configuration name. Names start with a lower case letter and '
'contain only lower case letters a-z, digits 0-9, and hyphens \'-\': ')
configuration_name = configuration_name.strip()
named_configs.ConfigurationStore.CreateConfig(configuration_name)
named_configs.ConfigurationStore.ActivateConfig(configuration_name)
named_configs.ActivePropertiesFile.Invalidate()
return configuration_name
def _CreateBotoConfig(self):
gsutil_path = _FindGsutil()
if not gsutil_path:
log.debug('Unable to find [gsutil]. Not configuring default .boto '
'file')
return
boto_path = platforms.ExpandHomePath(os.path.join('~', '.boto'))
if os.path.exists(boto_path):
log.debug('Not configuring default .boto file. File already '
'exists at [{boto_path}].'.format(boto_path=boto_path))
return
# 'gsutil config -n' creates a default .boto file that the user can read and
# modify.
command_args = ['config', '-n', '-o', boto_path]
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
gsutil_args = execution_utils.ArgsForCMDTool(gsutil_path,
*command_args)
else:
gsutil_args = execution_utils.ArgsForExecutableTool(gsutil_path,
*command_args)
return_code = execution_utils.Exec(gsutil_args, no_exit=True,
out_func=log.file_only_logger.debug,
err_func=log.file_only_logger.debug)
if return_code == 0:
log.status.write("""\
Created a default .boto configuration file at [{boto_path}]. See this file and
[https://cloud.google.com/storage/docs/gsutil/commands/config] for more
information about configuring Google Cloud Storage.
""".format(boto_path=boto_path))
else:
log.status.write('Error creating a default .boto configuration file. '
'Please run [gsutil config -n] if you would like to '
'create this file.\n')
def _CleanCurrentConfiguration(self):
properties.PersistProperty(properties.VALUES.core.account, None)
properties.PersistProperty(properties.VALUES.core.project, None)
properties.PersistProperty(properties.VALUES.compute.region, None)
properties.PersistProperty(properties.VALUES.compute.zone, None)
named_configs.ActivePropertiesFile.Invalidate()
def _RunCmd(self, cmd, params=None, disable_user_output=True):
if not self._cli_power_users_only.IsValidCommand(cmd):
log.info('Command %s does not exist.', cmd)
return None
if params is None:
params = []
args = cmd + params
log.info('Executing: [gcloud %s]', ' '.join(args))
try:
# Disable output from individual commands, so that we get
# command run results, and don't clutter output of init.
if disable_user_output:
args.append('--no-user-output-enabled')
if (properties.VALUES.core.verbosity.Get() is None and
disable_user_output):
# Unless user explicitly set verbosity, suppress from subcommands.
args.append('--verbosity=none')
if properties.VALUES.core.log_http.GetBool():
args.append('--log-http')
# TODO(b/38338044): Remove usage of ExecuteCommandDoNotUse
return resource_projector.MakeSerializable(
self.ExecuteCommandDoNotUse(args))
except SystemExit as exc:
log.info('[%s] has failed\n', ' '.join(cmd + params))
raise c_exc.FailedSubCommand(cmd + params, exc.code)
except BaseException:
log.info('Failed to run [%s]\n', ' '.join(cmd + params))
raise
def _FindGsutil():
"""Finds the bundled gsutil wrapper.
Returns:
The path to gsutil.
"""
sdk_bin_path = config.Paths().sdk_bin_path
if not sdk_bin_path:
return
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
gsutil = 'gsutil.cmd'
else:
gsutil = 'gsutil'
return os.path.join(sdk_bin_path, gsutil)
|
import json
from pprint import pprint, pformat
from dateutil.parser import parse as parsetimestamp
SILENCE_STATUSES = [
"CREATE_COMPLETE",
"CREATE_IN_PROGRESS",
"DELETE_COMPLETE",
"DELETE_IN_PROGRESS",
"REVIEW_IN_PROGRESS",
"ROLLBACK_COMPLETE",
"ROLLBACK_IN_PROGRESS",
"UPDATE_COMPLETE",
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE",
"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS",
]
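# Events whose status appears in SILENCE_STATUSES are treated as routine noise;
# any other status (for example a *_FAILED one) is collected as "relevant" and
# reported further below.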
def get_time(event):
return parsetimestamp(event["Timestamp"])
def tprint(title):
print("%s\n%s\n" % (title, "#" * len(title)))
def iformat(string, indent=0):
    # Pretty-print non-string values, then indent any continuation lines.
    if not isinstance(string, str):
        string = pformat(string)
    return ("\n" + " " * indent).join(string.splitlines())
with open(".cf.messages") as messages_file:
    messages = json.load(messages_file)
events = messages["StackEvents"]
relevant = []
ignored = set()
last_time = get_time(events[0])
for event in events:
age = last_time - get_time(event)
status = event.get("ResourceStatus")
    if age.total_seconds() > 60:
break
last_time = get_time(event)
if status not in SILENCE_STATUSES:
event["RelativeAge"] = str(age)
relevant.append(event)
else:
ignored.add(status)
if ignored:
print("Ignoring %s" % ", ".join(ignored))
if relevant:
    print("\nTraceback (most recent event at bottom):")
for event in relevant[::-1]:
status = event.pop("ResourceStatus")
properties = event.get("ResourceProperties", "{}")
try:
event["ResourceProperties"] = json.loads(properties)
        except ValueError:
print("could not process properties '%s'" % properties)
print(status)
for key, value in event.items():
print(" %s: %s" % (key, iformat(value, 8)))
print("")
else:
    print("CloudFormation Stack's logs look clear.")
|
# RUN: %PYTHON %s
import absl.testing
import numpy
import test_util
import urllib.request
from PIL import Image
model_path = "https://tfhub.dev/tensorflow/lite-model/mobilenet_v2_1.0_224_quantized/1/default/1?lite-format=tflite"
class MobilenetQuantTest(test_util.TFLiteModelTest):
def __init__(self, *args, **kwargs):
super(MobilenetQuantTest, self).__init__(model_path, *args, **kwargs)
def compare_results(self, iree_results, tflite_results, details):
super(MobilenetQuantTest, self).compare_results(iree_results, tflite_results, details)
self.assertTrue(numpy.isclose(iree_results[0], tflite_results[0], atol=1e-6).all())
def generate_inputs(self, input_details):
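    # Download a sample image and resize it to the shape the model expects;
    # input_details comes from the TFLite interpreter's input metadata.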
img_path = "https://github.com/google-coral/test_data/raw/master/cat.bmp"
local_path = "/".join([self.workdir, "cat.bmp"])
urllib.request.urlretrieve(img_path, local_path)
shape = input_details[0]["shape"]
im = numpy.array(Image.open(local_path).resize((shape[1], shape[2])))
args = [im.reshape(shape)]
return args
def test_compile_tflite(self):
self.compile_and_execute()
if __name__ == '__main__':
absl.testing.absltest.main()
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ........................................ NOTICE
#
# This file has been derived and modified from a source licensed under Apache Version 2.0.
# See files NOTICE and README.md for more details.
#
# ........................................ ******
"""
logtools._parse
Log format parsing programmatic and command-line utilities.
uses the logtools.parsers module
"""
import sys
import logging
from operator import and_
from optparse import OptionParser
from functools import reduce
import json
import logtools.parsers
import logtools.parsers2
from ._config import interpolate_config, AttrDict, setLoglevel
from ._config import checkDpath
from .parsers2 import FileFormat , TraditionalFileFormat, ForwardFormat
from .parsers2 import TraditionalForwardFormat
from .utils import getObj
checkDpath()
__all__ = ['logparse_parse_args', 'logparse', 'logparse_main']
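# Illustrative command-line usage (a sketch only; it assumes the `logparse`
# console entry point wired to logparse_main() below is installed, and that the
# input is an Apache-style access log):
#
#   cat access.log | logparse --parser CommonLogFormat --field 4
#
# would emit the fourth parsed field of every line (fields are 1-based), using
# the options defined in logparse_parse_args() below.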
def logparse_parse_args():
parser = OptionParser()
parser.add_option("-p", "--parser", dest="parser", default=None,
help="Log format parser (e.g 'CommonLogFormat'). See documentation for available parsers.") # noqa
parser.add_option("-F", "--format", dest="format", default=None,
help="Format string. Used by the parser (e.g AccessLog format specifier)") # noqa
parser.add_option("-f", "--field", dest="field", default=None,
help="Parsed Field index to output")
parser.add_option("-i", "--ignore", dest="ignore", default=None, action="store_true", # noqa
help="Ignore missing fields errors (skip lines with missing fields)") # noqa
parser.add_option("-H", "--header", dest="header", default=None, action="store_true", # noqa
help="Prepend a header describing the selected fields to output.") # noqa
parser.add_option("-P", "--profile", dest="profile", default='logparse',
help="Configuration profile (section in configuration file)") # noqa
parser.add_option("-R", "--raw", dest="raw", default=None, action="store_true",
help="When set output is not encoded for UTF-8")
## default kept for compatibility
# logging level for debug and other information
parser.add_option("-s","--sym" , type = str,
dest="logLevSym",
help="logging level (symbol)")
parser.add_option("-n","--num" , type=int ,
dest="logLevVal",
help="logging level (value)")
options, args = parser.parse_args()
# Interpolate from configuration
options.parser = interpolate_config(options.parser, options.profile, 'parser')
options.format = interpolate_config(options.format, options.profile, 'format',
default=False)
options.field = interpolate_config(options.field, options.profile, 'field')
options.ignore = interpolate_config(options.ignore, options.profile, 'ignore',
default=False, type=bool)
options.header = interpolate_config(options.header, options.profile, 'header',
default=False, type=bool)
options.raw = interpolate_config(options.raw, options.profile, 'raw')
# Set the logging level
setLoglevel(options)
return AttrDict(options.__dict__), args
def logparse(options, args, fh):
"""Parse given input stream using given
parser class and emit specified field(s)"""
field = options.field
logtools.parsers2.addConfigFileSection()
parser = getObj(options.parser, (logtools.parsers, logtools.parsers2))()
if options.get('format', None):
parser.set_format(options.format)
    key_func = None
keys = None
if isinstance(options.field, int) or \
(isinstance(options.field, str) and options.field.isdigit()):
# Field given as integer (index)
field = int(options.field) - 1
key_func = lambda x: parser(x.strip()).by_index(field, raw=True)
keys = [options.field]
else:
if isinstance(parser, logtools.parsers2.JSONParserPlus):
key_func = logtools.parsers2.dpath_getter_gen(parser, options.field, options)
else:
# Field given as string
# Check how many fields are requested
keys = options.field.split(",")
L = len(keys)
if L == 1:
key_func = lambda x: parser(x.strip())[field]
else:
# Multiple fields requested
is_indices = reduce(and_, (k.isdigit() for k in keys), True)
key_func = logtools.parsers.multikey_getter_gen(parser, keys,
is_indices=is_indices)
if options.header is True:
yield '\t'.join(keys)
for line in fh:
try:
yield key_func(line)
except KeyError as exc:
# Could not find user-specified field
logging.warn("Could not match user-specified fields: %s", exc)
except ValueError as exc:
# Could not parse the log line
if options.ignore:
logging.debug("Could not match fields for parsed line: %s", line)
continue
else:
logging.error("Could not match fields for parsed line: %s", line)
raise
def logparse_main():
"""Console entry-point"""
options, args = logparse_parse_args()
for row in logparse(options, args, fh=sys.stdin):
if row:
if isinstance(row, dict):
json.dump(row, sys.stdout)
elif options.raw:
print(row)
else:
print( row.encode('ascii', 'ignore') )
return 0
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9634")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9634")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
    try:
        data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
    except:
        print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (Bitcoin address): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
import numpy as np
def relu(input):
'''Define your relu activation function here'''
# Calculate the value for the output of the relu function: output
output = max(input, 0)
# Return the value just calculated
return(output)
input_data = np.array([3,5])
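# The original exercise assumes a pre-defined `weights` dictionary; the values
# below are illustrative placeholders so that the snippet runs stand-alone.
weights = {'node_0': np.array([2, 4]),
           'node_1': np.array([4, -5]),
           'output': np.array([2, 7])}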
# Calculate node 0 value: node_0_output
node_0_input = (input_data * weights['node_0']).sum()
node_0_output = relu(node_0_input)
# Calculate node 1 value: node_1_output
node_1_input = (input_data * weights['node_1']).sum()
node_1_output = relu(node_1_input)
# Put node values into array: hidden_layer_outputs
hidden_layer_outputs = np.array([node_0_output, node_1_output])
# Calculate model output (do not apply relu)
model_output = (hidden_layer_outputs * weights['output']).sum()
# Print model output
print(model_output)
|
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import scenario_cfg_lib
import launch_cfg_lib
import common
import pt
def is_nuc_whl_linux(names, vmid):
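    # Helper used below: True only for a Linux-like UOS running on a board other
    # than the APL boards (apl-mrb, apl-up2), i.e. a NUC/WHL-style Linux guest.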
uos_type = names['uos_types'][vmid]
board_name = names['board_name']
if launch_cfg_lib.is_linux_like(uos_type) and board_name not in ("apl-mrb", "apl-up2"):
return True
return False
def is_mount_needed(virt_io, vmid):
if True in launch_cfg_lib.MOUNT_FLAG_DIC[vmid]:
return True
return False
def tap_uos_net(names, virt_io, vmid, config):
uos_type = names['uos_types'][vmid]
board_name = names['board_name']
vm_name = common.undline_name(uos_type).lower()
if launch_cfg_lib.is_linux_like(uos_type) or uos_type in ("ANDROID", "ALIOS"):
i = 0
for mount_flag in launch_cfg_lib.MOUNT_FLAG_DIC[vmid]:
if not mount_flag:
i += 1
continue
blk = virt_io['block'][vmid][i]
rootfs_img = blk.split(':')[1].strip(':')
print('if [ ! -f "/data{}/{}" ]; then'.format(i, rootfs_img), file=config)
print(' echo "no /data{}/{}, exit"'.format(i, rootfs_img), file=config)
print(" exit", file=config)
print("fi", file=config)
print("", file=config)
i += 1
print("#vm-name used to generate uos-mac address", file=config)
print("mac=$(cat /sys/class/net/e*/address)", file=config)
print("vm_name=post_vm_id$1", file=config)
print("mac_seed=${mac:0:17}-${vm_name}", file=config)
print("", file=config)
for net in virt_io['network'][vmid]:
if net:
net_name = net
if ',' in net:
net_name = net.split(',')[0]
print("tap_net tap_{}".format(net_name), file=config)
print("#check if the vm is running or not", file=config)
print("vm_ps=$(pgrep -a -f acrn-dm)", file=config)
print('result=$(echo $vm_ps | grep -w "${vm_name}")', file=config)
print('if [[ "$result" != "" ]]; then', file=config)
print(' echo "$vm_name is running, can\'t create twice!"', file=config)
print(" exit", file=config)
print("fi", file=config)
print("", file=config)
def off_line_cpus(args, vmid, uos_type, config):
"""
:param args: the dictionary of argument for acrn-dm
:param vmid: ID of the vm
:param uos_type: the type of UOS
:param config: it is a file pointer to write offline cpu information
"""
pcpu_id_list = get_cpu_affinity_list(args["cpu_affinity"], vmid)
if not pcpu_id_list:
sos_vmid = launch_cfg_lib.get_sos_vmid()
cpu_affinity = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "cpu_affinity", "pcpu_id")
pcpu_id_list = get_cpu_affinity_list(cpu_affinity, sos_vmid+vmid)
if not pcpu_id_list:
key = "scenario config error"
launch_cfg_lib.ERR_LIST[key] = "No available cpu to offline and pass it to vm {}".format(vmid)
    print("# offline pinned vCPUs from SOS before launching the UOS", file=config)
print('cpu_path="/sys/devices/system/cpu"', file=config)
print("for i in `ls ${cpu_path}`; do", file=config)
print(" for j in {}; do".format(' '.join([str(i) for i in pcpu_id_list])), file=config)
print(' if [ "cpu"$j = $i ]; then', file=config)
print(' online=`cat ${cpu_path}/$i/online`', file=config)
print(' idx=`echo $i | tr -cd "[1-99]"`', file=config)
print(' echo $i online=$online', file=config)
print(' if [ "$online" = "1" ]; then', file=config)
print(" echo 0 > ${cpu_path}/$i/online", file=config)
print(" online=`cat ${cpu_path}/$i/online`", file=config)
print(" # during boot time, cpu hotplug may be disabled by pci_device_probe during a pci module insmod", file=config)
print(' while [ "$online" = "1" ]; do', file=config)
print(" sleep 1", file=config)
print(" echo 0 > ${cpu_path}/$i/online", file=config)
print(" online=`cat ${cpu_path}/$i/online`", file=config)
print(" done", file=config)
print(" echo $idx > /sys/devices/virtual/misc/acrn_hsm/remove_cpu", file=config)
print(" fi", file=config)
print(" fi", file=config)
print(" done", file=config)
print("done", file=config)
print("", file=config)
def run_container(board_name, uos_type, config):
"""
    The container uses Clear Linux as its rootfs
:param board_name: board name
:param uos_type: the os name of user os
:param config: the file pointer to store the information
"""
    # the runC.json is stored in a path under the board name, but for nuc7i7dnb/nuc6cayh/kbl-nuc-i7 it is under nuc/
if 'nuc' in board_name:
board_name = 'nuc'
if board_name not in ("apl-mrb", "nuc") or not launch_cfg_lib.is_linux_like(uos_type):
return
print("function run_container()", file=config)
print("{", file=config)
print("vm_name=vm1", file=config)
print('config_src="/usr/share/acrn/samples/{}/runC.json"'.format(board_name), file=config)
print('shell="/usr/share/acrn/conf/add/$vm_name.sh"', file=config)
print('arg_file="/usr/share/acrn/conf/add/$vm_name.args"', file=config)
print('runc_bundle="/usr/share/acrn/conf/add/runc/$vm_name"', file=config)
print('rootfs_dir="/usr/share/acrn/conf/add/runc/rootfs"', file=config)
print('config_dst="$runc_bundle/config.json"', file=config)
print("", file=config)
print("", file=config)
print("input=$(runc list -f table | awk '{print $1}''{print $3}')", file=config)
print("arr=(${input// / })", file=config)
print("", file=config)
print("for((i=0;i<${#arr[@]};i++))", file=config)
print("do", file=config)
print(' if [ "$vm_name" = "${arr[$i]}" ]; then', file=config)
print(' if [ "running" = "${arr[$i+1]}" ]; then', file=config)
print(' echo "runC instance ${arr[$i]} is running"', file=config)
print(" exit", file=config)
print(" else", file=config)
print(" runc kill ${arr[$i]}", file=config)
print(" runc delete ${arr[$i]}", file=config)
print(" fi", file=config)
print(" fi", file=config)
print("done", file=config)
print("vmsts=$(acrnctl list)", file=config)
print("vms=(${vmsts// / })", file=config)
print("for((i=0;i<${#vms[@]};i++))", file=config)
print("do", file=config)
print(' if [ "$vm_name" = "${vms[$i]}" ]; then', file=config)
print(' if [ "stopped" != "${vms[$i+1]}" ]; then', file=config)
print(' echo "Uos ${vms[$i]} ${vms[$i+1]}"', file=config)
print(" acrnctl stop ${vms[$i]}", file=config)
print(" fi", file=config)
print(" fi", file=config)
print("done", file=config)
dst_str = """ cp "$config_src" "$config_dst"
args=$(sed '{s/-C//g;s/^[ \\t]*//g;s/^/\\"/;s/ /\\",\\"/g;s/$/\\"/}' ${arg_file})
sed -i "s|\\"sh\\"|\\"$shell\\", $args|" $config_dst"""
print('', file=config)
print('if [ ! -f "$shell" ]; then', file=config)
print(' echo "Pls add the vm at first!"', file=config)
print(' exit', file=config)
print('fi', file=config)
print('', file=config)
print('if [ ! -f "$arg_file" ]; then', file=config)
print(' echo "Pls add the vm args!"', file=config)
print(' exit', file=config)
print('fi', file=config)
print('', file=config)
print('if [ ! -d "$rootfs_dir" ]; then', file=config)
print(' mkdir -p "$rootfs_dir"', file=config)
print('fi', file=config)
print('if [ ! -d "$runc_bundle" ]; then', file=config)
print(' mkdir -p "$runc_bundle"', file=config)
print('fi', file=config)
print('if [ ! -f "$config_dst" ]; then', file=config)
print('{}'.format(dst_str), file=config)
print('fi', file=config)
print('runc run --bundle $runc_bundle -d $vm_name', file=config)
    print('echo "The runC container is running in background"', file=config)
print('echo "\'#runc exec <vmname> bash\' to login the container bash"', file=config)
print('exit', file=config)
print('}', file=config)
print('', file=config)
def boot_image_type(args, vmid, config):
    if args['vbootloader'][vmid] != "vsbl":
        return
print('boot_dev_flag=",b"', file=config)
print("if [ $4 == 1 ];then", file=config)
print(' boot_image_option="--vsbl /usr/share/acrn/bios/VSBL_debug.bin"', file=config)
print("else", file=config)
print(' boot_image_option="--vsbl /usr/share/acrn/bios/VSBL.bin"', file=config)
print("fi", file=config)
print("", file=config)
def interrupt_storm(pt_sel, config):
if not pt_sel:
return
# TODO: --intr_monitor should be configurable by user
print("#interrupt storm monitor for pass-through devices, params order:", file=config)
print("#threshold/s,probe-period(s),intr-inject-delay-time(ms),delay-duration(ms)", file=config)
print('intr_storm_monitor="--intr_monitor 10000,10,1,100"', file=config)
print("", file=config)
def gvt_arg_set(dm, vmid, uos_type, config):
gvt_args = dm['gvt_args'][vmid]
if gvt_args == "gvtd":
bus = int(launch_cfg_lib.GPU_BDF.split(':')[0], 16)
dev = int(launch_cfg_lib.GPU_BDF.split('.')[0].split(':')[1], 16)
fun = int(launch_cfg_lib.GPU_BDF.split('.')[1], 16)
print(' -s 2,passthru,{}/{}/{},gpu \\'.format(bus, dev, fun), file=config)
elif gvt_args:
print(' -s 2,pci-gvt -G "$2" \\', file=config)
def log_level_set(uos_type, config):
print("#logger_setting, format: logger_name,level; like following", file=config)
print('logger_setting="--logger_setting console,level=4;kmsg,level=3;disk,level=5"', file=config)
print("", file=config)
def tap_network(virt_io, vmid, config):
none_i = 0
tap_net_list = virt_io['network'][vmid]
for net in tap_net_list:
        if net is None:
none_i += 1
tap_net_num = len(tap_net_list) - none_i
if tap_net_num >= 1:
print("function tap_net() {", file=config)
print("# create a unique tap device for each VM", file=config)
print("tap=$1", file=config)
print('tap_exist=$(ip a | grep "$tap" | awk \'{print $1}\')', file=config)
print('if [ "$tap_exist"x != "x" ]; then', file=config)
print(' echo "tap device existed, reuse $tap"', file=config)
print("else", file=config)
print(" ip tuntap add dev $tap mode tap", file=config)
print("fi", file=config)
print("", file=config)
print("# if acrn-br0 exists, add VM's unique tap device under it", file=config)
print("br_exist=$(ip a | grep acrn-br0 | awk '{print $1}')", file=config)
print('if [ "$br_exist"x != "x" -a "$tap_exist"x = "x" ]; then', file=config)
        print(' echo "acrn-br0 bridge already exists, adding new tap device to it..."', file=config)
print(' ip link set "$tap" master acrn-br0', file=config)
print(' ip link set dev "$tap" down', file=config)
print(' ip link set dev "$tap" up', file=config)
print("fi", file=config)
print("}", file=config)
print("", file=config)
def launch_begin(names, virt_io, vmid, config):
board_name = names['board_name']
uos_type = names['uos_types'][vmid]
launch_uos = common.undline_name(uos_type).lower()
tap_network(virt_io, vmid, config)
run_container(board_name, uos_type, config)
print("function launch_{}()".format(launch_uos), file=config)
print("{", file=config)
def wa_usage(uos_type, config):
if uos_type in ("ANDROID", "ALIOS"):
print("# WA for USB role switch hang issue, disable runtime PM of xHCI device", file=config)
print("echo on > /sys/devices/pci0000:00/0000:00:15.0/power/control", file=config)
print("", file=config)
def mem_size_set(args, vmid, config):
mem_size = args['mem_size'][vmid]
print("mem_size={}M".format(mem_size), file=config)
def uos_launch(names, args, virt_io, vmid, config):
gvt_args = args['gvt_args'][vmid]
uos_type = names['uos_types'][vmid]
launch_uos = common.undline_name(uos_type).lower()
board_name = names['board_name']
if 'nuc' in board_name:
board_name = 'nuc'
if uos_type == "CLEARLINUX" and board_name in ("apl-mrb", "nuc"):
print('if [ "$1" = "-C" ];then', file=config)
print(' if [ $(hostname) = "runc" ]; then', file=config)
print(' echo "Already in container exit!"', file=config)
print(" exit", file=config)
print(" fi", file=config)
print(' echo "runc_container"', file=config)
print(" run_container", file=config)
if board_name == "apl-mrb":
print(" exit", file=config)
print("fi", file=config)
if is_mount_needed(virt_io, vmid):
print("", file=config)
if gvt_args == "gvtd" or not gvt_args:
print('launch_{} {} "{}" $debug'.format(launch_uos, vmid, vmid), file=config)
else:
print('launch_{} {} "{}" "{}" $debug'.format(launch_uos, vmid, gvt_args, vmid), file=config)
print("", file=config)
i = 0
for mount_flag in launch_cfg_lib.MOUNT_FLAG_DIC[vmid]:
if not mount_flag:
i += 1
continue
print("umount /data{}".format(i), file=config)
i += 1
else:
print("else", file=config)
if gvt_args == "gvtd" or not gvt_args:
print(' launch_{} {}'.format(launch_uos, vmid), file=config)
elif gvt_args:
print(' launch_{} {} "{}"'.format(launch_uos, vmid, gvt_args), file=config)
print("fi", file=config)
return
elif not is_mount_needed(virt_io, vmid):
if gvt_args == "gvtd" or not gvt_args:
print('launch_{} {}'.format(launch_uos, vmid), file=config)
else:
print('launch_{} {} "{}"'.format(launch_uos, vmid, gvt_args), file=config)
else:
print("", file=config)
if gvt_args == "gvtd" or not gvt_args:
print('launch_{} {} "{}" $debug'.format(launch_uos, vmid, vmid), file=config)
else:
print('launch_{} {} "{}" "{}" $debug'.format(launch_uos, vmid, gvt_args, vmid), file=config)
print("", file=config)
i = 0
for mount_flag in launch_cfg_lib.MOUNT_FLAG_DIC[vmid]:
if not mount_flag:
i += 1
continue
print("umount /data{}".format(i), file=config)
i += 1
def launch_end(names, args, virt_io, vmid, config):
board_name = names['board_name']
uos_type = names['uos_types'][vmid]
mem_size = args["mem_size"][vmid]
if uos_type in ("CLEARLINUX", "ANDROID", "ALIOS") and not is_nuc_whl_linux(names, vmid):
print("debug=0", file=config)
print("", file=config)
print('while getopts "hdC" opt', file=config)
print("do", file=config)
print(" case $opt in", file=config)
print(" d) debug=1", file=config)
print(" ;;", file=config)
print(" C)", file=config)
print(" ;;", file=config)
print(" h) help", file=config)
print(" exit 1", file=config)
print(" ;;", file=config)
print(" ?) help", file=config)
print(" exit 1", file=config)
print(" ;;", file=config)
print(" esac", file=config)
print("done", file=config)
print("", file=config)
if is_mount_needed(virt_io, vmid):
i = 0
for mount_flag in launch_cfg_lib.MOUNT_FLAG_DIC[vmid]:
if not mount_flag:
i += 1
continue
blk = virt_io['block'][vmid][i]
root_fs = blk.split(':')[0]
print('if [ ! -b "{}" ]; then'.format(root_fs), file=config)
print(' echo "no {} data partition, exit"'.format(root_fs), file=config)
print(" exit", file=config)
print("fi", file=config)
print("mkdir -p /data{}".format(i), file=config)
print("mount {} /data{}".format(root_fs, i), file=config)
print("", file=config)
i += 1
sos_vmid = launch_cfg_lib.get_sos_vmid()
if args['cpu_sharing'] == "SCHED_NOOP" or common.VM_TYPES[vmid+sos_vmid] == "POST_RT_VM":
off_line_cpus(args, vmid, uos_type, config)
uos_launch(names, args, virt_io, vmid, config)
def set_dm_pt(names, sel, vmid, config, dm):
uos_type = names['uos_types'][vmid]
if sel.bdf['usb_xdci'][vmid] and sel.slot['usb_xdci'][vmid]:
sub_attr = ''
if uos_type == "WINDOWS":
sub_attr = ',d3hot_reset'
print(' -s {},passthru,{}/{}/{}{} \\'.format(sel.slot["usb_xdci"][vmid], sel.bdf["usb_xdci"][vmid][0:2],\
sel.bdf["usb_xdci"][vmid][3:5], sel.bdf["usb_xdci"][vmid][6:7], sub_attr), file=config)
# pass through audio/audio_codec
if sel.bdf['audio'][vmid]:
print(" $boot_audio_option \\", file=config)
if sel.bdf['cse'][vmid] and sel.slot['cse'][vmid]:
print(" $boot_cse_option \\", file=config)
if sel.bdf["sd_card"][vmid] and sel.slot['sd_card'][vmid]:
print(' -s {},passthru,{}/{}/{} \\'.format(sel.slot["sd_card"][vmid], sel.bdf["sd_card"][vmid][0:2], \
sel.bdf["sd_card"][vmid][3:5], sel.bdf["sd_card"][vmid][6:7]), file=config)
if sel.bdf['bluetooth'][vmid] and sel.slot['bluetooth'][vmid]:
print(' -s {},passthru,{}/{}/{} \\'.format(sel.slot["bluetooth"][vmid], sel.bdf["bluetooth"][vmid][0:2], \
sel.bdf["bluetooth"][vmid][3:5], sel.bdf["bluetooth"][vmid][6:7]), file=config)
if sel.bdf['wifi'][vmid] and sel.slot['wifi'][vmid]:
if uos_type == "ANDROID":
print(" -s {},passthru,{}/{}/{},keep_gsi \\".format(sel.slot["wifi"][vmid], sel.bdf["wifi"][vmid][0:2], \
sel.bdf["wifi"][vmid][3:5], sel.bdf["wifi"][vmid][6:7]), file=config)
else:
print(" -s {},passthru,{}/{}/{} \\".format(sel.slot["wifi"][vmid], sel.bdf["wifi"][vmid][0:2], \
sel.bdf["wifi"][vmid][3:5], sel.bdf["wifi"][vmid][6:7]), file=config)
if sel.bdf['ipu'][vmid] or sel.bdf['ipu_i2c'][vmid]:
print(" $boot_ipu_option \\", file=config)
if sel.bdf['ethernet'][vmid] and sel.slot['ethernet'][vmid]:
if vmid in dm["enable_ptm"] and dm["enable_ptm"][vmid] == 'y':
print(" -s {},passthru,{}/{}/{},enable_ptm \\".format(sel.slot["ethernet"][vmid], sel.bdf["ethernet"][vmid][0:2], \
sel.bdf["ethernet"][vmid][3:5], sel.bdf["ethernet"][vmid][6:7]), file=config)
else:
print(" -s {},passthru,{}/{}/{} \\".format(sel.slot["ethernet"][vmid], sel.bdf["ethernet"][vmid][0:2], \
sel.bdf["ethernet"][vmid][3:5], sel.bdf["ethernet"][vmid][6:7]), file=config)
if sel.bdf['sata'] and sel.slot["sata"][vmid]:
print(" -s {},passthru,{}/{}/{} \\".format(sel.slot["sata"][vmid], sel.bdf["sata"][vmid][0:2], \
sel.bdf["sata"][vmid][3:5], sel.bdf["sata"][vmid][6:7]), file=config)
if sel.bdf['nvme'] and sel.slot["nvme"][vmid]:
print(" -s {},passthru,{}/{}/{} \\".format(sel.slot["nvme"][vmid], sel.bdf["nvme"][vmid][0:2], \
sel.bdf["nvme"][vmid][3:5], sel.bdf["nvme"][vmid][6:7]), file=config)
def vboot_arg_set(dm, vmid, config):
"""
Set the argument of vbootloader
:param dm: the dictionary of argument for acrn-dm
:param vmid: ID of the vm
:param config: it is a file pointer to write vboot loader information
:return: None
"""
# TODO: Support to generate '-k' xml config from webUI and to parse it
if dm['vbootloader'][vmid] == "ovmf":
print(" --ovmf /usr/share/acrn/bios/OVMF.fd \\", file=config)
elif dm['vbootloader'][vmid] == "vsbl":
print(" $boot_image_option \\",file=config)
def xhci_args_set(dm, vmid, config):
# usb_xhci set, the value is string
if dm['xhci'][vmid]:
print(" -s {},xhci,{} \\".format(
launch_cfg_lib.virtual_dev_slot("xhci"), dm['xhci'][vmid]), file=config)
def shm_arg_set(dm, vmid, config):
if dm['shm_enabled'] == "n":
return
for shm_region in dm["shm_regions"][vmid]:
print(" -s {},ivshmem,{} \\".format(
launch_cfg_lib.virtual_dev_slot("shm_region_{}".format(shm_region)), shm_region), file=config)
def virtio_args_set(dm, virt_io, vmid, config):
# virtio-input set, the value type is a list
for input_val in virt_io['input'][vmid]:
if input_val:
print(" -s {},virtio-input,{} \\".format(
launch_cfg_lib.virtual_dev_slot("virtio-input{}".format(input_val)), input_val), file=config)
# virtio-blk set, the value type is a list
i = 0
for mount_flag in launch_cfg_lib.MOUNT_FLAG_DIC[vmid]:
blk = virt_io['block'][vmid][i]
if not mount_flag:
if blk:
rootfs_img = blk.strip(':')
print(" -s {},virtio-blk,{} \\".format(launch_cfg_lib.virtual_dev_slot("virtio-blk{}".format(blk)), rootfs_img), file=config)
i += 1
continue
rootfs_img = blk.split(':')[1].strip(':')
print(" -s {},virtio-blk,/data{}/{} \\".format(launch_cfg_lib.virtual_dev_slot("blk_mount_{}".format(i)), i, rootfs_img), file=config)
i += 1
# virtio-net set, the value type is a list
for net in virt_io['network'][vmid]:
if net:
print(" -s {},virtio-net,tap_{} \\".format(launch_cfg_lib.virtual_dev_slot("virtio-net{}".format(net)), net), file=config)
# virtio-console set, the value type is a string
if virt_io['console'][vmid]:
print(" -s {},virtio-console,{} \\".format(
launch_cfg_lib.virtual_dev_slot("virtio-console"),
virt_io['console'][vmid]), file=config)
def get_cpu_affinity_list(cpu_affinity, vmid):
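    # Return the list of physical CPU ids configured for the given VM id,
    # dropping any None entries; returns an empty value if the id is not
    # present in the cpu_affinity mapping.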
pcpu_id_list = ''
for uos_id,cpus in cpu_affinity.items():
if vmid == uos_id:
pcpu_id_list = [id for id in list(cpu_affinity[uos_id]) if id != None]
return pcpu_id_list
def pcpu_arg_set(dm, vmid, config):
if dm['cpu_sharing'] == "SCHED_NOOP":
return
pcpu_id_list = get_cpu_affinity_list(dm["cpu_affinity"], vmid)
if pcpu_id_list:
print(" --cpu_affinity {} \\".format(','.join(pcpu_id_list)), file=config)
def dm_arg_set(names, sel, virt_io, dm, vmid, config):
uos_type = names['uos_types'][vmid]
board_name = names['board_name']
boot_image_type(dm, vmid, config)
# uuid get
sos_vmid = launch_cfg_lib.get_sos_vmid()
scenario_uuid = launch_cfg_lib.get_scenario_uuid(vmid, sos_vmid)
# clearlinux/android/alios
print('acrn-dm -A -m $mem_size -s 0:0,hostbridge -U {} \\'.format(scenario_uuid), file=config)
if launch_cfg_lib.is_linux_like(uos_type) or uos_type in ("ANDROID", "ALIOS"):
if uos_type in ("ANDROID", "ALIOS"):
print(' $npk_virt \\', file=config)
print(" -s {},virtio-rpmb \\".format(launch_cfg_lib.virtual_dev_slot("virtio-rpmb")), file=config)
print(" --enable_trusty \\", file=config)
# mac_seed
print(" --mac_seed $mac_seed \\", file=config)
if dm['rtos_type'][vmid] != "no":
if virt_io:
print(" --virtio_poll 1000000 \\", file=config)
if dm['rtos_type'][vmid] == "Soft RT":
print(" --rtvm \\", file=config)
if dm['rtos_type'][vmid] == "Hard RT":
print(" --lapic_pt \\", file=config)
# windows
if uos_type == "WINDOWS":
print(" --windows \\", file=config)
# pm_channel set
if dm['pm_channel'][vmid] and dm['pm_channel'][vmid] != None:
pm_key = dm['pm_channel'][vmid]
pm_vuart = "--pm_notify_channel uart"
if vmid in dm["allow_trigger_s5"] and dm["allow_trigger_s5"][vmid] == 'y':
pm_vuart = pm_vuart + ",allow_trigger_s5 "
else:
pm_vuart = pm_vuart + " "
if pm_key == "vuart1(tty)":
vuart_base = launch_cfg_lib.get_vuart1_from_scenario(sos_vmid + vmid)
if vuart_base == "INVALID_COM_BASE":
err_key = "uos:id={}:poweroff_channel".format(vmid)
launch_cfg_lib.ERR_LIST[err_key] = "vuart1 of VM{} in scenario file should select 'SOS_COM2_BASE'".format(sos_vmid + vmid)
return
scenario_cfg_lib.get_sos_vuart_settings()
print(" {} \\".format(pm_vuart + launch_cfg_lib.PM_CHANNEL_DIC[pm_key] + scenario_cfg_lib.SOS_UART1_VALID_NUM), file=config)
elif pm_key == "vuart1(pty)":
print(" {} \\".format(pm_vuart + launch_cfg_lib.PM_CHANNEL_DIC[pm_key]), file=config)
else:
print(" {} \\".format(launch_cfg_lib.PM_CHANNEL_DIC[pm_key]), file=config)
# set logger_setting for all VMs
print(" $logger_setting \\", file=config)
# XHCI args set
xhci_args_set(dm, vmid, config)
# VIRTIO args set
virtio_args_set(dm, virt_io, vmid, config)
# GVT args set
gvt_arg_set(dm, vmid, uos_type, config)
# vbootloader setting
vboot_arg_set(dm, vmid, config)
# pcpu-list args set
pcpu_arg_set(dm, vmid, config)
# shm regions args set
shm_arg_set(dm, vmid, config)
# ssram set
ssram_enabled = 'n'
try:
ssram_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "SSRAM", "SSRAM_ENABLED")
except:
pass
if uos_type == "PREEMPT-RT LINUX" and ssram_enabled == 'y':
print(" --ssram \\", file=config)
for value in sel.bdf.values():
if value[vmid]:
print(" $intr_storm_monitor \\", file=config)
break
if uos_type != "PREEMPT-RT LINUX":
print(" -s 31:0,lpc \\", file=config)
# redirect console
if dm['vuart0'][vmid] == "Enable":
print(" -l com1,stdio \\", file=config)
if launch_cfg_lib.is_linux_like(uos_type) or uos_type in ("ANDROID", "ALIOS"):
if board_name == "apl-mrb":
print(" -i /run/acrn/ioc_$vm_name,0x20 \\", file=config)
print(" -l com2,/run/acrn/ioc_$vm_name \\", file=config)
if not is_nuc_whl_linux(names, vmid):
print(" -s {},wdt-i6300esb \\".format(launch_cfg_lib.virtual_dev_slot("wdt-i6300esb")), file=config)
set_dm_pt(names, sel, vmid, config, dm)
if dm['console_vuart'][vmid] == "Enable":
print(" -s {},uart,vuart_idx:0 \\".format(launch_cfg_lib.virtual_dev_slot("console_vuart")), file=config)
for vuart_id in dm["communication_vuarts"][vmid]:
if not vuart_id:
break
print(" -s {},uart,vuart_idx:{} \\".format(
launch_cfg_lib.virtual_dev_slot("communication_vuart_{}".format(vuart_id)), vuart_id), file=config)
print(" $vm_name", file=config)
print("}", file=config)
def gen(names, pt_sel, virt_io, dm, vmid, config):
board_name = names['board_name']
uos_type = names['uos_types'][vmid]
    # passthrough bdf/vpid dictionary
pt.gen_pt_head(names, dm, pt_sel, vmid, config)
# gen launch header
launch_begin(names, virt_io, vmid, config)
tap_uos_net(names, virt_io, vmid, config)
# passthrough device
pt.gen_pt(names, dm, pt_sel, vmid, config)
wa_usage(uos_type, config)
mem_size_set(dm, vmid, config)
interrupt_storm(pt_sel, config)
log_level_set(uos_type, config)
# gen acrn-dm args
dm_arg_set(names, pt_sel, virt_io, dm, vmid, config)
# gen launch end
launch_end(names, dm, virt_io, vmid, config)
|
import math
from typing import List, Union, Tuple
import torch
import torch.nn as nn
from astro_dynamo.snap import SnapShot
from .snaptools import align_bar
def _symmetrize_matrix(x, dim):
"""Symmetrize a tensor along dimension dim"""
return (x + x.flip(dims=[dim])) / 2
class DynamicalModel(nn.Module):
    """DynamicalModel class. This contains a snapshot of the particles, the potentials
    in which they move, and the targets to which the model should be fitted.
Attributes:
snap:
Should be a SnapShot whose masses will be optimised
potentials:
            The potentials in which the particles move. If self gravity is not required set self_gravity_update to None.
            If self gravity is required then the potential of the snapshot should be in potentials[0]
            and self_gravity_update sets how much to update the running average of the density on
            each iteration. The default value of 0.2 exponentially averages the density with a
            timescale of 5 snapshots (=1/0.2).
targets:
A list of targets. Running
model = DynamicalModel(snap, potentials, targets)
current_target_list = model()
            will provide a list of the targets evaluated with the present model. These are then
typically combined to a loss that pytorch can optimise.
Methods:
forward()
Computes the targets by evaluating them on the current snapshot. Can also be called as DynamicalModel()
integrate(steps=256)
            Integrates the model forward by `steps`. Updates the density associated with potential[0]
update_potential()
            Recomputes the accelerations from potential[0]. Adjusts each particle's velocity by a factor vc_new/vc_old
resample()
Resamples the snapshot to equal mass particles.
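    Example:
        A typical fitting loop (illustrative only; the choice of optimiser and the
        way the target outputs are reduced to a scalar loss are assumptions, not
        part of this class):
            model = DynamicalModel(snap, potentials, targets)
            optimiser = torch.optim.Adam([model.snap.masses], lr=1e-3)
            for _ in range(100):
                loss = sum(t.sum() for t in model())
                optimiser.zero_grad()
                loss.backward()
                optimiser.step()
                model.integrate(steps=256)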
"""
def __init__(self, snap, potentials, targets, self_gravity_update=0.2):
super(DynamicalModel, self).__init__()
self.snap = snap
self.targets = nn.ModuleList(targets)
self.potentials = nn.ModuleList(potentials)
self.self_gravity_update = self_gravity_update
def forward(self):
return [target(self) for target in self.targets]
def integrate(self, steps=256):
with torch.no_grad():
self.snap.leapfrog_steps(potentials=self.potentials, steps=steps)
if self.self_gravity_update is not None:
self.potentials[0].update_density(self.snap.positions,
self.snap.masses.detach(),
fractional_update=self.self_gravity_update)
def update_potential(self, dm_potential=None, update_velocities=True):
with torch.no_grad():
if update_velocities:
old_accelerations = self.snap.get_accelerations(self.potentials,
self.snap.positions)
old_vc = torch.sum(-old_accelerations * self.snap.positions,
dim=-1).sqrt()
self.potentials[0].rho = _symmetrize_matrix(
_symmetrize_matrix(
_symmetrize_matrix(self.potentials[0].rho, 0), 1), 2)
self.potentials[0].grid_accelerations()
if dm_potential is not None:
self.potentials[1] = dm_potential
if update_velocities:
new_accelerations = self.snap.get_accelerations(self.potentials,
self.snap.positions)
new_vc = torch.sum(-new_accelerations * self.snap.positions,
dim=-1).sqrt()
gd = torch.isfinite(new_vc / old_vc) & (new_vc / old_vc > 0)
self.snap.velocities[gd, :] *= (new_vc / old_vc)[gd, None]
align_bar(self.snap)
def resample(self, velocity_perturbation=0.01):
"""Resample the model to equal mass particles.
        Note that the snapshot changes, and so the parameters of
        the model also change; any optimiser that keeps parameter-by-parameter information (e.g.
        gradients) must also be updated."""
with torch.no_grad():
self.snap = self.snap.resample(self.potentials,
velocity_perturbation=velocity_perturbation)
align_bar(self.snap)
def vc(self, components=False, r=torch.linspace(0, 9),
phi=torch.linspace(0, math.pi)):
"""Returns (r,vc) the circular velocity of the model in physical units and locations at which it was evaluated.
        If components=True then return a list containing the vc of each component, otherwise just return the total.
r optionally specifies the physical radii at which to compute vc
phi specifies the azimuths over which to average."""
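        # Note: this method uses self.d_scale and self.v_scale, which are only
        # defined on the MilkyWayModel subclass below, so vc() is effectively
        # specific to MilkyWayModel.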
phi_grid, r_grid = torch.meshgrid(phi, r)
phi_grid, r_grid = phi_grid.flatten(), r_grid.flatten()
pos = torch.stack((r_grid * torch.cos(phi_grid),
r_grid * torch.sin(phi_grid), 0 * phi_grid)).t()
pos = pos.to(device=self.d_scale.device)
pos /= self.d_scale
vc = []
for potential in self.potentials:
device = next(potential.buffers()).device
acc = potential.get_accelerations(pos.to(device=device)).to(
device=pos.device)
vc += [torch.sum(-acc * pos, dim=1).sqrt().reshape(
phi.shape + r.shape).mean(dim=0) * self.v_scale]
if components:
return r, vc
else:
total_vc = vc[0]
for thisvc in vc[1:]:
total_vc = (total_vc ** 2 + thisvc ** 2).sqrt()
return r, total_vc
class MilkyWayModel(DynamicalModel):
def __init__(self, snap: SnapShot, potentials: List[nn.Module],
targets: List[nn.Module],
self_gravity_update: Union[float, torch.Tensor] = 0.2,
bar_angle: Union[float, torch.Tensor] = 27.,
r_0: Union[float, torch.Tensor] = 8.2,
z_0: Union[float, torch.Tensor] = 0.014,
v_scale: Union[float, torch.Tensor] = 240,
d_scale: Union[float, torch.Tensor] = 1.4,
v_sun: Union[List[float], Tuple[float, float, float],
torch.Tensor] = (11.1, 12.24 + 238.0, 7.25)
):
super(MilkyWayModel, self).__init__(snap, potentials, targets,
self_gravity_update)
self.bar_angle = nn.Parameter(torch.as_tensor(bar_angle),
requires_grad=False)
self.r_0 = nn.Parameter(torch.as_tensor(r_0), requires_grad=False)
self.z_0 = nn.Parameter(torch.as_tensor(z_0), requires_grad=False)
self.v_scale = nn.Parameter(torch.as_tensor(v_scale),
requires_grad=False)
self.d_scale = nn.Parameter(torch.as_tensor(d_scale),
requires_grad=False)
self.v_sun = nn.Parameter(torch.as_tensor(v_sun), requires_grad=False)
@property
def m_scale(self) -> torch.tensor:
        G = 4.302E-3  # gravitational constant in pc (km/s)^2 / Msun
return self.d_scale * 1e3 * self.v_scale ** 2 / G
@property
def t_scale(self) -> torch.tensor:
"""1 iu in time in Gyr"""
return self.d_scale / self.v_scale * 0.977813106 # note that 1km/s is almost 1kpc/Gyr
@property
def xyz(self) -> torch.tensor:
"""Return position of particles in relative to the Sun in cartesian coordinates with units kpc
"""
ddtor = math.pi / 180.
ang = self.bar_angle * ddtor
pos = self.snap.positions
xyz = torch.zeros_like(pos)
inplane_gc_distance = (self.r_0 ** 2 - self.z_0 ** 2).sqrt()
xyz[:, 0] = (pos[:, 0] * torch.cos(-ang) - pos[:, 1] * torch.sin(
-ang)) * self.d_scale + inplane_gc_distance
xyz[:, 1] = (pos[:, 0] * torch.sin(-ang) + pos[:, 1] * torch.cos(
-ang)) * self.d_scale
xyz[:, 2] = pos[:, 2] * self.d_scale - self.z_0
return xyz
@property
def l_b_mu(self) -> torch.tensor:
"""Return array of particles in galactic (l,b,mu) coordinates. (l,b) in degrees. mu is distance modulus"""
xyz = self.xyz
l_b_mu = torch.zeros_like(xyz)
d = (xyz[:, 0] ** 2 + xyz[:, 1] ** 2 + xyz[:, 2] ** 2).sqrt()
l_b_mu[:, 0] = torch.atan2(xyz[:, 1], xyz[:, 0]) * 180 / math.pi
b_offset = torch.asin(
self.z_0 / self.r_0) # the GC has z = -z_0, rotate b coordinate so this is at l,b=(0,0)
l_b_mu[:, 1] = (torch.asin(xyz[:, 2] / d) + b_offset) * 180 / math.pi
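        # distance modulus: mu = 5*log10(d / 10 pc) = 5*log10(100 * d) for d in kpc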
l_b_mu[:, 2] = 5 * (100 * d).log10()
return l_b_mu
@property
def masses(self) -> torch.tensor:
return self.snap.masses * self.m_scale
@property
def omega(self) -> torch.tensor:
return self.snap.omega * self.v_scale / self.d_scale
@omega.setter
def omega(self, omega: float):
self.snap.omega = omega / self.v_scale * self.d_scale
@property
def uvw(self) -> torch.tensor:
"""Return UVW velocities.
"""
ddtor = math.pi / 180.
ang = self.bar_angle * ddtor
vxyz = torch.zeros_like(self.snap.positions)
# sun moves at Vsun[0] towards galactic center i.e. other stars are moving away towards larger x
vel = self.snap.velocities * self.v_scale
vxyz[:, 0] = (vel[:, 0] * torch.cos(-ang) - vel[:, 1] * torch.sin(-ang)) + self.v_sun[0]
# sun moves at Vsun[1] in direction of rotation, other stars are going slower than (0,-Vc,0)
vxyz[:, 1] = (vel[:, 0] * torch.sin(-ang) + vel[:, 1] * torch.cos(-ang)) - self.v_sun[1]
# sun is moving towards ngp i.e. other stars on average move at negative vz
vxyz[:, 2] = vel[:, 2] - self.v_sun[2]
return vxyz
@property
def vr(self) -> torch.tensor:
"""Return array of particles radial velocities in [km/s]"""
xyz = self.xyz
vxyz = self.uvw
r = xyz.norm(dim=-1)
vr = (xyz * vxyz).sum(dim=-1) / r
return vr
@property
def mul_mub(self) -> torch.tensor:
"""Return proper motions of particles in [mas/yr] in (l, b).
Proper motion in l is (rate of change of l)*cos(b)"""
xyz = self.xyz
vxyz = self.uvw
r = xyz.norm(dim=-1)
rxy = (xyz[:, 0] ** 2 + xyz[:, 1] ** 2).sqrt()
# magic number comes from: 1 mas/yr = 4.74057 km/s at 1 kpc
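        # mu_l*cos(b) = v_l / (4.74057 d) and mu_b = v_b / (4.74057 d), where v_l and v_b
        # are the velocity components along the l and b directions and d is in kpc.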
mul = (-vxyz[:, 0] * xyz[:, 1] / rxy + vxyz[:, 1] * xyz[:, 0] / rxy) / r / 4.74057
mub = (-vxyz[:, 0] * xyz[:, 2] * xyz[:, 0] / rxy - vxyz[:, 1] * xyz[:, 2] * xyz[:, 1] / rxy + vxyz[:, 2] * rxy) / (
r ** 2) / 4.74057
return torch.stack((mul, mub), dim=-1)
|
import torch
from node2vec import Node2Vec as Node2Vec_
from .brain_data import BrainData
from torch_geometric.data import Data
from networkx.convert_matrix import from_numpy_matrix
from .utils import binning, LDP
import networkx as nx
from .base_transform import BaseTransform
from numpy import linalg as LA
import numpy as np
class FromSVTransform(BaseTransform):
def __init__(self, sv_transform):
super(FromSVTransform, self).__init__()
self.sv_transform = sv_transform
def __call__(self, data):
keys = list(filter(lambda x: x.startswith('edge_index'), data.keys))
for key in keys:
if key.startswith('edge_index'):
postfix = key[10:]
edge_index = data[f'edge_index{postfix}']
edge_attr = data[f'edge_attr{postfix}']
svdata = Data(edge_index=edge_index, edge_attr=edge_attr, num_nodes=data.num_nodes)
svdata_transformed = self.sv_transform(svdata)
data[f'x{postfix}'] = svdata_transformed.x
data[f'edge_index{postfix}'] = svdata_transformed.edge_index
data[f'edge_attr{postfix}'] = svdata_transformed.edge_attr
return data
def __str__(self):
return self.sv_transform.__class__.__name__
class Identity(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns a diagonal matrix with ones on the diagonal.
:param data: BrainData
:return: torch.Tensor
"""
data.x = torch.diag(torch.ones(data.num_nodes))
return data
class Degree(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns a diagonal matrix with the degree of each node on the diagonal.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
data.x = torch.Tensor(adj.sum(dim=1, keepdim=True)).float()
return data
def __str__(self):
return 'Degree'
class LDPTransform(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with LDP transform.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
data.x = torch.Tensor(
LDP(nx.from_numpy_array(adj.numpy()))
).float()
return data
def __str__(self):
return 'LDP'
class DegreeBin(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with degree bin transform.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
return torch.Tensor(binning(adj.sum(dim=1))).float()
def __str__(self):
return 'Degree_Bin'
class Adj(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns adjacency matrix.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
data.x = adj
return data
def __str__(self):
return 'Adj'
class Eigenvector(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with eigenvector.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
w, v = LA.eig(adj.numpy())
# indices = np.argsort(w)[::-1]
v = v.transpose()
data.x = torch.Tensor(v).float()
return data
class EigenNorm(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with eigen norm.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
sum_of_rows = adj.sum(dim=1)
adj /= sum_of_rows
adj = torch.nan_to_num(adj)
w, v = LA.eig(adj.numpy())
# indices = np.argsort(w)[::-1]
v = v.transpose()
data.x = torch.Tensor(v).float()
return data
class Node2Vec(BaseTransform):
def __init__(self, feature_dim=32, walk_length=5, num_walks=200, num_workers=4,
window=10, min_count=1, batch_words=4):
super(Node2Vec, self).__init__()
self.feature_dim = feature_dim
self.walk_length = walk_length
self.num_walks = num_walks
self.num_workers = num_workers
self.window = window
self.min_count = min_count
self.batch_words = batch_words
def __call__(self, data):
"""
Returns node feature with node2vec transform.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
if (adj < 0).int().sum() > 0:
# split the adjacency matrix into two (negative and positive) parts
pos_adj = adj.clone()
pos_adj[adj < 0] = 0
neg_adj = adj.clone()
neg_adj[adj > 0] = 0
neg_adj = -neg_adj
adjs = [pos_adj, neg_adj]
else:
adjs = [adj]
xs = []
for adj in adjs:
x = torch.zeros((data.num_nodes, self.feature_dim))
graph = from_numpy_matrix(adj.numpy())
node2vec = Node2Vec_(graph, dimensions=self.feature_dim, walk_length=self.walk_length,
num_walks=self.num_walks, workers=self.num_workers)
model = node2vec.fit(window=self.window, min_count=self.min_count,
batch_words=self.batch_words)
for i in range(data.num_nodes):
x[i] = torch.Tensor(model.wv[f'{i}'].copy())
xs.append(x)
data.x = torch.cat(xs, dim=-1)
return data
def __str__(self):
return 'Node2Vec'
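# Usage sketch (hypothetical BrainData instance; every transform above takes a
# BrainData object and returns it with node features written to data.x):
#   transform = Degree()          # or LDPTransform(), Adj(), Eigenvector(), Node2Vec(), ...
#   data = transform(data)        # data.x built from data.edge_index / data.edge_attr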
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tanh bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.bijectors import bijector
__all__ = [
"Tanh",
]
class Tanh(bijector.Bijector):
"""Bijector that computes `Y = tanh(X)`, therefore `Y in (-1, 1)`.
This can be achieved by an affine transform of the Sigmoid bijector, i.e.,
it is equivalent to
```
tfb.Chain([tfb.Affine(shift=-1, scale=2.),
tfb.Sigmoid(),
tfb.Affine(scale=2.)])
```
However, using the `Tanh` bijector directly is slightly faster and more
numerically stable.
"""
def __init__(self, validate_args=False, name="tanh"):
super(Tanh, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return tf.nn.tanh(x)
def _inverse(self, y):
return tf.atanh(y)
def _inverse_log_det_jacobian(self, y):
return -tf.log1p(-tf.square(y))
def _forward_log_det_jacobian(self, x):
# This formula is mathematically equivalent to
# `tf.log1p(-tf.square(tf.tanh(x)))`, however this code is more numerically
# stable.
# Derivation:
# log(1 - tanh(x)^2)
# = log(sech(x)^2)
# = 2 * log(sech(x))
# = 2 * log(2e^-x / (e^-2x + 1))
# = 2 * (log(2) - x - log(e^-2x + 1))
# = 2 * (log(2) - x - softplus(-2x))
return 2. * (np.log(2.) - x - tf.nn.softplus(-2. * x))
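# Usage sketch (assumes the TF1-era APIs used above, e.g. tf.atanh / tf.log1p):
#   bijector = Tanh()
#   y = bijector.forward(x)                                   # tanh(x), in (-1, 1)
#   x_back = bijector.inverse(y)                              # atanh(y)
#   ldj = bijector.forward_log_det_jacobian(x, event_ndims=0)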
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2019 Colin Curtain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Author: Colin Curtain (ccbogel)
https://github.com/ccbogel/QualCoder
https://qualcoder.wordpress.com/
'''
from PyQt5 import QtGui, QtWidgets, QtCore
import os
import sys
import logging
import traceback
from GUI.ui_dialog_settings import Ui_Dialog_settings
home = os.path.expanduser('~')
path = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def exception_handler(exception_type, value, tb_obj):
""" Global exception handler useful in GUIs.
tb_obj: exception.__traceback__ """
tb = '\n'.join(traceback.format_tb(tb_obj))
text = 'Traceback (most recent call last):\n' + tb + '\n' + exception_type.__name__ + ': ' + str(value)
print(text)
logger.error(_("Uncaught exception: ") + text)
QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text)
class DialogSettings(QtWidgets.QDialog):
""" Settings for the coder name, coder table and to display ids. """
settings = {}
def __init__(self, app, parent=None):
sys.excepthook = exception_handler
self.app = app
self.settings = app.settings
        super(DialogSettings, self).__init__(parent)  # override accept method
self.ui = Ui_Dialog_settings()
self.ui.setupUi(self)
font = 'font: ' + str(self.app.settings['fontsize']) + 'pt '
font += '"' + self.app.settings['font'] + '";'
self.setStyleSheet(font)
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
new_font = QtGui.QFont(self.settings['font'], self.settings['fontsize'], QtGui.QFont.Normal)
self.ui.fontComboBox.setCurrentFont(new_font)
# get coder names from all tables
# Note: does not appear to require a distinct clause
sql = "select owner from code_image union select owner from code_text union select owner from code_av "
sql += " union select owner from cases union select owner from journal union select owner from attribute "
sql += "union select owner from source union select owner from annotation union select owner from code_name "
sql += "union select owner from code_cat"
coders = [""]
if self.app.conn is not None:
cur = self.app.conn.cursor()
cur.execute(sql)
results = cur.fetchall()
for row in results:
coders.append(row[0])
self.ui.comboBox_coders.addItems(coders)
languages = ["Deutsch de", "English en", "Ελληνικά el", "Español es", "Français fr", "日本 jp"]
self.ui.comboBox_language.addItems(languages)
for index, lang in enumerate(languages):
if lang[-2:] == self.settings['language']:
self.ui.comboBox_language.setCurrentIndex(index)
timestampformats = ["[mm.ss]", "[mm:ss]", "[hh.mm.ss]", "[hh:mm:ss]",
"{hh:mm:ss}", "#hh:mm:ss.sss#"]
self.ui.comboBox_timestamp.addItems(timestampformats)
for index, ts in enumerate(timestampformats):
if ts == self.settings['timestampformat']:
self.ui.comboBox_timestamp.setCurrentIndex(index)
speakernameformats = ["[]", "{}"]
self.ui.comboBox_speaker.addItems(speakernameformats)
for index, snf in enumerate(speakernameformats):
if snf == self.settings['speakernameformat']:
self.ui.comboBox_speaker.setCurrentIndex(index)
self.ui.spinBox.setValue(self.settings['fontsize'])
self.ui.spinBox_treefontsize.setValue(self.settings['treefontsize'])
self.ui.lineEdit_coderName.setText(self.settings['codername'])
self.ui.comboBox_coders.currentIndexChanged.connect(self.comboBox_coder_changed)
self.ui.checkBox_auto_backup.stateChanged.connect(self.backup_state_changed)
if self.settings['showids'] == 'True':
self.ui.checkBox.setChecked(True)
else:
self.ui.checkBox.setChecked(False)
if self.settings['backup_on_open'] == 'True':
self.ui.checkBox_auto_backup.setChecked(True)
else:
self.ui.checkBox_auto_backup.setChecked(False)
if self.settings['backup_av_files'] == 'True':
self.ui.checkBox_backup_AV_files.setChecked(True)
else:
self.ui.checkBox_backup_AV_files.setChecked(False)
if self.settings['directory'] == "":
self.settings['directory'] = os.path.expanduser("~")
self.ui.label_directory.setText(self.settings['directory'])
self.ui.pushButton_choose_directory.clicked.connect(self.choose_directory)
def backup_state_changed(self):
""" Enable and disable av backup checkbox. Only enable when checkBox_auto_backup is checked. """
if self.ui.checkBox_auto_backup.isChecked():
self.ui.checkBox_backup_AV_files.setEnabled(True)
else:
self.ui.checkBox_backup_AV_files.setEnabled(False)
def comboBox_coder_changed(self):
""" Set the coder name to the current selection. """
self.ui.lineEdit_coderName.setText(self.ui.comboBox_coders.currentText())
def choose_directory(self):
""" Choose default project directory. """
directory = QtWidgets.QFileDialog.getExistingDirectory(self,
_('Choose project directory'), self.settings['directory'])
if directory == "":
return
self.ui.label_directory.setText(directory)
def accept(self):
self.settings['codername'] = self.ui.lineEdit_coderName.text()
if self.settings['codername'] == "":
self.settings['codername'] = "default"
self.settings['font'] = self.ui.fontComboBox.currentText()
self.settings['fontsize'] = self.ui.spinBox.value()
self.settings['treefontsize'] = self.ui.spinBox_treefontsize.value()
self.settings['directory'] = self.ui.label_directory.text()
if self.ui.checkBox.isChecked():
self.settings['showids'] = 'True'
else:
self.settings['showids'] = 'False'
self.settings['language'] = self.ui.comboBox_language.currentText()[-2:]
self.settings['timestampformat'] = self.ui.comboBox_timestamp.currentText()
self.settings['speakernameformat'] = self.ui.comboBox_speaker.currentText()
if self.ui.checkBox_auto_backup.isChecked():
self.settings['backup_on_open'] = 'True'
else:
self.settings['backup_on_open'] = 'False'
if self.ui.checkBox_backup_AV_files.isChecked():
self.settings['backup_av_files'] = 'True'
else:
self.settings['backup_av_files'] = 'False'
self.save_settings()
self.close()
def save_settings(self):
""" Save settings to text file in user's home directory.
Each setting has a variable identifier then a colon
followed by the value. """
self.app.write_config_ini(self.settings)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = DialogSettings()
ui.show()
sys.exit(app.exec_())
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def sample_program():
return "This is sample flask program"
|
# ---------
# Imports
# ---------
import sys
import os
import stat
import time
import struct
import re
try:
import grp
import pwd
except ImportError:
grp = pwd = None
# ---------------------------------------------------------
# tar constants
# ---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
# ---------------------------------------------------------
# tarfile constants
# ---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
# ---------------------------------------------------------
# Bits used in the mode field, values in octal.
# ---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
# ---------------------------------------------------------
# initialization
# ---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
# ---------------------------------------------------------
# Some useful functions
# ---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
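# Examples of the two number-field encodings (sketch, defaults as defined above):
#   itn(7)            -> "0000007\0"   (7 octal digits followed by NUL)
#   nti("0000007\0")  -> 7
#   itn(8**7, format=GNU_FORMAT) starts with chr(0200) and stores the value big-endian.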
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
except UnicodeEncodeError:
x = []
for c in s:
try:
x.append(c.encode(encoding, "strict"))
except UnicodeEncodeError:
x.append(c.encode("utf8"))
return "".join(x)
else:
return s.encode(encoding, errors)
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) +
struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) +
struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,
self.name, id(self))
def get_info(self, encoding, errors):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 07777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
for key in ("name", "linkname", "uname", "gname"):
if type(info[key]) is unicode:
info[key] = info[key].encode(encoding, errors)
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info(encoding, errors)
if format == USTAR_FORMAT:
return self.create_ustar_header(info)
elif format == GNU_FORMAT:
return self.create_gnu_header(info)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding, errors)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT)
def create_gnu_header(self, info):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = ""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"],
GNUTYPE_LONGLINK)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
return buf + self._create_header(info, GNU_FORMAT)
def create_pax_header(self, info, encoding, errors):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME),
("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
val = info[name].decode(encoding, errors)
# Try to encode the string as ASCII.
try:
val.encode("ascii")
except UnicodeEncodeError:
pax_headers[hname] = val
continue
if len(info[name]) > length:
pax_headers[hname] = val
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12),
("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = unicode(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers)
else:
buf = ""
return buf + self._create_header(info, USTAR_FORMAT)
def get_headers(self):
headers = {}
for key, value in self.pax_headers.items():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
headers[key.title()] = value
return headers
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100),
itn(info.get("mode", 0) & 07777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100),
stn(info.get("magic", POSIX_MAGIC), 8),
stn(info.get("uname", ""), 32),
stn(info.get("gname", ""), 32),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155)
]
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name += NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
keyword = keyword.encode("utf8")
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
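            # The record length prefix counts its own digits, so it is found by the
            # fixed-point iteration below; e.g. "12 path=foo\n" for path=foo.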
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf):
"""Construct a TarInfo object from a 512 byte string buffer.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.buf = buf
obj.name = nts(buf[0:100])
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257])
obj.magic = buf[257:265]
obj.uname = nts(buf[265:297])
obj.gname = nts(buf[297:329])
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500])
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
def _proc_builtin(self, untar_stream):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = untar_stream.offset
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
untar_stream.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(untar_stream.pax_headers, untar_stream.encoding,
untar_stream.errors)
return self
def _proc_gnulong(self, untar_stream):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = untar_stream.next_block(size=self._block(self.size))
if not buf:
return None
# Fetch the next header and process it.
try:
next = untar_stream.read_tarinfo()
if not next:
return None
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf)
return next
def _proc_sparse(self, untar_stream):
"""Process a GNU sparse header plus extra headers.
"""
buf = untar_stream.next_block(size=self._block(self.size))
sp = _ringbuffer()
pos = 386
lastpos = 0L
realpos = 0L
# There are 4 possible sparse structs in the
# first header.
for i in xrange(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[482])
origsize = nti(buf[483:495])
# If the isextended flag is given,
# there are extra headers to process.
while isextended == 1:
buf = untar_stream.next_block()
if not buf:
return None
pos = 0
for i in xrange(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[504])
if lastpos < origsize:
sp.append(_hole(lastpos, origsize - lastpos))
self.sparse = sp
self.offset_data = untar_stream.offset
untar_stream.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, untar_stream):
"""Process an extended or global header as described in
POSIX.1-2001.
"""
# Read the header information.
buf = untar_stream.next_block(size=self._block(self.size))
if not buf:
return None
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = untar_stream.pax_headers
else:
pax_headers = untar_stream.pax_headers.copy()
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
value = value.decode("utf8")
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = untar_stream.read_tarinfo()
if not next:
return None
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, untar_stream.encoding,
untar_stream.errors)
# next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
untar_stream.offset = offset
return next
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.iteritems():
if keyword not in PAX_FIELDS:
continue
if keyword == "path":
value = value.rstrip("/")
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
else:
value = uts(value, encoding, errors)
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.type == GNUTYPE_SPARSE
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
"""Represent a data section in a sparse file.
"""
def __init__(self, offset, size, realpos):
_section.__init__(self, offset, size)
self.realpos = realpos
class _hole(_section):
"""Represent a hole section in a sparse file.
"""
pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
class Path:
def __init__(self, type, file_name, size, data):
"""
:param data:
stringio, buffer, or file
"""
self.type = type
self.file_name = file_name
self.size = size
self.data = data
def __iter__(self):
for chunk in self.data:
yield chunk
class RegFile:
def __init__(self, file_name, chunk_size=65536):
self.file_name = file_name
self.fp = None
self.chunk_size = chunk_size
self.size = 0L
self.type = DIRTYPE
if hasattr(os, "lstat"):
statres = os.lstat(file_name)
else:
statres = os.stat(file_name)
if stat.S_ISREG(statres.st_mode):
self.size = statres.st_size
self.type = REGTYPE
def __iter__(self):
self.fp = open(self.file_name, 'rb')
return self
def next(self):
chunk = self.fp.read(self.chunk_size)
if chunk:
return chunk
else:
if self.fp:
self.fp.close()
self.fp = None
raise StopIteration
class StringBuffer:
def __init__(self, name, body=''):
self.name = name
self.file_name = name
self.size = len(body)
self.body = body
self.is_closed = False
self.type = REGTYPE
def write(self, data):
if not self.is_closed:
self.body += data
def close(self):
self.is_closed = True
class TarStream(object):
errors = 'strict'
def __init__(self, tar_iter=None, path_list=None, chunk_size=65536,
format=DEFAULT_FORMAT, encoding=ENCODING, append=False):
"""
:param path_list:
List of `Path` objects.
"""
self.tar_iter = tar_iter
self.path_list = path_list
self.chunk_size = chunk_size
self.format = format
self.encoding = encoding
self.to_write = self.chunk_size
self.data = ''
self.file_len = 0
self.append = append
def serve_chunk(self, buf):
# always serve chunks of `self.chunk_size`, or nothing
self.to_write -= len(buf)
if self.to_write < 0:
self.data += buf[:self.to_write]
self.file_len += self.chunk_size
yield self.data
self.data = buf[self.to_write:]
self.to_write += self.chunk_size
else:
self.data += buf
def create_tarinfo(self, path=None, ftype=None, name=None, size=None,
headers=None):
tarinfo = TarInfo()
tarinfo.tarfile = None
if path:
tarinfo.type = path.type
tarinfo.name = path.file_name
tarinfo.size = path.size
else:
tarinfo.type = ftype
tarinfo.name = name
tarinfo.size = size
tarinfo.mtime = time.time()
if headers:
tarinfo.pax_headers = dict(headers)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
return buf
@classmethod
def get_archive_size(cls, file_size):
size = file_size + BLOCKSIZE - 1
return (size / BLOCKSIZE) * BLOCKSIZE
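        # e.g. get_archive_size(834) == 1024: file data is padded to whole 512-byte blocks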
def get_total_stream_length(self):
# In bytes. Used for content-length calculation.
size = 0
for path in self.path_list:
size += TarStream.get_archive_size(path.size)
size += len(self.create_tarinfo(path=path))
size += BLOCKSIZE * 2
return size
def __iter__(self):
if self.append:
if self.tar_iter:
for data in self.tar_iter:
for chunk in self.serve_chunk(data):
yield chunk
for path in self.path_list:
buf = self.create_tarinfo(path=path)
for chunk in self.serve_chunk(buf):
yield chunk
for file_data in path:
for chunk in self.serve_chunk(file_data):
yield chunk
self.file_len += len(self.data)
blocks, remainder = divmod(self.file_len, BLOCKSIZE)
if remainder > 0:
nulls = NUL * (BLOCKSIZE - remainder)
for chunk in self.serve_chunk(nulls):
yield chunk
self.file_len = 0
if not self.append:
if self.tar_iter:
for data in self.tar_iter:
for chunk in self.serve_chunk(data):
yield chunk
else:
for chunk in self.serve_chunk(NUL * (BLOCKSIZE * 2)):
yield chunk
if self.data:
yield self.data
class ExtractedFile(object):
def __init__(self, untar_stream):
self.untar_stream = untar_stream
self.data = ''
def read(self, size=None):
if size is None:
size = self.untar_stream.to_write
if size:
if self.untar_stream.to_write:
while len(self.data) < size:
chunk = self.untar_stream.get_file_chunk()
if not chunk:
result = self.data[:]
self.data = ''
return result
self.data += chunk
if self.untar_stream.to_write:
self.untar_stream.block = ''
try:
data = next(self.untar_stream.tar_iter)
except StopIteration:
result = self.data[:]
self.data = ''
return result
self.untar_stream.update_buffer(data)
else:
result = self.data[:]
self.data = ''
return result
result = self.data[:size]
self.data = self.data[size:]
return result
return ''
class UntarStream(object):
def __init__(self, tar_iter, path_list=None, encoding=ENCODING,
errors=None):
self.tar_iter = iter(tar_iter)
if path_list is None:
path_list = []
self.path_list = path_list
self.block = ''
self.encoding = encoding
self.errors = errors
self.pax_headers = {}
self.offset = 0
self.offset_data = 0
self.to_write = 0
self.fp = None
self.format = None
def update_buffer(self, data):
if self.block:
self.block += data
else:
self.block = data
def __iter__(self):
while True:
try:
data = next(self.tar_iter)
except StopIteration:
break
self.update_buffer(data)
info = self.get_next_tarinfo()
while info:
if info.offset_data:
for f in self.path_list:
if info.name == f.name:
self.fp = f
break
self.to_write = info.size
self.offset_data = info.offset_data
while self.to_write:
if self.fp:
self.fp.write(self.get_file_chunk())
if not self.to_write:
self.fp.close()
self.fp = None
else:
self.skip_file_chunk()
if self.to_write:
self.block = ''
yield data
try:
data = next(self.tar_iter)
except StopIteration:
break
self.update_buffer(data)
info = self.get_next_tarinfo()
yield data
def next_block(self, size=BLOCKSIZE):
if size > len(self.block):
return None
stop = self.offset + size
if stop > len(self.block):
self.block = self.block[self.offset:]
self.offset = 0
return None
start = self.offset
self.offset = stop
return self.block[start:stop]
def read_tarinfo(self):
buf = self.next_block()
if not buf:
return None
tarinfo = TarInfo.frombuf(buf)
tarinfo.offset = self.offset - BLOCKSIZE
if tarinfo.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return tarinfo._proc_gnulong(self)
elif tarinfo.type == GNUTYPE_SPARSE:
return tarinfo._proc_sparse(self)
elif tarinfo.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return tarinfo._proc_pax(self)
else:
return tarinfo._proc_builtin(self)
def write_file(self):
chunk = self.get_file_chunk()
if self.fp:
self.fp.write(chunk)
if not self.to_write:
self.fp.close()
self.fp = None
def get_file_chunk(self):
# get a chunk of an entire file
buf_size = len(self.block)
eof = self.offset_data + self.to_write
if eof <= buf_size:
self.to_write = 0
return self.block[self.offset_data:eof]
start = self.offset_data
self.offset_data = 0
self.offset -= buf_size
self.to_write = eof - buf_size
return self.block[start:]
def skip_file_chunk(self):
# skip an entire file
buf_size = len(self.block)
eof = self.offset_data + self.to_write
if eof < buf_size:
self.to_write = 0
return
self.offset_data = 0
self.offset -= buf_size
self.to_write = eof - buf_size
def get_next_tarinfo(self):
info = None
while True:
try:
info = self.read_tarinfo()
except EOFHeaderError:
self.offset += BLOCKSIZE
continue
except InvalidHeaderError, e:
if self.offset == 0:
raise ReadError(str(e))
self.offset += BLOCKSIZE
continue
break
if info:
if info.magic == GNU_MAGIC:
self.format = GNU_FORMAT
elif info.magic == POSIX_MAGIC:
self.format = USTAR_FORMAT
return info
def untar_file_iter(self):
while self.to_write:
yield self.get_file_chunk()
if self.to_write:
self.block = ''
try:
data = next(self.tar_iter)
except StopIteration:
break
self.update_buffer(data)
if __name__ == "__main__":
if len(sys.argv) < 4:
print ('Usage: tarstream.py cf|xf <tar source> <tar dest> '
'<filtered files>')
exit()
op = sys.argv.pop(1)
src = sys.argv.pop(1)
dst = sys.argv.pop(1)
path_list = sys.argv[1:]
chunk_size = 65536
    if op not in ['cf', 'xf']:
        print ('Usage: tarstream.py cf|xf <tar source> <tar dest> '
               '<filtered files>')
        exit()
src_iter = None
    if src != '-':
src_iter = RegFile(src, chunk_size)
dst_fp = open(dst, 'wb')
    if op == 'cf':
path_list = [RegFile(path, chunk_size) for path in path_list]
for data in TarStream(src_iter, path_list, chunk_size):
dst_fp.write(data)
    elif op == 'xf':
path_list = [open(path, 'wb') for path in path_list]
for data in UntarStream(src_iter, path_list):
dst_fp.write(data)
dst_fp.close()
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2016 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2016-03-13
#
"""searchio <command> [<options>] [<args>...]
Alfred 3 workflow to provide search completion suggestions
from various search engines in various languages.
Usage:
searchio <command> [<args>...]
searchio -h|--version
Options:
-h, --help Display this help message
--version Show version number and exit
Commands:
add Add a new search to the workflow
clean Delete stale cache files
config Display (filtered) settings
help Show help for a command
list Display (filtered) list of engines
reload Update info.plist
search Perform a search
variants Display (filtered) list of engine variants
web Import a new search from a URL
"""
from __future__ import print_function, absolute_import
import os
import sys
from searchio import util
log = util.logger(__name__)
def usage(wf=None):
"""CLI usage instructions."""
return __doc__
def cli(wf):
"""Script entry point.
Args:
wf (worflow.Workflow3): Active workflow object.
"""
from docopt import docopt
vstr = '{} v{}'.format(wf.name, wf.version)
    wf.args  # accessing .args lets the workflow process any "magic" arguments first
args = docopt(usage(wf), version=vstr, options_first=True)
log.debug('args=%r', args)
cmd = args.get('<command>')
argv = [cmd] + args.get('<args>')
# ---------------------------------------------------------
# Initialise
for fn in ('backups', 'engines', 'icons', 'searches'):
try:
os.makedirs(wf.datafile(fn), 0700)
except Exception as err:
if err.errno != 17: # ignore file exists
raise err
# ---------------------------------------------------------
# Call sub-command
if cmd == 'add':
from searchio.cmd.add import run
return run(wf, argv)
elif cmd == 'clean':
from searchio.cmd.clean import run
return run(wf, argv)
elif cmd == 'config':
from searchio.cmd.config import run
return run(wf, argv)
elif cmd == 'fetch':
from searchio.cmd.fetch import run
return run(wf, argv)
elif cmd == 'help':
from searchio.cmd.help import run
return run(wf, argv)
elif cmd == 'list':
from searchio.cmd.list import run
return run(wf, argv)
    elif cmd == 'reload':
        from searchio.cmd.reload import run
        return run(wf, argv)
    elif cmd == 'search':
        from searchio.cmd.search import run
        return run(wf, argv)
    elif cmd == 'toggle':
        from searchio.cmd.toggle import run
        return run(wf, argv)
    elif cmd == 'user':
        from searchio.cmd.user import run
        return run(wf, argv)
elif cmd == 'variants':
from searchio.cmd.variants import run
return run(wf, argv)
elif cmd == 'web':
from searchio.cmd.web import run
return run(wf, argv)
else:
raise ValueError('Unknown command "{}". Use -h for help.'.format(cmd))
def main():
from workflow import Workflow3
from searchio import UPDATE_SETTINGS, HELP_URL
wf = Workflow3(update_settings=UPDATE_SETTINGS,
help_url=HELP_URL)
sys.exit(wf.run(cli))
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.Home, name="home"),
path('portfolio', views.portfolio, name="portfolio"),
path('news', views.news, name="new"),
path('contacts', views.contacts, name="contacts"),
path('about', views.about, name="about"),
path('product/<str:pk>', views.details, name="product")
]
|
# -*- coding: utf-8 -*-
from django import template
register = template.Library()
@register.filter(name='times')
def times(value, arg):
return value * int(arg)
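# Template usage sketch (assumes this file is registered as a template tag library,
# e.g. under a hypothetical name "math_extras"):
#   {% load math_extras %}
#   {{ quantity|times:3 }}   -> renders quantity * 3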
|
import os
from flask import Flask, jsonify
from scraper import Scraper
app = Flask(__name__)
scraper = Scraper()
@app.route("/")
def store_playstation():
return jsonify(scraper.store_playstation("https://store.playstation.com/ja-jp/category/1b6c3e7d-4445-4cef-a046-efd94a1085b7/"))
if __name__ == "__main__":
app.run(port=8001,debug=True)
|
import yfinance
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
def _simulate_returns(historical_returns,forecast_days):
return historical_returns.sample(n = forecast_days,
replace = True).reset_index(drop = True)
def simulate_modified_returns(
historical_returns,
forecast_days,
correct_mean_by):
h = historical_returns.copy()
new_series = h + correct_mean_by
return new_series.sample(n=forecast_days,
replace = True).reset_index(drop=True)
def simulate_portfolio(historical_returns,composition,forecast_days):
result = 0
for t in tqdm(composition):
name,weight = t[0],t[1]
s = _simulate_returns(historical_returns['return_%s' % (name)], forecast_days)
result = result + s * weight
return(result)
def simulate_modified_portfolio(
historical_returns,
composition,
forecast_days):
result = 0
for t in composition:
name,weight,correction = t[0],t[1],t[2]
s = simulate_modified_returns(
historical_returns['return_%s' % (name)],
forecast_days,correction
)
result = result + s * weight
return(result)
def simulation(historical_returns,composition,forecast_days,n_iterations):
simulated_portfolios = None
for i in range(n_iterations):
sim = simulate_modified_portfolio(historical_returns,composition,forecast_days)
sim_port = pd.DataFrame({'returns_%d' % (i) : sim})
if simulated_portfolios is None:
simulated_portfolios = sim_port
else:
simulated_portfolios = simulated_portfolios.join(sim_port)
return simulated_portfolios
if __name__ == '__main__':
portfolio_composition = [('MSFT',0.5),('AAPL',0.2),('GOOG',0.3)]
returns = pd.DataFrame({})
# create returns portfolio dataframe
for t in portfolio_composition:
name = t[0]
ticker = yfinance.Ticker(name)
data = ticker.history(interval="1d",start="2010-01-01",end="2019-12-31")
data['return_%s' % (name)] = data['Close'].pct_change(1)
returns = returns.join(data[['return_%s' % (name)]],how="outer").dropna()
# Monte Carlo simulation of a portfolio
# simulate_portfolio(returns,portfolio_composition,10)
    # This may be enough for a plain portfolio simulation, but we also want something more: a what-if analysis.
# print("The historical average returns are : \n", returns.mean(axis=0))
    '''
    If we perform portfolio simulation as shown before, we are simply saying
    that the future returns are a random sample of the past returns. We already
    know this isn’t completely true. Moreover, maybe we are performing scenario
    analysis because we want to know what happens if certain conditions occur.
    For example, what happens if the average daily return of each stock is
    lower than its historical value?
    '''
print('Let’s try to simulate what happens if the average \
returns drop by -0.0001 for MSFT, -0.001 for AAPL and -0.0005 for GOOG. \
We must subtract these quantities from each stock and then simulate the \
future portfolios with the new, modified data.')
# We’ll add these corrections directly to the portfolio_composition list (they are the third component of each tuple):
new_portfolio_composition = [
('MSFT', 0.5,-0.0001),
('AAPL', 0.2,-0.001),
('GOOG', 0.3,-0.0005)
]
# Simulations and results
forecast_days = 20
n_iterations = 200
simulated_portfolios = simulation(returns,
new_portfolio_composition,forecast_days,n_iterations)
    # Given the daily returns of a portfolio, we approximate the return after N days by the
    # cumulative sum of the daily returns (a first-order approximation to compounding):
percentile_5th = simulated_portfolios.cumsum().apply(lambda x : np.percentile(x,5),axis=1)
percentile_95th = simulated_portfolios.cumsum().apply(lambda x : np.percentile(x,95),axis=1)
average_port = simulated_portfolios.cumsum().apply(lambda x : np.mean(x),axis=1)
print(percentile_5th.tail(1))
print(percentile_95th.tail(1))
print(average_port.tail(1))
# Confidence interval for future portfolios
x = range(forecast_days)
plt.rcParams['figure.figsize'] = [10, 10]
plt.plot(x,average_port,label="Average portfolio")
plt.xlabel("Day")
plt.ylabel("Portfolio return")
plt.fill_between(x, percentile_5th, percentile_95th,alpha=0.2)
plt.grid()
plt.legend()
plt.show()
# Probability of beating the portfolio target
target_return = 0.02
target_prob_port = simulated_portfolios.cumsum().apply(lambda x : np.mean(x > target_return),axis=1)
print("Probabilityof beating the portfolio target {} ".format(target_return),target_prob_port.tail(1))
# The size of the error bars is calculated with the standard error formula:
err_bars = np.sqrt(
target_prob_port * (1-target_prob_port) / n_iterations
)
x = range(forecast_days)
plt.rcParams['figure.figsize'] = [10, 10]
plt.bar(x,target_prob_port,yerr = err_bars)
plt.xlabel("Day")
plt.ylabel("Probability of return >= %.2f" % (target_return))
plt.grid()
plt.show()
# Sharpe ratio histogram
    '''
    Sharpe ratio (mean / standard deviation of the simulated returns, risk-free rate
    assumed zero): a risk-adjusted performance metric of a portfolio.
    '''
sharpe_indices = simulated_portfolios.apply(lambda x : np.mean(x)/np.std(x))
plt.hist(sharpe_indices,bins="rice")
plt.xlabel("Sharpe ratio")
plt.show()
print("Sharpe ratio mean value",np.mean(sharpe_indices))
|
def extractWriterupdatesCom(item):
'''
Parser for 'writerupdates.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
# -*- coding: utf-8 -*-
from zvt.contract import IntervalLevel
from zvt.factors.target_selector import TargetSelector
from zvt.factors.ma.ma_factor import CrossMaFactor
from zvt.factors import BullFactor
from ..context import init_test_context
init_test_context()
class TechnicalSelector(TargetSelector):
def init_factors(self, entity_ids, entity_schema, exchanges, codes, the_timestamp, start_timestamp,
end_timestamp, level):
bull_factor = BullFactor(entity_ids=entity_ids, entity_schema=entity_schema, exchanges=exchanges,
codes=codes, the_timestamp=the_timestamp, start_timestamp=start_timestamp,
end_timestamp=end_timestamp, provider='joinquant', level=level, adjust_type='qfq')
self.filter_factors = [bull_factor]
def test_cross_ma_selector():
entity_ids = ['stock_sz_000338']
entity_type = 'stock'
start_timestamp = '2018-01-01'
end_timestamp = '2019-06-30'
my_selector = TargetSelector(entity_ids=entity_ids,
entity_schema=entity_type,
start_timestamp=start_timestamp,
end_timestamp=end_timestamp)
# add the factors
my_selector \
.add_filter_factor(CrossMaFactor(entity_ids=entity_ids,
start_timestamp=start_timestamp,
end_timestamp=end_timestamp,
computing_window=10,
windows=[5, 10],
need_persist=False,
level=IntervalLevel.LEVEL_1DAY,
adjust_type='qfq'))
my_selector.run()
print(my_selector.open_long_df)
print(my_selector.open_short_df)
assert 'stock_sz_000338' in my_selector.get_open_short_targets('2018-01-29')
def test_technical_selector():
selector = TechnicalSelector(start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider='joinquant')
selector.run()
print(selector.get_result_df())
targets = selector.get_open_long_targets('2019-06-04')
    assert 'stock_sz_000338' not in targets
    assert 'stock_sz_002572' not in targets
targets = selector.get_open_short_targets('2019-06-04')
    assert 'stock_sz_000338' in targets
    assert 'stock_sz_002572' in targets
selector.move_on(timeout=0)
targets = selector.get_open_long_targets('2019-06-19')
assert 'stock_sz_000338' in targets
assert 'stock_sz_002572' not in targets
|
#!/usr/bin/python
from capstone import *
from unicorn import *
import regress
class MipsBranchDelay(regress.RegressTest):
def runTest(self):
md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_LITTLE_ENDIAN)
def disas(code, addr):
for i in md.disasm(code, addr):
print '0x%x: %s %-6s %s' % (i.address, str(i.bytes).encode('hex'), i.mnemonic, i.op_str)
def hook_code(uc, addr, size, _):
mem = str(uc.mem_read(addr, size))
disas(mem, addr)
CODE = 0x400000
asm = '0000a4126a00822800000000'.decode('hex') # beq $a0, $s5, 0x4008a0; slti $v0, $a0, 0x6a; nop
print 'Input instructions:'
disas(asm, CODE)
print
print 'Hooked instructions:'
uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)
uc.hook_add(UC_HOOK_CODE, hook_code)
uc.mem_map(CODE, 0x1000)
uc.mem_write(CODE, asm)
self.assertEqual(None, uc.emu_start(CODE, CODE + len(asm)))
if __name__ == '__main__':
regress.main()
|
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import docker
import requests
from dockerfile_parse import DockerfileParser
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.build import InsideBuilder, BuildResult
from atomic_reactor.util import ImageName, CommandResult
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.constants import INSPECT_ROOTFS, INSPECT_ROOTFS_LAYERS
from tests.docker_mock import mock_docker
from flexmock import flexmock
import pytest
from tests.constants import MOCK_SOURCE
class MockDocker(object):
def history(self, name):
return []
class MockDockerTasker(object):
def __init__(self):
self.d = MockDocker()
def inspect_image(self, name):
return {}
def build_image_from_path(self):
return True
class X(object):
pass
class MockInsideBuilder(object):
def __init__(self, failed=False, image_id=None):
self.tasker = MockDockerTasker()
self.base_image = ImageName(repo='Fedora', tag='22')
self.image_id = image_id or 'asd'
self.image = 'image'
self.failed = failed
self.df_path = 'some'
self.df_dir = 'some'
def simplegen(x, y):
yield "some\u2018".encode('utf-8')
flexmock(self.tasker, build_image_from_path=simplegen)
@property
def source(self):
return flexmock(
dockerfile_path='/',
path='/tmp',
config=flexmock(image_build_method='docker_api'),
)
def pull_base_image(self, source_registry, insecure=False):
pass
def get_built_image_info(self):
return {'Id': 'some'}
def inspect_built_image(self):
return {INSPECT_ROOTFS: {INSPECT_ROOTFS_LAYERS: []}}
def ensure_not_built(self):
pass
@pytest.mark.parametrize('is_failed', [
True,
False,
])
@pytest.mark.parametrize('image_id', ['sha256:12345', '12345'])
def test_build(is_failed, image_id):
"""
tests docker build api plugin working
"""
flexmock(DockerfileParser, content='df_content')
mock_docker()
fake_builder = MockInsideBuilder(image_id=image_id)
flexmock(InsideBuilder).new_instances(fake_builder)
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
flexmock(CommandResult).should_receive('is_failed').and_return(is_failed)
error = "error message"
error_detail = "{u'message': u\"%s\"}" % error
if is_failed:
flexmock(CommandResult, error=error, error_detail=error_detail)
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
else:
workflow.build_docker_image()
assert isinstance(workflow.buildstep_result['docker_api'], BuildResult)
assert workflow.build_result == workflow.buildstep_result['docker_api']
assert workflow.build_result.is_failed() == is_failed
if is_failed:
assert workflow.build_result.fail_reason == error
assert '\\' not in workflow.plugins_errors['docker_api']
assert error in workflow.plugins_errors['docker_api']
else:
assert workflow.build_result.image_id.startswith('sha256:')
assert workflow.build_result.image_id.count(':') == 1
def test_syntax_error():
"""
tests reporting of syntax errors
"""
flexmock(DockerfileParser, content='df_content')
mock_docker()
fake_builder = MockInsideBuilder()
def raise_exc(*args, **kwargs):
explanation = ("Syntax error - can't find = in \"CMD\". "
"Must be of the form: name=value")
http_error = requests.HTTPError('500 Server Error')
raise docker.errors.APIError(message='foo',
response=http_error,
explanation=explanation)
yield {}
fake_builder.tasker.build_image_from_path = raise_exc
flexmock(InsideBuilder).new_instances(fake_builder)
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
assert isinstance(workflow.buildstep_result['docker_api'], BuildResult)
assert workflow.build_result == workflow.buildstep_result['docker_api']
assert workflow.build_result.is_failed()
assert "Syntax error" in workflow.build_result.fail_reason
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import urlparse
import re
class Parser(object):
def _pre_test(self, url, soup):
bad_links = {}
        if soup.title and soup.title.string == u"手机搜狐" and url != 'http://m.sohu.com':
bad_links[url] = 404
            # todo: handle bad links properly...
return False, bad_links
return True, None
    # todo: strip js
def _get_new_urls(self, soup):
new_urls = []
for link in soup.find_all('a'):
            # todo: remove this try
# try:
link_href = unicode(link.get('href')).encode('utf-8')
# except:
# print link.get('href')
            # todo: handle external links
if link_href.startswith('/'):
new_url = 'http://m.sohu.com'+''.join(link_href)
elif link_href.startswith('http://m.sohu.com'):
new_url = link_href
# else:
# break
try:
new_urls.append(new_url)
except:
                print u'external link', link_href
return new_urls
def parse(self, page_url, html_content):
if html_content is None:
return
soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf-8')
state, bad_links = self._pre_test(page_url, soup)
if state:
new_urls = self._get_new_urls(soup)
return True, new_urls
else:
return False, bad_links
|
from .inverted_residual import InvertedResidual, InvertedResidualV3
from .non_bottleneck_1d import non_bottleneck_1d
from .dilated_bottleneck import DilatedBottleneck
|
from hashlib import sha256
from os import urandom
from btcpy.structs.crypto import PublicKey, PrivateKey
from btcpy.structs.transaction import MutableTransaction, TxOut
from btcpy.structs.sig import P2pkhSolver
from pypeerassets.networks import net_query
class Kutil:
def __init__(self, network: str, privkey: bytearray=None, from_string: str=None,
from_wif: str=None) -> None:
'''
High level helper class for handling public key cryptography.
        : privkey - private key bytes (binary format)
        : from_wif - <WIF> import private key from your wallet in WIF format
: network - specify network [ppc, tppc, btc]
: from_string - specify seed (string) to make the privkey from
'''
self.network = network
self.btcpy_constants = net_query(self.network).btcpy_constants
if privkey is not None:
self._private_key = PrivateKey(privkey)
if from_string is not None:
self._private_key = PrivateKey(sha256(
from_string.encode()).digest())
if from_wif is not None:
self._private_key = PrivateKey.from_wif(wif=from_wif,
network=self.btcpy_constants,
)
if not privkey:
if from_string == from_wif is None: # generate a new privkey
self._private_key = PrivateKey(bytearray(urandom(32)))
self.privkey = str(self._private_key)
self._public_key = PublicKey.from_priv(self._private_key)
self.pubkey = str(self._public_key)
@property
def address(self) -> str:
'''generate an address from pubkey'''
btcpy_constants = net_query(self.network).btcpy_constants
return str(self._public_key.to_address(btcpy_constants))
@property
def wif(self) -> str:
'''convert raw private key to WIF'''
return self._private_key.to_wif(network=self.btcpy_constants)
def sign_transaction(self, txin: TxOut,
tx: MutableTransaction) -> MutableTransaction:
'''sign the parent txn outputs P2PKH'''
solver = P2pkhSolver(self._private_key)
return tx.spend([txin], [solver])
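# Minimal usage sketch, not part of the original module (assumes the 'tppc' network
# listed in the docstring is defined in pypeerassets.networks):
if __name__ == '__main__':
    ku = Kutil(network='tppc')  # no key material given -> a fresh random privkey is generated
    print(ku.address)
    print(ku.wif)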
|
# MIT License
#
# Copyright (c) 2022 Ferhat Geçdoğan All Rights Reserved.
# Distributed under the terms of the MIT License.
#
#
# evalie - a toy evaluator using
# shunting-yard algorithm.
# ------
# github.com/ferhatgec/evalie
#
import math
class evalie:
def __init__(self):
self.precedence = {
'+': 2,
'-': 2,
'*': 3,
'/': 3,
'!': 4,
'^': 4,
'%': 4
}
self.left = 0
self.right = 0
self.op = ''
self.stack = self.evalie_values()
self.pi = str(math.pi)
self.e = str(math.e)
self.tau = str(math.tau)
self.golden_ratio = str(1.618033988749895)
class evalie_values:
def __init__(self):
self.values = []
self.operators = []
@staticmethod
def check_none(val):
return val if val is not None else -1
def get_precedence(self, ch) -> int:
return self.check_none(self.precedence.get(ch))
def perform(self):
if self.left is None:
self.left = 0
if self.right is None:
self.right = 0
match self.op:
case '+':
return self.left + self.right
case '-':
return self.right - self.left
case '*':
return self.left * self.right
case '/':
return self.right / self.left
case '^':
return self.right ** self.left
case '!':
return float(math.factorial(int(self.left)))
case '%':
return self.right % self.left
def pop(self, data):
if type(data) == float:
data = [data]
return data.pop()
if len(data) > 0:
val = data.pop()
return val
def precalc(self, data: str):
return data.replace('pi', self.pi) \
.replace('π', self.pi) \
.replace('e', self.e) \
.replace('tau', self.tau) \
.replace('τ', self.tau) \
.replace('phi', self.golden_ratio) \
.replace('φ', self.golden_ratio) \
.replace('mod', '%')\
.replace('+', ' + ')\
.replace('-', ' - ')\
.replace('/', ' / ')\
.replace('*', ' * ')
def clear(self):
self.left = self.right = 0
        self.op = ''
self.stack = self.evalie_values()
def eval(self, data):
data = self.precalc(data)
i = 0
while i < len(data):
match data[i]:
case ' ':
i += 1
continue
case '(':
self.stack.operators.append(data[i])
case ')':
while len(self.stack.operators) != 0 and self.stack.operators[-1] != '(':
self.left = self.pop(self.stack.values)
self.right = self.pop(self.stack.values)
self.op = self.pop(self.stack.operators)
self.stack.values.append(self.perform())
self.pop(self.stack.operators)
case _ if data[i].isdigit() or (data[i] == '-' and self.left > 0 and self.right == 0):
value = ''
while i < len(data) and (data[i].isdigit() or data[i] == '.' or data[i] == '-'):
value += data[i]
i += 1
value = float(value)
self.stack.values.append(value)
i -= 1
case _ as arg:
while (len(self.stack.operators) != 0
and self.get_precedence(self.stack.operators[-1]) >=
self.get_precedence(arg)):
self.left = self.pop(self.stack.values)
if self.stack.operators[-1] != '!':
self.right = self.pop(self.stack.values)
self.op = self.pop(self.stack.operators)
self.stack.values.append(self.perform())
self.stack.operators.append(data[i])
i += 1
while len(self.stack.operators) != 0:
self.left = self.pop(self.stack.values)
self.right = self.pop(self.stack.values)
self.op = self.pop(self.stack.operators)
            self.stack.values.append(self.perform())
if type(self.stack.values) == float:
self.stack.values = [self.stack.values]
if type(self.stack.values) == list and len(self.stack.values) > 0:
return self.stack.values[-1]
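# Minimal usage sketch (assumption: run as a script; not part of the original module):
if __name__ == '__main__':
    ev = evalie()
    print(ev.eval('2 + 3 * (4 - 1)'))  # expected: 11.0
    ev.clear()
    print(ev.eval('2 ^ 10'))           # expected: 1024.0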
|
import os
import pickle
from .Utils import purify, staticPath
def cacheIn(dir, name, data):
"""
Store given `data` under ./cache/dir/name.pickle file.
    Note that `dir` and `name` are "purified" before being used!
-dir: string of sub-directory to be created. Cache-file will be stored in it.
It shouldn't be None.
-name: string of filename without any extension. Cache-file will be named
after it. It shouldn't be None.
-data: python object to be cached.
"""
path = staticPath(__file__, "cache")
dir = purify(dir)
name = purify(name)
path = os.path.join(path, dir)
# If specified file exists, overwrite it without errors or warnings.
os.makedirs(path, exist_ok=True)
filename = name + ".pickle"
path = os.path.join(path, filename)
with open(path, "wb") as file:
pickle.dump(data, file)
def cacheOut(dir, name):
"""
Try to retrieve cached data under `./cache/dir/name.pickle`. If the
cache-file doesn't exist, None is being returned.
    Note that `dir` and `name` are "purified" before being used!
    -dir: string of sub-directory to be searched for the cache-file. It shouldn't be
None.
-name: string of filename to be searched without any extension. It shouldn't
be None.
"""
data = None
path = staticPath(__file__, "cache")
dir = purify(dir)
name = purify(name)
filename = name + ".pickle"
path = os.path.join(path, dir, filename)
if os.path.isfile(path):
with open(path, "rb") as file:
data = pickle.load(file)
return data
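# Usage sketch (hypothetical names; assumes this module is imported from its package so
# the relative imports resolve):
#
#   cacheIn("weather", "2024-01-01", {"tmax": 7.5})
#   cacheOut("weather", "2024-01-01")   # -> {"tmax": 7.5}, or None if no cache-file exists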
|
""""
Program name : Website cloner
author : https://github.com/codeperfectplus
How to use : Check README.md
"""
import os
import sys
import requests
from bs4 import BeautifulSoup
class CloneWebsite:
def __init__(self, website_name):
self.website_name = website_name
def crawl_website(self):
""" This function will crawl website and return content"""
        content = requests.get(self.website_name)
if content.status_code == 200:
return content
    def create_folder(self):
        """ This function will create a folder for the website """
        folder_name = (self.website_name.split("/"))[2]
try:
os.makedirs(folder_name)
except Exception as e:
print(e)
return folder_name
def save_website(self):
""" This function will save website to respective folder """
folder_name = self.create_folder()
content = self.crawl_website()
with open(
f"{folder_name}/index.html", "w", encoding="ascii", errors="ignore"
) as file:
file.write(content.text)
def save_image(self):
folder_name = self.create_folder()
os.chdir(folder_name)
        data = requests.get(self.website_name).text
soup = BeautifulSoup(data, "html.parser")
for img in soup.find_all("img"):
src = img["src"]
print(src)
image_name = src.split("/")[-1]
path = src.split("/")[:-1]
path = "/".join(path)
try:
os.makedirs(path)
except Exception:
print("File Exists")
if "/" == src[:1]:
print(src)
                src = self.website_name + src
img_data = requests.get(src).content
with open(f"{path}/{image_name}", "wb") as file:
file.write(img_data)
print("complete")
if __name__ == "__main__":
website_name = sys.argv[1]
clone = CloneWebsite(website_name)
clone.save_website()
clone.save_image()
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.python_executor_operator."""
import os
from typing import Any, Dict, List
import tensorflow as tf
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable import python_executor_operator
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from google.protobuf import text_format
class InprocessExecutor(base_executor.BaseExecutor):
  """A fake in-process executor that returns an execution result."""
def Do(
self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> execution_result_pb2.ExecutorOutput:
executor_output = execution_result_pb2.ExecutorOutput()
outputs_utils.populate_output_artifact(executor_output, output_dict)
outputs_utils.populate_exec_properties(executor_output, exec_properties)
return executor_output
class NotInprocessExecutor(base_executor.BaseExecutor):
  """A fake not-in-process executor that writes its execution result to executor_output_uri."""
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
executor_output = execution_result_pb2.ExecutorOutput()
outputs_utils.populate_output_artifact(executor_output, output_dict)
outputs_utils.populate_exec_properties(executor_output, exec_properties)
with fileio.open(self._context.executor_output_uri, 'wb') as f:
f.write(executor_output.SerializeToString())
class InplaceUpdateExecutor(base_executor.BaseExecutor):
"""A Fake executor that uses the executor Context to compute its output."""
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
model = output_dict['output_key'][0]
model.name = '{0}.{1}.my_model'.format(
self._context.pipeline_info.id,
self._context.pipeline_node.node_info.id)
class PythonExecutorOperatorTest(test_case_utils.TfxTest):
def _get_execution_info(self, input_dict, output_dict, exec_properties):
pipeline_node = pipeline_pb2.PipelineNode(node_info={'id': 'MyPythonNode'})
pipeline_info = pipeline_pb2.PipelineInfo(id='MyPipeline')
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
return data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri,
pipeline_node=pipeline_node,
pipeline_info=pipeline_info,
pipeline_run_id=99)
def testRunExecutor_with_InprocessExecutor(self):
    executor_spec = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.InprocessExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
    operator = python_executor_operator.PythonExecutorOperator(executor_spec)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {'key': 'value'}
executor_output = operator.run_executor(
self._get_execution_info(input_dict, output_dict, exec_properties))
self.assertProtoPartiallyEquals(
"""
execution_properties {
key: "key"
value {
string_value: "value"
}
}
output_artifacts {
key: "output_key"
value {
artifacts {
}
}
}""", executor_output)
def testRunExecutor_with_NotInprocessExecutor(self):
    executor_spec = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.NotInprocessExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
    operator = python_executor_operator.PythonExecutorOperator(executor_spec)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {'key': 'value'}
executor_output = operator.run_executor(
self._get_execution_info(input_dict, output_dict, exec_properties))
self.assertProtoPartiallyEquals(
"""
execution_properties {
key: "key"
value {
string_value: "value"
}
}
output_artifacts {
key: "output_key"
value {
artifacts {
}
}
}""", executor_output)
def testRunExecutor_with_InplaceUpdateExecutor(self):
    executor_spec = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.InplaceUpdateExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
    operator = python_executor_operator.PythonExecutorOperator(executor_spec)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {
'string': 'value',
'int': 1,
'float': 0.0,
# This should not happen on production and will be
# dropped.
'proto': execution_result_pb2.ExecutorOutput()
}
executor_output = operator.run_executor(
self._get_execution_info(input_dict, output_dict, exec_properties))
self.assertProtoPartiallyEquals(
"""
execution_properties {
key: "float"
value {
double_value: 0.0
}
}
execution_properties {
key: "int"
value {
int_value: 1
}
}
execution_properties {
key: "string"
value {
string_value: "value"
}
}
output_artifacts {
key: "output_key"
value {
artifacts {
custom_properties {
key: "name"
value {
string_value: "MyPipeline.MyPythonNode.my_model"
}
}
name: "MyPipeline.MyPythonNode.my_model"
}
}
}""", executor_output)
if __name__ == '__main__':
tf.test.main()
|
"""
Filename: plot_ohc_drift.py
Author: Damien Irving, irving.damien@gmail.com
Description: Create a bar chart showing drift in ocean heat content
and its thermal and barystatic components
"""
# Import general Python modules
import sys
import os
import re
import pdb
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cmdline_provenance as cmdprov
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
import matplotlib as mpl
mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['axes.titlesize'] = 'x-large'
mpl.rcParams['xtick.labelsize'] = 'medium'
mpl.rcParams['ytick.labelsize'] = 'large'
mpl.rcParams['legend.fontsize'] = 'large'
# Define functions
def get_quartiles(df, column_name, df_project, units):
"""Get the ensemble quartiles"""
assert len(df) == len(df_project)
quartiles = ['# ' + column_name + ' quartiles']
for project in ['cmip6', 'cmip5']:
df_subset = df[df_project == project]
upper_quartile = df_subset[column_name].abs().quantile(0.75)
median = df_subset[column_name].abs().quantile(0.5)
lower_quartile = df_subset[column_name].abs().quantile(0.25)
upper_quartile_text = "%s upper quartile: %f %s" %(project, upper_quartile, units)
median_text = "%s median: %f %s" %(project, median, units)
lower_quartile_text = "%s lower quartile: %f %s" %(project, lower_quartile, units)
quartiles.append(upper_quartile_text)
quartiles.append(median_text)
quartiles.append(lower_quartile_text)
return quartiles
def main(inargs):
"""Run the program."""
df = pd.read_csv(inargs.infile)
df.set_index(df['model'], drop=True, inplace=True)
#df.set_index(df['model'] + ' (' + df['run'] + ')', drop=True, inplace=True)
x = np.arange(df.shape[0])
ncmip5 = df['project'].value_counts()['cmip5']
df_ohc = df[['OHC (J yr-1)', 'thermal OHC (J yr-1)', 'barystatic OHC (J yr-1)']]
sec_in_year = 365.25 * 24 * 60 * 60
earth_surface_area = 5.1e14
df_ohc = (df_ohc / sec_in_year) / earth_surface_area
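    # J yr-1 -> W (divide by seconds per year), then -> W m-2 (divide by Earth's surface area),
    # i.e. the drift expressed as an equivalent planetary energy imbalance.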
df_ohc = df_ohc.rename(columns={"OHC (J yr-1)": "change in OHC ($dH/dt$)",
"thermal OHC (J yr-1)": "change in OHC temperature component ($dH_T/dt$)",
"barystatic OHC (J yr-1)": "change in OHC barystatic component ($dH_m/dt$)"})
df_ohc.plot.bar(figsize=(18,6), color=['#272727', 'tab:red', 'tab:blue'], width=0.9, zorder=2)
plt.axhspan(0.4, 1.0, color='0.95', zorder=1)
plt.axvline(x=ncmip5 - 0.5, color='0.5', linewidth=2.0)
units = 'equivalent planetary energy imbalance (W m$^{-2}$)'
plt.ylabel(units)
plt.axvline(x=x[0]-0.5, color='0.5', linewidth=0.1)
for val in x:
plt.axvline(x=val+0.5, color='0.5', linewidth=0.1)
quartiles = get_quartiles(df_ohc, "change in OHC ($dH/dt$)", df['project'], units)
plt.savefig(inargs.outfile, bbox_inches='tight', dpi=400)
log_file = re.sub('.png', '.met', inargs.outfile)
log_text = cmdprov.new_log(git_repo=repo_dir, extra_notes=quartiles)
cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
    extra_info = """
author:
Damien Irving, irving.damien@gmail.com
"""
description = 'Create a bar chart showing drift in ocean heat content'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("infile", type=str, help="Input file name")
parser.add_argument("outfile", type=str, help="Output file name")
args = parser.parse_args()
main(args)
|
import tkinter as tk
import threading
from tkinter import scrolledtext
from tkinter import messagebox
ENCODING = 'utf-8'
class GUI(threading.Thread):
def __init__(self, client):
super().__init__(daemon=False, target=self.run)
self.font = ('Helvetica', 13)
self.client = client
self.login_window = None
self.main_window = None
def run(self):
self.login_window = LoginWindow(self, self.font)
self.main_window = ChatWindow(self, self.font)
self.notify_server(self.login_window.login, 'login')
self.main_window.run()
@staticmethod
def display_alert(message):
"""Display alert box"""
messagebox.showinfo('Error', message)
def update_login_list(self, active_users):
"""Update login list in main window with list of users"""
self.main_window.update_login_list(active_users)
def display_message(self, message):
"""Display message in ChatWindow"""
self.main_window.display_message(message)
def send_message(self, message):
"""Enqueue message in client's queue"""
# add
print('GUI sent: ' + message)
if self.client.target == 'ALL':
act = '2'
else:
act = '1 ' + self.client.target
self.client.queue.put(self.client.encapsulate(message, action=act))
def set_target(self, target):
"""Set target for messages"""
self.client.target = target
def notify_server(self, message, action):
"""Notify server after action was performed"""
#data = action + ";" + message
data = message
# data = data.encode(ENCODING) do not encode before sending!
self.client.notify_server(data, action)
def login(self, login):
self.client.notify_server(login, 'login')
def logout(self, logout):
self.client.notify_server(logout, 'logout')
class Window(object):
def __init__(self, title, font):
self.root = tk.Tk()
self.title = title
self.root.title(title)
self.font = font
class LoginWindow(Window):
def __init__(self, gui, font):
super().__init__("Login", font)
self.gui = gui
self.label = None
self.entry = None
self.button = None
self.login = None
self.build_window()
self.run()
    def build_window(self):
        """Build login window, set widgets positioning and event bindings"""
welcome_text = "Welcome to SECRET CHAT.\nEnter your name."
self.label = tk.Label(self.root, text=welcome_text, width=30, height=5, font=self.font)
self.label.pack(side=tk.TOP, expand=tk.YES)
self.entry = tk.Entry(self.root, width=15, font=self.font)
self.entry.focus_set()
self.entry.pack(side=tk.LEFT, expand=tk.YES)
self.entry.bind('<Return>', self.get_login_event)
self.button = tk.Button(self.root, text='Login', font=self.font)
self.button.pack(side=tk.LEFT, expand=tk.YES)
self.button.bind('<Button-1>', self.get_login_event)
def run(self):
"""Handle login window actions"""
self.root.mainloop()
self.root.destroy()
def get_login_event(self, event):
"""Get login from login box and close login window"""
self.login = self.entry.get()
self.root.quit()
class ChatWindow(Window):
def __init__(self, gui, font):
super().__init__("Secret Chat", font)
self.gui = gui
self.messages_list = None
self.logins_list = None
self.entry = None
self.send_button = None
self.exit_button = None
self.lock = threading.RLock()
self.target = ''
self.login = self.gui.login_window.login
self.build_window()
def build_window(self):
"""Build chat window, set widgets positioning and event bindings"""
# Size config
self.root.geometry('750x500')
self.root.minsize(600, 400)
# Frames config
main_frame = tk.Frame(self.root)
main_frame.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
# swap frame00 and frame01
# List of messages
frame00 = tk.Frame(main_frame)
frame00.grid(column=1, row=0, rowspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# List of logins
frame01 = tk.Frame(main_frame)
frame01.grid(column=0, row=0, rowspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# Message entry
frame02 = tk.Frame(main_frame)
frame02.grid(column=0, row=2, columnspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# Buttons
frame03 = tk.Frame(main_frame)
frame03.grid(column=0, row=3, columnspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
main_frame.rowconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
main_frame.rowconfigure(2, weight=8)
main_frame.columnconfigure(0, weight=1)
main_frame.columnconfigure(1, weight=1)
# ScrolledText widget for displaying messages
self.messages_list = scrolledtext.ScrolledText(frame00, wrap='word', font=self.font)
self.messages_list.insert(tk.END, 'Start Your Secret Chat\n\n')
self.messages_list.configure(state='disabled')
# Listbox widget for displaying active users and selecting them
self.logins_list = tk.Listbox(frame01, selectmode=tk.SINGLE, font=self.font,
exportselection=False)
self.logins_list.bind('<<ListboxSelect>>', self.selected_login_event)
# Entry widget for typing messages in
self.entry = tk.Text(frame02, font=self.font)
self.entry.focus_set()
self.entry.bind('<Return>', self.send_entry_event)
# Button widget for sending messages
self.send_button = tk.Button(frame03, text='Send Message', font=self.font)
self.send_button.bind('<Button-1>', self.send_entry_event)
# Button for exiting
self.exit_button = tk.Button(frame03, text='Exit', font=self.font)
self.exit_button.bind('<Button-1>', self.exit_event)
# Positioning widgets in frame
self.logins_list.pack(fill=tk.BOTH, expand=tk.YES, side=tk.LEFT)
self.messages_list.pack(fill=tk.BOTH, expand=tk.YES, side=tk.LEFT)
self.entry.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.send_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.exit_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
# Protocol for closing window using 'x' button
self.root.protocol("WM_DELETE_WINDOW", self.on_closing_event)
def run(self):
"""Handle chat window actions"""
self.root.mainloop()
self.root.destroy()
def selected_login_event(self, event):
"""Set as target currently selected login on login list"""
target = self.logins_list.get(self.logins_list.curselection())
self.target = target
self.gui.set_target(target)
def send_entry_event(self, event):
"""Send message from entry field to target"""
text = self.entry.get(1.0, tk.END)
if text != '\n':
#message = 'msg;' + self.login + ';' + self.target + ';' + text[:-1]
message = text[:-1]
print(message)
self.gui.send_message(message)
self.entry.mark_set(tk.INSERT, 1.0)
self.entry.delete(1.0, tk.END)
self.entry.focus_set()
else:
messagebox.showinfo('Warning', 'You must enter non-empty message')
with self.lock:
self.messages_list.configure(state='normal')
if text != '\n':
self.messages_list.insert(tk.END, text)
self.messages_list.configure(state='disabled')
self.messages_list.see(tk.END)
return 'break'
def exit_event(self, event):
"""Send logout message and quit app when "Exit" pressed"""
self.gui.notify_server(self.login, 'logout')
self.root.quit()
def on_closing_event(self):
"""Exit window when 'x' button is pressed"""
self.exit_event(None)
def display_message(self, message):
"""Display message in ScrolledText widget"""
with self.lock:
self.messages_list.configure(state='normal')
self.messages_list.insert(tk.END, message)
self.messages_list.configure(state='disabled')
self.messages_list.see(tk.END)
def update_login_list(self, active_users):
"""Update listbox with list of active users"""
self.logins_list.delete(0, tk.END)
for user in active_users:
self.logins_list.insert(tk.END, user)
self.logins_list.select_set(0)
self.target = self.logins_list.get(self.logins_list.curselection())
|
from sqlalchemy.orm.exc import NoResultFound
from whoahqa.models import (
ClinicFactory,
User,
)
from whoahqa.constants import groups
from whoahqa.constants import permissions as perms
def get_request_user(request):
user_id = request.authenticated_userid
try:
return User.get(User.id == user_id)
except NoResultFound:
return None
def can_list_clinics(request):
return request.has_permission(perms.CAN_LIST_CLINICS,
ClinicFactory(request))
def can_view_clinics(request):
return request.has_permission(perms.CAN_VIEW_CLINICS,
ClinicFactory(request))
def is_super_user(request):
return request.has_permission(perms.SUPER_USER,
ClinicFactory(request))
def can_access_clinics(request):
return request.has_permission(perms.CAN_ASSESS_CLINICS,
ClinicFactory(request))
def can_create_period(request):
return request.has_permission(perms.CAN_CREATE_PERIOD,
ClinicFactory(request))
def can_view_municipality(request):
user = request.user
if user.group.name == groups.MUNICIPALITY_MANAGER or (
user.group.name == groups.STATE_OFFICIAL):
return True
return False
def can_view_state(request):
user = request.user
if user.group.name == groups.STATE_OFFICIAL:
return True
return False
def can_list_state(request):
user = request.user
if user.group.name == groups.NATIONAL_OFFICIAL:
return True
return False
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 01:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Goal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('current_amount', models.IntegerField(default=0)),
('goal_amount', models.IntegerField()),
('end_date', models.DateTimeField(verbose_name='Due')),
],
),
]
|
from math import ceil
def karatsuba(a,b):
if a < 10 and b < 10:
return a*b
n = max(len(str(a)), len(str(b)))
m = int(ceil(float(n)/2))
a1 = int(a // 10**m)
a2 = int(a % (10**m))
b1 = int(b // 10**m)
b2 = int(b % (10**m))
a = karatsuba(a1,b1)
d = karatsuba(a2,b2)
e = karatsuba(a1 + a2, b1 + b2) -a -d
return int(a*(10**(m*2)) + e*(10**m) + d)
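# Quick sanity check of the recursion (not part of the original snippet):
if __name__ == '__main__':
    assert karatsuba(1234, 5678) == 1234 * 5678
    assert karatsuba(12345678901234567890, 98765432109876543210) == \
        12345678901234567890 * 98765432109876543210
    print("karatsuba ok")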
|
from .oauth import BaseOAuth1
class WithingsOAuth(BaseOAuth1):
name = 'withings'
AUTHORIZATION_URL = 'https://oauth.withings.com/account/authorize'
REQUEST_TOKEN_URL = 'https://oauth.withings.com/account/request_token'
ACCESS_TOKEN_URL = 'https://oauth.withings.com/account/access_token'
ID_KEY = 'userid'
def get_user_details(self, response):
"""Return user details from Withings account"""
return {'userid': response['access_token']['userid'],
'email': ''}
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import itertools
import eventlet
import six
from heat.common.i18n import repr_wrapper
from heat.common import timeutils
from heat.engine import dependencies
from heat.engine import scheduler
from heat.tests import common
class DummyTask(object):
def __init__(self, num_steps=3, delays=None):
self.num_steps = num_steps
if delays is not None:
self.delays = iter(delays)
else:
self.delays = itertools.repeat(None)
def __call__(self, *args, **kwargs):
for i in range(1, self.num_steps + 1):
self.do_step(i, *args, **kwargs)
yield next(self.delays)
def do_step(self, step_num, *args, **kwargs):
pass
class ExceptionGroupTest(common.HeatTestCase):
def test_contains_exceptions(self):
exception_group = scheduler.ExceptionGroup()
self.assertIsInstance(exception_group.exceptions, list)
def test_can_be_initialized_with_a_list_of_exceptions(self):
ex1 = Exception("ex 1")
ex2 = Exception("ex 2")
exception_group = scheduler.ExceptionGroup([ex1, ex2])
self.assertIn(ex1, exception_group.exceptions)
self.assertIn(ex2, exception_group.exceptions)
def test_can_add_exceptions_after_init(self):
ex = Exception()
exception_group = scheduler.ExceptionGroup()
exception_group.exceptions.append(ex)
self.assertIn(ex, exception_group.exceptions)
def test_str_representation_aggregates_all_exceptions(self):
ex1 = Exception("ex 1")
ex2 = Exception("ex 2")
exception_group = scheduler.ExceptionGroup([ex1, ex2])
self.assertEqual("['ex 1', 'ex 2']", six.text_type(exception_group))
class DependencyTaskGroupTest(common.HeatTestCase):
def setUp(self):
super(DependencyTaskGroupTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
self.aggregate_exceptions = False
self.error_wait_time = None
self.reverse_order = False
@contextlib.contextmanager
def _dep_test(self, *edges):
dummy = DummyTask(getattr(self, 'steps', 3))
deps = dependencies.Dependencies(edges)
tg = scheduler.DependencyTaskGroup(
deps, dummy, reverse=self.reverse_order,
error_wait_time=self.error_wait_time,
aggregate_exceptions=self.aggregate_exceptions)
self.m.StubOutWithMock(dummy, 'do_step')
yield dummy
self.m.ReplayAll()
scheduler.TaskRunner(tg)(wait_time=None)
def test_no_steps(self):
self.steps = 0
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
with self._dep_test(('second', 'first')):
pass
def test_single_node(self):
with self._dep_test(('only', None)) as dummy:
dummy.do_step(1, 'only').AndReturn(None)
dummy.do_step(2, 'only').AndReturn(None)
dummy.do_step(3, 'only').AndReturn(None)
def test_disjoint(self):
with self._dep_test(('1', None), ('2', None)) as dummy:
dummy.do_step(1, '1').InAnyOrder('1')
dummy.do_step(1, '2').InAnyOrder('1')
dummy.do_step(2, '1').InAnyOrder('2')
dummy.do_step(2, '2').InAnyOrder('2')
dummy.do_step(3, '1').InAnyOrder('3')
dummy.do_step(3, '2').InAnyOrder('3')
def test_single_fwd(self):
with self._dep_test(('second', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'second').AndReturn(None)
dummy.do_step(2, 'second').AndReturn(None)
dummy.do_step(3, 'second').AndReturn(None)
def test_chain_fwd(self):
with self._dep_test(('third', 'second'),
('second', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'second').AndReturn(None)
dummy.do_step(2, 'second').AndReturn(None)
dummy.do_step(3, 'second').AndReturn(None)
dummy.do_step(1, 'third').AndReturn(None)
dummy.do_step(2, 'third').AndReturn(None)
dummy.do_step(3, 'third').AndReturn(None)
def test_diamond_fwd(self):
with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'first'), ('mid2', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'mid1').InAnyOrder('1')
dummy.do_step(1, 'mid2').InAnyOrder('1')
dummy.do_step(2, 'mid1').InAnyOrder('2')
dummy.do_step(2, 'mid2').InAnyOrder('2')
dummy.do_step(3, 'mid1').InAnyOrder('3')
dummy.do_step(3, 'mid2').InAnyOrder('3')
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_complex_fwd(self):
with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'mid3'), ('mid1', 'first'),
('mid3', 'first'), ('mid2', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'mid2').InAnyOrder('1')
dummy.do_step(1, 'mid3').InAnyOrder('1')
dummy.do_step(2, 'mid2').InAnyOrder('2')
dummy.do_step(2, 'mid3').InAnyOrder('2')
dummy.do_step(3, 'mid2').InAnyOrder('3')
dummy.do_step(3, 'mid3').InAnyOrder('3')
dummy.do_step(1, 'mid1').AndReturn(None)
dummy.do_step(2, 'mid1').AndReturn(None)
dummy.do_step(3, 'mid1').AndReturn(None)
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_many_edges_fwd(self):
with self._dep_test(('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3')) as dummy:
dummy.do_step(1, 'e1').InAnyOrder('1edges')
dummy.do_step(1, 'e2').InAnyOrder('1edges')
dummy.do_step(1, 'e3').InAnyOrder('1edges')
dummy.do_step(2, 'e1').InAnyOrder('2edges')
dummy.do_step(2, 'e2').InAnyOrder('2edges')
dummy.do_step(2, 'e3').InAnyOrder('2edges')
dummy.do_step(3, 'e1').InAnyOrder('3edges')
dummy.do_step(3, 'e2').InAnyOrder('3edges')
dummy.do_step(3, 'e3').InAnyOrder('3edges')
dummy.do_step(1, 'mid3').AndReturn(None)
dummy.do_step(2, 'mid3').AndReturn(None)
dummy.do_step(3, 'mid3').AndReturn(None)
dummy.do_step(1, 'mid2').InAnyOrder('1mid')
dummy.do_step(1, 'mid1').InAnyOrder('1mid')
dummy.do_step(2, 'mid2').InAnyOrder('2mid')
dummy.do_step(2, 'mid1').InAnyOrder('2mid')
dummy.do_step(3, 'mid2').InAnyOrder('3mid')
dummy.do_step(3, 'mid1').InAnyOrder('3mid')
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_dbldiamond_fwd(self):
with self._dep_test(('last', 'a1'), ('last', 'a2'),
('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2'),
('b1', 'first'), ('b2', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'b1').InAnyOrder('1b')
dummy.do_step(1, 'b2').InAnyOrder('1b')
dummy.do_step(2, 'b1').InAnyOrder('2b')
dummy.do_step(2, 'b2').InAnyOrder('2b')
dummy.do_step(3, 'b1').InAnyOrder('3b')
dummy.do_step(3, 'b2').InAnyOrder('3b')
dummy.do_step(1, 'a1').InAnyOrder('1a')
dummy.do_step(1, 'a2').InAnyOrder('1a')
dummy.do_step(2, 'a1').InAnyOrder('2a')
dummy.do_step(2, 'a2').InAnyOrder('2a')
dummy.do_step(3, 'a1').InAnyOrder('3a')
dummy.do_step(3, 'a2').InAnyOrder('3a')
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_circular_deps(self):
d = dependencies.Dependencies([('first', 'second'),
('second', 'third'),
('third', 'first')])
self.assertRaises(dependencies.CircularDependencyException,
scheduler.DependencyTaskGroup, d)
def test_aggregate_exceptions_raises_all_at_the_end(self):
def run_tasks_with_exceptions(e1=None, e2=None):
self.aggregate_exceptions = True
tasks = (('A', None), ('B', None), ('C', None))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(1, 'C').InAnyOrder('1').AndRaise(e1)
dummy.do_step(2, 'A').InAnyOrder('2')
dummy.do_step(2, 'B').InAnyOrder('2').AndRaise(e2)
dummy.do_step(3, 'A').InAnyOrder('3')
e1 = Exception('e1')
e2 = Exception('e2')
exc = self.assertRaises(scheduler.ExceptionGroup,
run_tasks_with_exceptions, e1, e2)
self.assertEqual(set([e1, e2]), set(exc.exceptions))
def test_aggregate_exceptions_cancels_dependent_tasks_recursively(self):
def run_tasks_with_exceptions(e1=None, e2=None):
self.aggregate_exceptions = True
tasks = (('A', None), ('B', 'A'), ('C', 'B'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').AndRaise(e1)
e1 = Exception('e1')
exc = self.assertRaises(scheduler.ExceptionGroup,
run_tasks_with_exceptions, e1)
self.assertEqual([e1], exc.exceptions)
def test_aggregate_exceptions_cancels_tasks_in_reverse_order(self):
def run_tasks_with_exceptions(e1=None, e2=None):
self.reverse_order = True
self.aggregate_exceptions = True
tasks = (('A', None), ('B', 'A'), ('C', 'B'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'C').AndRaise(e1)
e1 = Exception('e1')
exc = self.assertRaises(scheduler.ExceptionGroup,
run_tasks_with_exceptions, e1)
self.assertEqual([e1], exc.exceptions)
def test_exceptions_on_cancel(self):
class TestException(Exception):
pass
class ExceptionOnExit(Exception):
pass
cancelled = []
def task_func(arg):
for i in range(4):
if i > 1:
raise TestException
try:
yield
except GeneratorExit:
cancelled.append(arg)
raise ExceptionOnExit
tasks = (('A', None), ('B', None), ('C', None))
deps = dependencies.Dependencies(tasks)
tg = scheduler.DependencyTaskGroup(deps, task_func)
task = tg()
next(task)
next(task)
self.assertRaises(TestException, next, task)
self.assertEqual(len(tasks) - 1, len(cancelled))
def test_exception_grace_period(self):
e1 = Exception('e1')
def run_tasks_with_exceptions():
self.error_wait_time = 5
tasks = (('A', None), ('B', None), ('C', 'A'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
dummy.do_step(2, 'B').InAnyOrder('2')
dummy.do_step(3, 'B')
exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
self.assertEqual(e1, exc)
def test_exception_grace_period_expired(self):
e1 = Exception('e1')
def run_tasks_with_exceptions():
self.steps = 5
self.error_wait_time = 0.05
def sleep():
eventlet.sleep(self.error_wait_time)
tasks = (('A', None), ('B', None), ('C', 'A'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
dummy.do_step(2, 'B').InAnyOrder('2')
dummy.do_step(3, 'B')
dummy.do_step(4, 'B').WithSideEffects(sleep)
exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
self.assertEqual(e1, exc)
def test_exception_grace_period_per_task(self):
e1 = Exception('e1')
def get_wait_time(key):
if key == 'B':
return 5
else:
return None
def run_tasks_with_exceptions():
self.error_wait_time = get_wait_time
tasks = (('A', None), ('B', None), ('C', 'A'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
dummy.do_step(2, 'B').InAnyOrder('2')
dummy.do_step(3, 'B')
exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
self.assertEqual(e1, exc)
def test_thrown_exception_order(self):
e1 = Exception('e1')
e2 = Exception('e2')
tasks = (('A', None), ('B', None), ('C', 'A'))
deps = dependencies.Dependencies(tasks)
tg = scheduler.DependencyTaskGroup(
deps, DummyTask(), reverse=self.reverse_order,
error_wait_time=1,
aggregate_exceptions=self.aggregate_exceptions)
task = tg()
next(task)
task.throw(e1)
next(task)
tg.error_wait_time = None
exc = self.assertRaises(type(e2), task.throw, e2)
self.assertIs(e2, exc)
class TaskTest(common.HeatTestCase):
def setUp(self):
super(TaskTest, self).setUp()
scheduler.ENABLE_SLEEP = True
self.addCleanup(self.m.VerifyAll)
def test_run(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)()
def test_run_as_task(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
tr = scheduler.TaskRunner(task)
rt = tr.as_task()
for step in rt:
pass
self.assertTrue(tr.done())
def test_run_as_task_started(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
tr = scheduler.TaskRunner(task)
tr.start()
for step in tr.as_task():
pass
self.assertTrue(tr.done())
def test_run_as_task_cancel(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
self.m.ReplayAll()
tr = scheduler.TaskRunner(task)
rt = tr.as_task()
next(rt)
rt.close()
self.assertTrue(tr.done())
def test_run_as_task_exception(self):
class TestException(Exception):
pass
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
self.m.ReplayAll()
tr = scheduler.TaskRunner(task)
rt = tr.as_task()
next(rt)
self.assertRaises(TestException, rt.throw, TestException)
self.assertTrue(tr.done())
def test_run_as_task_swallow_exception(self):
class TestException(Exception):
pass
def task():
try:
yield
except TestException:
yield
tr = scheduler.TaskRunner(task)
rt = tr.as_task()
next(rt)
rt.throw(TestException)
self.assertFalse(tr.done())
self.assertRaises(StopIteration, next, rt)
self.assertTrue(tr.done())
def test_run_delays(self):
task = DummyTask(delays=itertools.repeat(2))
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)()
def test_run_delays_dynamic(self):
task = DummyTask(delays=[2, 4, 1])
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)()
def test_run_wait_time(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(42).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(42).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)(wait_time=42)
def test_start_run(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
runner.run_to_completion()
def test_start_run_wait_time(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(24).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(24).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
runner.run_to_completion(wait_time=24)
def test_run_progress(self):
progress_count = []
def progress():
progress_count.append(None)
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)(progress_callback=progress)
self.assertEqual(task.num_steps, len(progress_count))
def test_start_run_progress(self):
progress_count = []
def progress():
progress_count.append(None)
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
runner.run_to_completion(progress_callback=progress)
self.assertEqual(task.num_steps - 1, len(progress_count))
def test_run_as_task_progress(self):
progress_count = []
def progress():
progress_count.append(None)
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
tr = scheduler.TaskRunner(task)
rt = tr.as_task(progress_callback=progress)
for step in rt:
pass
self.assertEqual(task.num_steps, len(progress_count))
def test_run_progress_exception(self):
class TestException(Exception):
pass
progress_count = []
def progress():
if progress_count:
raise TestException
progress_count.append(None)
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
self.assertRaises(TestException, scheduler.TaskRunner(task),
progress_callback=progress)
self.assertEqual(1, len(progress_count))
def test_start_run_progress_exception(self):
class TestException(Exception):
pass
progress_count = []
def progress():
if progress_count:
raise TestException
progress_count.append(None)
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertRaises(TestException, runner.run_to_completion,
progress_callback=progress)
self.assertEqual(1, len(progress_count))
def test_run_as_task_progress_exception(self):
class TestException(Exception):
pass
progress_count = []
def progress():
if progress_count:
raise TestException
progress_count.append(None)
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
self.m.ReplayAll()
tr = scheduler.TaskRunner(task)
rt = tr.as_task(progress_callback=progress)
next(rt)
next(rt)
self.assertRaises(TestException, next, rt)
self.assertEqual(1, len(progress_count))
def test_run_progress_exception_swallow(self):
class TestException(Exception):
pass
progress_count = []
def progress():
try:
if not progress_count:
raise TestException
finally:
progress_count.append(None)
def task():
try:
yield
except TestException:
yield
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
scheduler.TaskRunner._sleep(0).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)(progress_callback=progress)
self.assertEqual(2, len(progress_count))
def test_start_run_progress_exception_swallow(self):
class TestException(Exception):
pass
progress_count = []
def progress():
try:
if not progress_count:
raise TestException
finally:
progress_count.append(None)
def task():
yield
try:
yield
except TestException:
yield
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
scheduler.TaskRunner._sleep(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
runner.run_to_completion(progress_callback=progress)
self.assertEqual(2, len(progress_count))
def test_run_as_task_progress_exception_swallow(self):
class TestException(Exception):
pass
progress_count = []
def progress():
try:
if not progress_count:
raise TestException
finally:
progress_count.append(None)
def task():
try:
yield
except TestException:
yield
tr = scheduler.TaskRunner(task)
rt = tr.as_task(progress_callback=progress)
next(rt)
next(rt)
self.assertRaises(StopIteration, next, rt)
self.assertEqual(2, len(progress_count))
def test_sleep(self):
sleep_time = 42
self.m.StubOutWithMock(eventlet, 'sleep')
eventlet.sleep(0).AndReturn(None)
eventlet.sleep(sleep_time).MultipleTimes().AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=sleep_time)
def test_sleep_zero(self):
self.m.StubOutWithMock(eventlet, 'sleep')
eventlet.sleep(0).MultipleTimes().AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=0)
def test_sleep_none(self):
self.m.StubOutWithMock(eventlet, 'sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=None)
def test_args(self):
args = ['foo', 'bar']
kwargs = {'baz': 'quux', 'blarg': 'wibble'}
self.m.StubOutWithMock(DummyTask, '__call__')
task = DummyTask()
task(*args, **kwargs)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task, *args, **kwargs)
runner(wait_time=None)
def test_non_callable(self):
self.assertRaises(AssertionError, scheduler.TaskRunner, object())
def test_stepping(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertFalse(runner.step())
self.assertTrue(runner)
self.assertFalse(runner.step())
self.assertTrue(runner.step())
self.assertFalse(runner)
def test_start_no_steps(self):
task = DummyTask(0)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertTrue(runner.done())
self.assertTrue(runner.step())
def test_start_only(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
def test_double_start(self):
runner = scheduler.TaskRunner(DummyTask())
runner.start()
self.assertRaises(AssertionError, runner.start)
def test_start_cancelled(self):
runner = scheduler.TaskRunner(DummyTask())
runner.cancel()
self.assertRaises(AssertionError, runner.start)
def test_call_double_start(self):
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=None)
self.assertRaises(AssertionError, runner.start)
def test_start_function(self):
def task():
pass
runner = scheduler.TaskRunner(task)
runner.start()
self.assertTrue(runner.started())
self.assertTrue(runner.done())
self.assertTrue(runner.step())
def test_repeated_done(self):
task = DummyTask(0)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertTrue(runner.step())
self.assertTrue(runner.step())
def test_timeout(self):
st = timeutils.wallclock()
def task():
while True:
yield
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
self.assertRaises(scheduler.Timeout, runner.step)
def test_timeout_return(self):
st = timeutils.wallclock()
def task():
while True:
try:
yield
except scheduler.Timeout:
return
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
self.assertTrue(runner.step())
self.assertFalse(runner)
def test_timeout_swallowed(self):
st = timeutils.wallclock()
def task():
while True:
try:
yield
except scheduler.Timeout:
yield
self.fail('Task still running')
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
self.assertTrue(runner.step())
self.assertFalse(runner)
self.assertTrue(runner.step())
def test_as_task_timeout(self):
st = timeutils.wallclock()
def task():
while True:
yield
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
rt = runner.as_task(timeout=1)
next(rt)
self.assertTrue(runner)
self.assertRaises(scheduler.Timeout, next, rt)
def test_as_task_timeout_shorter(self):
st = timeutils.wallclock()
def task():
while True:
yield
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 0.7)
timeutils.wallclock().AndReturn(st + 1.6)
timeutils.wallclock().AndReturn(st + 2.6)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=10)
self.assertTrue(runner)
rt = runner.as_task(timeout=1)
next(rt)
self.assertRaises(scheduler.Timeout, next, rt)
def test_as_task_timeout_longer(self):
st = timeutils.wallclock()
def task():
while True:
yield
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 0.6)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
rt = runner.as_task(timeout=10)
self.assertRaises(scheduler.Timeout, next, rt)
def test_cancel_not_started(self):
task = DummyTask(1)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.cancel()
self.assertTrue(runner.done())
def test_cancel_done(self):
task = DummyTask(1)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
self.assertTrue(runner.step())
self.assertTrue(runner.done())
runner.cancel()
self.assertTrue(runner.done())
self.assertTrue(runner.step())
def test_cancel(self):
task = DummyTask(3)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel()
self.assertTrue(runner.step())
def test_cancel_grace_period(self):
st = timeutils.wallclock()
task = DummyTask(5)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(timeutils, 'wallclock')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
task.do_step(3).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.0)
task.do_step(4).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel(grace_period=1.0)
self.assertFalse(runner.step())
self.assertFalse(runner.step())
self.assertTrue(runner.step())
def test_cancel_grace_period_before_timeout(self):
st = timeutils.wallclock()
task = DummyTask(5)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.1)
task.do_step(1).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
task.do_step(2).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
timeutils.wallclock().AndReturn(st + 0.5)
task.do_step(3).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.0)
task.do_step(4).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start(timeout=10)
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel(grace_period=1.0)
self.assertFalse(runner.step())
self.assertFalse(runner.step())
self.assertTrue(runner.step())
def test_cancel_grace_period_after_timeout(self):
st = timeutils.wallclock()
task = DummyTask(5)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.1)
task.do_step(1).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
task.do_step(2).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
timeutils.wallclock().AndReturn(st + 0.5)
task.do_step(3).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.0)
task.do_step(4).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start(timeout=1.25)
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel(grace_period=3)
self.assertFalse(runner.step())
self.assertFalse(runner.step())
self.assertRaises(scheduler.Timeout, runner.step)
def test_cancel_grace_period_not_started(self):
task = DummyTask(1)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.cancel(grace_period=0.5)
self.assertTrue(runner.done())
class TimeoutTest(common.HeatTestCase):
def test_compare(self):
task = scheduler.TaskRunner(DummyTask())
earlier = scheduler.Timeout(task, 10)
eventlet.sleep(0.01)
later = scheduler.Timeout(task, 10)
self.assertTrue(earlier < later)
self.assertTrue(later > earlier)
self.assertEqual(earlier, earlier)
self.assertNotEqual(earlier, later)
class DescriptionTest(common.HeatTestCase):
def setUp(self):
super(DescriptionTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
def test_func(self):
def f():
pass
self.assertEqual('f', scheduler.task_description(f))
def test_lambda(self):
l = lambda: None
self.assertEqual('<lambda>', scheduler.task_description(l))
def test_method(self):
class C(object):
def __str__(self):
return 'C "o"'
def __repr__(self):
return 'o'
def m(self):
pass
self.assertEqual('m from C "o"', scheduler.task_description(C().m))
def test_object(self):
class C(object):
def __str__(self):
return 'C "o"'
def __repr__(self):
return 'o'
def __call__(self):
pass
self.assertEqual('o', scheduler.task_description(C()))
def test_unicode(self):
@repr_wrapper
@six.python_2_unicode_compatible
class C(object):
def __str__(self):
return u'C "\u2665"'
def __repr__(self):
return u'\u2665'
def __call__(self):
pass
def m(self):
pass
self.assertEqual(u'm from C "\u2665"',
scheduler.task_description(C().m))
self.assertEqual(u'\u2665',
scheduler.task_description(C()))
class WrapperTaskTest(common.HeatTestCase):
def setUp(self):
super(WrapperTaskTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
def test_wrap(self):
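        # A wrappertask generator may yield sub-tasks: each yielded task is stepped to completion,
        # one step per scheduler step, before the parent resumes; the mock expectations below
        # verify that interleaving of child steps and sleeps.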
child_tasks = [DummyTask() for i in range(3)]
@scheduler.wrappertask
def task():
for child_task in child_tasks:
yield child_task()
yield
for child_task in child_tasks:
self.m.StubOutWithMock(child_task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
scheduler.TaskRunner._sleep(0).AndReturn(None)
for child_task in child_tasks:
child_task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
child_task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
child_task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)()
def test_parent_yield_value(self):
@scheduler.wrappertask
def parent_task():
yield
yield 3
yield iter([1, 2, 4])
task = parent_task()
self.assertIsNone(next(task))
self.assertEqual(3, next(task))
self.assertEqual([1, 2, 4], list(next(task)))
def test_child_yield_value(self):
def child_task():
yield
yield 3
yield iter([1, 2, 4])
@scheduler.wrappertask
def parent_task():
yield child_task()
task = parent_task()
self.assertIsNone(next(task))
self.assertEqual(3, next(task))
self.assertEqual([1, 2, 4], list(next(task)))
def test_child_exception(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
raise
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(MyException, next, task)
def test_child_exception_exit(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
return
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(StopIteration, next, task)
def test_child_exception_swallow(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
yield
else:
self.fail('No exception raised in parent_task')
yield
task = parent_task()
next(task)
next(task)
def test_child_exception_swallow_next(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
dummy = DummyTask()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
pass
else:
self.fail('No exception raised in parent_task')
yield dummy()
task = parent_task()
next(task)
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
for i in range(1, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_thrown_exception_swallow_next(self):
class MyException(Exception):
pass
dummy = DummyTask()
@scheduler.wrappertask
def child_task():
try:
yield
except MyException:
yield dummy()
else:
self.fail('No exception raised in child_task')
@scheduler.wrappertask
def parent_task():
yield child_task()
task = parent_task()
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
next(task)
task.throw(MyException)
for i in range(2, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_thrown_exception_raise(self):
class MyException(Exception):
pass
dummy = DummyTask()
@scheduler.wrappertask
def child_task():
try:
yield
except MyException:
raise
else:
self.fail('No exception raised in child_task')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
yield dummy()
task = parent_task()
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
next(task)
task.throw(MyException)
for i in range(2, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_thrown_exception_exit(self):
class MyException(Exception):
pass
dummy = DummyTask()
@scheduler.wrappertask
def child_task():
try:
yield
except MyException:
return
else:
self.fail('No exception raised in child_task')
@scheduler.wrappertask
def parent_task():
yield child_task()
yield dummy()
task = parent_task()
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
next(task)
task.throw(MyException)
for i in range(2, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_parent_exception(self):
class MyException(Exception):
pass
def child_task():
yield
@scheduler.wrappertask
def parent_task():
yield child_task()
raise MyException()
task = parent_task()
next(task)
self.assertRaises(MyException, next, task)
def test_parent_throw(self):
class MyException(Exception):
pass
@scheduler.wrappertask
def parent_task():
try:
yield DummyTask()()
except MyException:
raise
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(MyException, task.throw, MyException())
def test_parent_throw_exit(self):
class MyException(Exception):
pass
@scheduler.wrappertask
def parent_task():
try:
yield DummyTask()()
except MyException:
return
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(StopIteration, task.throw, MyException())
def test_parent_cancel(self):
@scheduler.wrappertask
def parent_task():
try:
yield
except GeneratorExit:
raise
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_parent_cancel_exit(self):
@scheduler.wrappertask
def parent_task():
try:
yield
except GeneratorExit:
return
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_cancel(self):
def child_task():
try:
yield
except GeneratorExit:
raise
else:
self.fail('child_task not closed')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except GeneratorExit:
raise
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_cancel_exit(self):
def child_task():
try:
yield
except GeneratorExit:
return
else:
self.fail('child_task not closed')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except GeneratorExit:
raise
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_cancel_parent_exit(self):
def child_task():
try:
yield
except GeneratorExit:
return
else:
self.fail('child_task not closed')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except GeneratorExit:
return
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
|
"""Example: Find all servers per group"""
import os
from configparser import ConfigParser
from cbw_api_toolbox.cbw_api import CBWApi
CONF = ConfigParser()
CONF.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'api.conf'))
CLIENT = CBWApi(CONF.get('cyberwatch', 'url'), CONF.get('cyberwatch', 'api_key'), CONF.get('cyberwatch', 'secret_key'))
CLIENT.ping()
SERVERS = CLIENT.servers()
CATEGORY_BY_GROUPS = {}
# Build CATEGORY_BY_GROUPS: group name -> {server category: [servers in that category]}
for server in SERVERS:
server = CLIENT.server(str(server.id))
for group in server.groups:
if group.name not in CATEGORY_BY_GROUPS:
CATEGORY_BY_GROUPS[group.name] = {}
concerned_group = CATEGORY_BY_GROUPS[group.name]
if server.category not in concerned_group:
concerned_group[server.category] = []
concerned_group[server.category].append(server)
for group in CATEGORY_BY_GROUPS:
print("--- GROUP : {0} ---".format(group))
for category in CATEGORY_BY_GROUPS[group]:
print("{0} : {1}".format(category, len(CATEGORY_BY_GROUPS[group][category])))
for server in CATEGORY_BY_GROUPS[group][category]:
print("{0} with hostname : {1}".format(category, server.hostname))
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetCmekSettings
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_generated_logging_v2_ConfigServiceV2_GetCmekSettings_sync]
from google.cloud import logging_v2
def sample_get_cmek_settings():
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
project = "my-project-id"
name = f"projects/{project}/cmekSettings"
request = logging_v2.GetCmekSettingsRequest(
name=name,
)
# Make the request
response = client.get_cmek_settings(request=request)
# Handle response
print(response)
# [END logging_generated_logging_v2_ConfigServiceV2_GetCmekSettings_sync]
|
"""
This file offers the methods to automatically retrieve the graph Azotobacter vinelandii.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:28:09.024485
The undirected graph Azotobacter vinelandii has 4955 nodes and 578640 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04715 and has 18 connected components, where the component with most
nodes has 4918 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 200, the mean node degree is 233.56, and
the node degree mode is 1. The top 5 most central nodes are 322710.Avin_29560
(degree 2047), 322710.Avin_00630 (degree 1761), 322710.Avin_51910 (degree
1573), 322710.Avin_51880 (degree 1360) and 322710.Avin_35230 (degree 1351).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import AzotobacterVinelandii
# Then load the graph
graph = AzotobacterVinelandii()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def AzotobacterVinelandii(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Azotobacter vinelandii graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default False.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Azotobacter vinelandii graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:28:09.024485
The undirected graph Azotobacter vinelandii has 4955 nodes and 578640 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04715 and has 18 connected components, where the component with most
nodes has 4918 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 200, the mean node degree is 233.56, and
the node degree mode is 1. The top 5 most central nodes are 322710.Avin_29560
(degree 2047), 322710.Avin_00630 (degree 1761), 322710.Avin_51910 (degree
1573), 322710.Avin_51880 (degree 1360) and 322710.Avin_35230 (degree 1351).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import AzotobacterVinelandii
# Then load the graph
graph = AzotobacterVinelandii()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="AzotobacterVinelandii",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
from django.conf.urls import url
from movies import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='movies-index'),
url(r'^name/$', views.NameSearchView.as_view(), name='movies-name-search'),
url(r'^id/$', views.IDSearchView.as_view(), name='movies-id-search'),
]
|
try:
import time
FirstTime = time.time()
import os
import io
import sys
import time
import glob
import socket
import locale
import hashlib
import tempfile
import datetime
import subprocess
from ctypes import windll
from urllib.request import urlopen
try:
import psutil
importedPsutil = True
except ImportError:
importedPsutil = False
import win32gui
import win32api
import pythoncom
import win32process
import win32com.client
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
from pynput.keyboard import Controller, Key
from pynput.mouse import Controller as MouseController
from external.FramelessWindow import QFramelessDialog
from languages import *
import globals
old_stdout = sys.stdout
sys.stdout = buffer = io.StringIO()
from settings import *
from tools import *
import tools
from external.WnfReader import isFocusAssistEnabled, getNotificationNumber
blacklistedProcesses = ["msrdc.exe", "mstsc.exe", "CDViewer.exe", "wfica32.exe", "vmware-view.exe", "vmware.exe"]
blacklistedFullscreenApps = ("", "Program Manager", "NVIDIA GeForce Overlay", "ElenenClock_IgnoreFullscreenEvent") # The "" codes for titleless windows
seconddoubleclick = False
isRDPRunning = False
restartCount = 0
tempDir = ""
timeStr = ""
dateTimeFormat = ""
clocks = []
oldScreens = []
isFocusAssist = False
numOfNotifs = 0
print("---------------------------------------------------------------------------------------------------")
print("")
print(f" ElevenClock's {versionName} (v{version}) log: Select all the text and hit Ctrl+C to copy it")
print(f" All modules loaded successfully and sys.stdout patched correctly, starting main script")
print(f" Translator function set language to \"{langName}\"")
print("")
print("---------------------------------------------------------------------------------------------------")
print("")
print(" Log legend:")
print(" 🔵: Verbose")
print(" 🟢: Information")
print(" 🟡: Warning")
print(" 🟠: Handled unexpected exception")
print(" 🔴: Unhandled unexpected exception")
print(" 🟣: Handled expected exception")
print("")
def _(s) -> str:
return tools._(s)
def checkRDP():
    def checkIfElevenClockRunning(processes, blacklistedProcess) -> bool:
        for p_name in processes:
if p_name in blacklistedProcess:
print(f"🟡 Blacklisted procName {p_name} detected, hiding...")
return True
return False
global isRDPRunning
print("🔵 Starting RDP thread")
while True:
pythoncom.CoInitialize()
_wmi = win32com.client.GetObject('winmgmts:')
processes = _wmi.ExecQuery('Select Name from win32_process')
procs = [p.Name for p in processes]
isRDPRunning = checkIfElevenClockRunning(procs, blacklistedProcesses)
time.sleep(5)
def getMousePos():
try:
return QPoint(mController.position[0], mController.position[1])
except AttributeError:
print("🟠 Mouse thread returned AttributeError")
except Exception as e:
report(e)
def updateChecker():
updateIfPossible()
time.sleep(60)
while True:
updateIfPossible()
time.sleep(7200)
def updateIfPossible(force = False):
try:
if(not(getSettings("DisableAutoCheckForUpdates")) or force):
print("🔵 Starting update check")
integrityPass = False
dmname = socket.gethostbyname_ex("versions.somepythonthings.tk")[0]
if(dmname == "769432b9-3560-4f94-8f90-01c95844d994.id.repl.co" or getSettings("BypassDomainAuthCheck")): # Check provider IP to prevent exploits
integrityPass = True
try:
response = urlopen("https://versions.somepythonthings.tk/versions/elevenclock.ver" if not getSettings("AlternativeUpdateServerProvider") else "http://www.somepythonthings.tk/versions/elevenclock.ver")
except Exception as e:
report(e)
response = urlopen("http://www.somepythonthings.tk/versions/elevenclock.ver")
integrityPass = True
print("🔵 Version URL:", response.url)
response = response.read().decode("utf8")
new_version_number = response.split("///")[0]
provided_hash = response.split("///")[2].replace("\n", "").lower()
if float(new_version_number) > version:
print("🟢 Updates found!")
if(not(getSettings("DisableAutoInstallUpdates")) or force):
showNotif.infoSignal.emit(("ElevenClock Updater"), ("ElevenClock is downloading updates"))
if(integrityPass):
url = "https://github.com/martinet101/ElevenClock/releases/latest/download/ElevenClock.Installer.exe"
filedata = urlopen(url)
datatowrite = filedata.read()
filename = ""
with open(os.path.join(tempDir, "SomePythonThings-ElevenClock-Updater.exe"), 'wb') as f:
f.write(datatowrite)
filename = f.name
if(hashlib.sha256(datatowrite).hexdigest().lower() == provided_hash):
print("🔵 Hash: ", provided_hash)
print("🟢 Hash ok, starting update")
if(getSettings("EnableSilentUpdates") and not(force)):
mousePos = getMousePos()
time.sleep(5)
while mousePos != getMousePos():
print("🟡 User is using the mouse, waiting")
mousePos = getMousePos()
time.sleep(5)
subprocess.run('start /B "" "{0}" /verysilent'.format(filename), shell=True)
else:
subprocess.run('start /B "" "{0}" /silent'.format(filename), shell=True)
else:
print("🟠 Hash not ok")
print("🟠 File hash: ", hashlib.sha256(datatowrite).hexdigest())
print("🟠 Provided hash: ", provided_hash)
showWarn.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available, but ElevenClock can't verify the authenticity of the package. Please go ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
print("🟠 Can't verify update server authenticity, aborting")
print("🟠 Provided DmName:", dmname)
print("🟠 Expected DmNane: 769432b9-3560-4f94-8f90-01c95844d994.id.repl.co")
showWarn.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available, but ElevenClock can't verify the authenticity of the updates server. Please go ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
showNotif.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available. Go to ElevenClock's Settings to update")
else:
print("🟢 Updates not found")
else:
print("🟠 Update checking disabled")
#old_stdout.write(buffer.getvalue())
#old_stdout.flush()
except Exception as e:
report(e)
#old_stdout.write(buffer.getvalue())
#old_stdout.flush()
def resetRestartCount():
global restartCount
while True:
if(restartCount>0):
print("🔵 Restart loop:", restartCount)
restartCount -= 1
time.sleep(0.3)
def loadClocks():
global clocks, oldScreens, st, restartCount, st
try:
st.kill()
except AttributeError:
pass
ForceClockOnFirstMonitor = getSettings("ForceClockOnFirstMonitor")
HideClockOnSecondaryMonitors = getSettings("HideClockOnSecondaryMonitors")
oldScreens = []
clocks = []
if importedPsutil:
process = psutil.Process(os.getpid())
memOk = (process.memory_info().rss/1048576) <= 150
else:
print("🟠 Psutil couldn't be imported!")
memOk = True
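    # Only rebuild the clocks if we are not stuck in a restart loop and memory use looks sane;
    # otherwise relaunch the whole process to recover.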
if restartCount<20 and memOk:
restartCount += 1
i = 0
for screen in app.screens():
screen: QScreen
oldScreens.append(getGeometry(screen))
if not screen == QGuiApplication.primaryScreen() or ForceClockOnFirstMonitor: # Check if we are not on the primary screen
if not HideClockOnSecondaryMonitors or screen == QGuiApplication.primaryScreen(): # First monitor is not affected by HideClockOnSecondaryMonitors
clocks.append(Clock(screen.logicalDotsPerInchX()/96, screen.logicalDotsPerInchY()/96, screen, i))
i += 1
else:
print("🟠 This is a secondary screen and is set to be skipped")
else: # Skip the primary display, as it has already the clock
print("🟡 This is the primary screen and is set to be skipped")
st = KillableThread(target=screenCheckThread, daemon=True, name="Main [loaded]: Screen listener")
st.start()
else:
os.startfile(sys.executable)
print("🔴 Overloading system, killing!")
app.quit()
sys.exit(1)
def getGeometry(screen: QScreen):
"""
Return a tuple containing: (screen_width, screen_height, screen_pos_x, screen_pos_y, screen_DPI, desktopWindowRect)
"""
try:
geometry = screen.geometry()
g = (geometry.width(), geometry.height(), geometry.x(), geometry.y(), screen.logicalDotsPerInch(), win32api.EnumDisplayMonitors())
return g
except Exception as e:
report(e)
geometry = QGuiApplication.primaryScreen().geometry()
g = (geometry.width(), geometry.height(), geometry.x(), geometry.y(), screen.logicalDotsPerInch(), win32api.EnumDisplayMonitors())
return g
def theyMatch(oldscreens, newscreens):
if len(oldscreens) != len(newscreens) or len(app.screens()) != len(win32api.EnumDisplayMonitors()):
return False # The number of displays has changed
# Check that all screen dimensions and dpi are the same as before
return all(old == getGeometry(new) for old, new in zip(oldscreens, newscreens))
def wnfDataThread():
global isFocusAssist, numOfNotifs
while True:
isFocusAssist = isFocusAssistEnabled()
time.sleep(0.25)
if not isFocusAssist:
numOfNotifs = getNotificationNumber()
time.sleep(0.25)
def screenCheckThread():
while theyMatch(oldScreens, app.screens()):
time.sleep(1)
signal.restartSignal.emit()
pass
def closeClocks():
for clock in clocks:
clock.hide()
clock.close()
def showMessage(title: str, body: str, uBtn: bool = True) -> None:
"""
Shows a Windows Notification
"""
lastState = i.isVisible()
i.show()
i.showMessage(title, body)
if uBtn:
sw.updateButton.show()
i.setVisible(lastState)
def restartClocks(caller: str = ""):
global clocks, st, rdpThread
closeClocks()
loadClocks()
loadTimeFormat()
try:
rdpThread.kill()
except AttributeError:
pass
rdpThread = KillableThread(target=checkRDP, daemon=True)
if(getSettings("EnableHideOnRDP")):
rdpThread.start()
def isElevenClockRunningThread():
nowTime = time.time()
name = f"ElevenClockRunning{nowTime}"
setSettings(name, True, False)
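    # Each instance drops a timestamped "ElevenClockRunning<time>" marker; older markers are removed,
    # and if this instance's own marker setting disappears a newer instance has taken over, so we exit.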
while True:
try:
for file in glob.glob(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning*")):
if(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), name) == file):
pass
else:
if(float(file.replace(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning"), "")) < nowTime): # If lockfile is older
os.remove(file)
if not(getSettings(name)):
print("🟠 KILLING, NEWER VERSION RUNNING")
killSignal.infoSignal.emit("", "")
except Exception as e:
report(e)
time.sleep(2)
def wanrUserAboutUpdates(a, b):
if(QMessageBox.question(sw, a, b, QMessageBox.Open | QMessageBox.Cancel, QMessageBox.Open) == QMessageBox.Open):
os.startfile("https://github.com/martinet101/ElevenClock/releases/latest")
def checkIfWokeUpThread():
while True:
lastTime = time.time()
time.sleep(3)
if((lastTime+6) < time.time()):
os.startfile(sys.executable)
def loadTimeFormat():
global dateTimeFormat
showSeconds = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "ShowSecondsInSystemClock", 0) or getSettings("EnableSeconds")
locale.setlocale(locale.LC_ALL, readRegedit(r"Control Panel\International", "LocaleName", "en_US"))
dateTimeFormat = "%HH:%M\n%A\n(W%W) %d/%m/%Y"
if getSettings("DisableTime"):
dateTimeFormat = dateTimeFormat.replace("%HH:%M\n", "")
if getSettings("DisableDate"):
if("\n" in dateTimeFormat):
dateTimeFormat = dateTimeFormat.replace("\n(W%W) %d/%m/%Y", "")
else:
dateTimeFormat = dateTimeFormat.replace("(W%W) %d/%m/%Y", "")
elif not getSettings("EnableWeekNumber"):
dateTimeFormat = dateTimeFormat.replace("(W%W) ", "")
else:
dateTimeFormat = dateTimeFormat.replace("(W%W) ", f"({_('W')}%W) ")
if not getSettings("EnableWeekDay"):
try:
dateTimeFormat = dateTimeFormat.replace("%A", "").replace("\n\n", "\n")
if dateTimeFormat[-1] == "\n":
dateTimeFormat = dateTimeFormat[0:-1]
if dateTimeFormat[0] == "\n":
dateTimeFormat = dateTimeFormat[1:]
except IndexError as e:
print("🟠 Date/Time string looks to be empty!")
except Exception as e:
report(e)
tDateMode = readRegedit(r"Control Panel\International", "sShortDate", "dd/MM/yyyy")
print("🔵 tDateMode:", tDateMode)
dateMode = ""
for i, ministr in enumerate(tDateMode.split("'")):
if i%2==0:
dateMode += ministr.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%$").replace("d", "%#d").replace("$", "d").replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%#m").replace("yyyy", "%Y").replace("yy", "%y")
else:
dateMode += ministr
tTimeMode = readRegedit(r"Control Panel\International", "sShortTime", "H:mm")
print("🔵 tTimeMode:", tTimeMode)
timeMode = ""
for i, ministr in enumerate(tTimeMode.split("'")):
if i%2==0:
timeMode += ministr.replace("HH", "%$").replace("H", "%#H").replace("$", "H").replace("hh", "%I").replace("h", "%#I").replace("mm", "%M").replace("m", "%#M").replace("tt", "%p").replace("t", "%p").replace("ss", "%S").replace("s", "%#S")
if not("S" in timeMode) and showSeconds == 1:
for separator in ":.-/_":
if(separator in timeMode):
timeMode += f"{separator}%S"
else:
timeMode += ministr
for separator in ":.-/_":
timeMode = timeMode.replace(f" %p{separator}%S", f"{separator}%S %p")
timeMode = timeMode.replace(f" %p{separator}%#S", f"{separator}%#S %p")
timeMode = timeMode.replace("%S", "%S·").replace("%#S", "%#S·")
dateTimeFormat = dateTimeFormat.replace("%d/%m/%Y", dateMode).replace("%HH:%M", timeMode)
print("🔵 Loaded date time format:", dateTimeFormat)
def timeStrThread():
global timeStr, dateTimeFormat
fixHyphen = getSettings("EnableHyphenFix")
encoding = 'unicode-escape'
while True:
for _ in range(36000):
dateTimeFormatUnicode = dateTimeFormat.encode(encoding).decode()
now = datetime.datetime.now()
timeStr = now.strftime(dateTimeFormatUnicode).encode().decode(encoding)
if fixHyphen:
timeStr = timeStr.replace("t-", "t -")
try:
secs = datetime.datetime.now().strftime("%S")
if secs[-1] == "1":
timeStr = timeStr.replace("·", " \u200e")
else:
timeStr = timeStr.replace("·", "")
except IndexError:
pass
time.sleep(0.1)
class RestartSignal(QObject):
restartSignal = Signal()
def __init__(self) -> None:
super().__init__()
class InfoSignal(QObject):
infoSignal = Signal(str, str)
def __init__(self) -> None:
super().__init__()
class Clock(QWidget):
refresh = Signal()
hideSignal = Signal()
callInMainSignal = Signal(object)
styler = Signal(str)
preferedwidth = 200
preferedHeight = 48
focusassitant = True
lastTheme = 0
clockShouldBeHidden = False
shouldBeVisible = True
isRDPRunning = True
clockOnTheLeft = False
textInputHostHWND = 0
INTLOOPTIME = 2
def __init__(self, dpix: float, dpiy: float, screen: QScreen, index: int):
super().__init__()
if f"_{screen.name()}_" in getSettingsValue("BlacklistedMonitors"):
print("🟠 Monitor blacklisted!")
self.hide()
else:
self.index = index
print(f"🔵 Initializing clock {index}...")
self.callInMainSignal.connect(lambda f: f())
self.styler.connect(self.setStyleSheet)
self.taskbarBackgroundColor = not getSettings("DisableTaskbarBackgroundColor") and not (getSettings("UseCustomBgColor") or getSettings("AccentBackgroundcolor"))
self.transparentBackground = getSettings("DisableTaskbarBackgroundColor") and not (getSettings("UseCustomBgColor") or getSettings("AccentBackgroundcolor"))
if self.taskbarBackgroundColor:
print("🔵 Using taskbar background color")
self.bgcolor = "0, 0, 0, 0"
else:
print("🟡 Not using taskbar background color")
if getSettings("AccentBackgroundcolor"):
self.bgcolor = f"{getColors()[5 if isTaskbarDark() else 1]},100"
else:
self.bgcolor = getSettingsValue("UseCustomBgColor") if getSettingsValue("UseCustomBgColor") else "0, 0, 0, 0"
print("🔵 Using bg color:", self.bgcolor)
self.prefMargins = 0
try:
if readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSi", 1) == 0 or (not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
self.prefMargins = self.getPx(5)
self.widgetStyleSheet = f"background-color: rgba(bgColor%); margin: {self.getPx(0)}px;margin-top: 0px;margin-bottom: 0px; border-radius: {self.getPx(5)}px;"
if not(not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
print("🟡 Small sized taskbar")
self.preferedHeight = 32
self.preferedwidth = 200
else:
print("🟢 Regular sized taskbar")
self.prefMargins = self.getPx(3)
self.widgetStyleSheet = f"background-color: rgba(bgColor%);margin: {self.getPx(0)}px;border-radius: {self.getPx(5)}px;padding: {self.getPx(2)}px;"
except Exception as e:
print("🟡 Regular sized taskbar")
report(e)
self.prefMargins = self.getPx(3)
self.widgetStyleSheet = f"background-color: rgba(bgColor%);margin: {self.getPx(0)}px;border-radius: {self.getPx(5)}px;;padding: {self.getPx(2)}px;"
self.setStyleSheet(self.widgetStyleSheet.replace("bgColor", self.bgcolor))
if getSettings("ClockFixedHeight"):
print("🟡 Custom height being used!")
try:
self.preferedHeight = int(getSettingsValue("ClockFixedHeight"))
except ValueError as e:
report(e)
self.win32screen = {"Device": None, "Work": (0, 0, 0, 0), "Flags": 0, "Monitor": (0, 0, 0, 0)}
for win32screen in win32api.EnumDisplayMonitors():
try:
if win32api.GetMonitorInfo(win32screen[0].handle)["Device"] == screen.name():
self.win32screen = win32api.GetMonitorInfo(win32screen[0].handle)
except Exception as e:
report(e)
if self.win32screen == {"Device": None, "Work": (0, 0, 0, 0), "Flags": 0, "Monitor": (0, 0, 0, 0)}: #If no display is matching
os.startfile(sys.executable) # Restart elevenclock
app.quit()
self.screenGeometry = QRect(self.win32screen["Monitor"][0], self.win32screen["Monitor"][1], self.win32screen["Monitor"][2]-self.win32screen["Monitor"][0], self.win32screen["Monitor"][3]-self.win32screen["Monitor"][1])
print("🔵 Monitor geometry:", self.screenGeometry)
self.refresh.connect(self.refreshandShow)
self.hideSignal.connect(self.hide)
self.keyboard = Controller()
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.setWindowFlag(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.Tool)
hex_blob = b'0\x00\x00\x00\xfe\xff\xff\xffz\xf4\x00\x00\x03\x00\x00\x00T\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x08\x04\x00\x00\x80\x07\x00\x008\x04\x00\x00`\x00\x00\x00\x01\x00\x00\x00'
registry_read_result = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3", "Settings", hex_blob)
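            # Byte 8 of the StuckRects3 blob appears to encode the taskbar auto-hide flag (observed
            # as 123 when auto-hide is enabled), so the clock hides together with an auto-hiding taskbar.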
self.autoHide = registry_read_result[8] == 123
if self.autoHide:
print("🟡 ElevenClock set to hide with the taskbar")
self.clockOnTheLeft = getSettings("ClockOnTheLeft")
screenName = screen.name().replace("\\", "_")
if not self.clockOnTheLeft:
if getSettings(f"SpecificClockOnTheLeft{screenName}"):
self.clockOnTheLeft = True
print(f"🟡 Clock {screenName} on the left (forced)")
else:
if getSettings(f"SpecificClockOnTheRight{screenName}"):
self.clockOnTheLeft = False
print(f"🟡 Clock {screenName} on the right (forced)")
try:
if (registry_read_result[12] == 1 and not getSettings("ForceOnBottom")) or getSettings("ForceOnTop"):
h = self.screenGeometry.y()
print("🟢 Taskbar at top")
else:
h = self.screenGeometry.y()+self.screenGeometry.height()-(self.preferedHeight*dpiy)
print("🟡 Taskbar at bottom")
except Exception as e:
report(e)
h = self.screenGeometry.y()+self.screenGeometry.height()-(self.preferedHeight*dpiy)
print("🟡 Taskbar at bottom")
self.label = Label(timeStr, self)
if self.clockOnTheLeft:
print("🟡 Clock on the left")
w = self.screenGeometry.x()+8*dpix
self.label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
else:
self.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
print("🟢 Clock on the right")
w = self.screenGeometry.x()+self.screenGeometry.width()-((self.preferedwidth)*dpix)
if getSettings("CenterAlignment"):
self.label.setAlignment(Qt.AlignCenter)
xoff = 0
yoff = 0
if getSettings("ClockXOffset"):
print("🟡 X offset being used!")
try:
xoff = int(getSettingsValue("ClockXOffset"))
except ValueError as e:
report(e)
if getSettings("ClockYOffset"):
print("🟡 Y offset being used!")
try:
yoff = int(getSettingsValue("ClockYOffset"))
except ValueError as e:
report(e)
self.w = int(w) + xoff
self.h = int(h) + yoff
self.dpix = dpix
self.dpiy = dpiy
if not(getSettings("EnableWin32API")):
print("🟢 Using qt's default positioning system")
self.move(self.w, self.h)
self.resize(int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy))
else:
print("🟡 Using win32 API positioning system")
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # forces functions to return real pixel numbers instead of scaled values
win32gui.SetWindowPos(self.winId(), 0, int(w), int(h), int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy), False)
print("🔵 Clock geometry:", self.geometry())
self.font: QFont = QFont()
customFont = getSettingsValue("UseCustomFont")
if customFont == "":
if lang == lang_ko:
self.fontfamilies = ["Malgun Gothic", "Segoe UI Variable", "sans-serif"]
elif lang == lang_zh_TW:
self.fontfamilies = ["Microsoft JhengHei UI", "Segoe UI Variable", "sans-serif"]
elif lang == lang_zh_CN:
self.fontfamilies = ["Microsoft YaHei UI", "Segoe UI Variable", "sans-serif"]
else:
self.fontfamilies = ["Segoe UI Variable Display", "sans-serif"]
else:
self.fontfamilies = [customFont]
print(f"🔵 Font families: {self.fontfamilies}")
customSize = getSettingsValue("UseCustomFontSize")
if customSize == "":
self.font.setPointSizeF(9.3)
else:
try:
self.font.setPointSizeF(float(customSize))
except Exception as e:
self.font.setPointSizeF(9.3)
report(e)
print(f"🔵 Font size: {self.font.pointSizeF()}")
self.font.setStyleStrategy(QFont.PreferOutline)
self.font.setLetterSpacing(QFont.PercentageSpacing, 100)
self.font.setHintingPreference(QFont.HintingPreference.PreferNoHinting)
self.label.setFont(self.font)
accColors = getColors()
def make_style_sheet(a, b, c, d, color):
bg = 1 if isTaskbarDark() else 4
fg = 6 if isTaskbarDark() else 1
return f"*{{padding: {a}px;padding-right: {b}px;margin-right: {c}px;padding-left: {d}px; color: {color};}}#notifIndicator{{background-color: rgb({accColors[bg]});color:rgb({accColors[fg]});}}"
if getSettings("UseCustomFontColor"):
print("🟡 Using custom text color:", getSettingsValue('UseCustomFontColor'))
self.lastTheme = -1
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), f"rgb({getSettingsValue('UseCustomFontColor')})")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
elif isTaskbarDark():
print("🟢 Using white text (dark mode)")
self.lastTheme = 0
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), "white")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
else:
print("🟢 Using black text (light mode)")
self.lastTheme = 1
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), "black")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .5
self.fontfamilies = [element.replace("Segoe UI Variable Display Semib", "Segoe UI Variable Display") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
self.font.setWeight(QFont.Weight.ExtraLight)
self.label.setFont(self.font)
self.label.clicked.connect(lambda: self.showCalendar())
self.label.move(0, 0)
self.label.setFixedHeight(self.height())
self.label.resize(self.width()-self.getPx(8), self.height())
self.label.show()
loadTimeFormat()
self.show()
self.raise_()
self.setFocus()
self.full_screen_rect = (self.screenGeometry.x(), self.screenGeometry.y(), self.screenGeometry.x()+self.screenGeometry.width(), self.screenGeometry.y()+self.screenGeometry.height())
print("🔵 Full screen rect: ", self.full_screen_rect)
self.forceDarkTheme = getSettings("ForceDarkTheme")
self.forceLightTheme = getSettings("ForceLightTheme")
self.hideClockWhenClicked = getSettings("HideClockWhenClicked")
self.isLowCpuMode = getSettings("EnableLowCpuMode")
self.primary_screen = QGuiApplication.primaryScreen()
self.oldBgColor = 0
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # optional, makes functions return real pixel numbers instead of scaled values
self.loop0 = KillableThread(target=self.updateTextLoop, daemon=True, name=f"Clock[{index}]: Time updater loop")
self.loop1 = KillableThread(target=self.mainClockLoop, daemon=True, name=f"Clock[{index}]: Main clock loop")
self.loop2 = KillableThread(target=self.backgroundLoop, daemon=True, name=f"Clock[{index}]: Background color loop")
self.loop0.start()
self.loop1.start()
self.loop2.start()
class QHoverButton(QPushButton):
hovered = Signal()
unhovered = Signal()
def __init__(self, text: str = "", parent: QObject = None) -> None:
super().__init__(text=text, parent=parent)
def enterEvent(self, event: QtCore.QEvent) -> None:
self.hovered.emit()
return super().enterEvent(event)
def leaveEvent(self, event: QtCore.QEvent) -> None:
self.unhovered.emit()
return super().leaveEvent(event)
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSd", 0) == 1) or getSettings("ShowDesktopButton"):
print("🟡 Desktop button enabled")
self.desktopButton = QHoverButton(parent=self)
self.desktopButton.clicked.connect(lambda: self.showDesktop())
self.desktopButton.show()
self.desktopButton.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.desktopButton.move(self.width()-self.getPx(10), 0)
self.desktopButton.resize(self.getPx(10), self.getPx(self.preferedHeight))
self.desktopButton.hovered.connect(lambda: self.desktopButton.setIcon(QIcon(getPath("showdesktop.png"))))
self.desktopButton.unhovered.connect(lambda: self.desktopButton.setIcon(QIcon()))
self.setFixedHeight(self.getPx(self.preferedHeight))
self.desktopButton.setStyleSheet(f"""
QPushButton{{
background-color: rgba(0, 0, 0, 0.01);
margin: 0px;
padding: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
QPushButton:hover{{
background-color: rgba(127, 127, 127, 1%);
margin: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
QPushButton:pressed{{
background-color: rgba(127, 127, 127, 1%);
margin: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
""")
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInch()/96))
def backgroundLoop(self):
while True:
try:
if self.taskbarBackgroundColor and not self.isLowCpuMode and not globals.trayIcon.contextMenu().isVisible():
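                    # Grab the 1x1 pixel just left of the clock and reuse its colour as the widget
                    # background, so the clock blends with whatever the taskbar shows behind it.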
intColor = self.primary_screen.grabWindow(0, self.x()+self.label.x()-1, self.y()+2, 1, 1).toImage().pixel(0, 0)
if intColor != self.oldBgColor:
self.oldBgColor = intColor
color = QColor(intColor)
self.styler.emit(self.widgetStyleSheet.replace("bgColor", f"{color.red()}, {color.green()}, {color.blue()}, 100"))
except AttributeError:
print("🟣 Expected AttributeError on backgroundLoop thread")
time.sleep(0.5)
def theresFullScreenWin(self, clockOnFirstMon, newMethod, legacyMethod):
try:
fullscreen = False
def compareFullScreenRects(window, screen, newMethod):
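                # newMethod: treat any window whose rect fully covers the screen as fullscreen;
                # the legacy check requires the window rect to match the screen rect exactly.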
try:
if(newMethod):
return window[0] <= screen[0] and window[1] <= screen[1] and window[2] >= screen[2] and window[3] >= screen[3]
else:
return window[0] == screen[0] and window[1] == screen[1] and window[2] == screen[2] and window[3] == screen[3]
except Exception as e:
report(e)
def winEnumHandler(hwnd, _):
nonlocal fullscreen
if win32gui.IsWindowVisible(hwnd):
if compareFullScreenRects(win32gui.GetWindowRect(hwnd), self.full_screen_rect, newMethod):
if clockOnFirstMon and self.textInputHostHWND == 0:
pythoncom.CoInitialize()
_, pid = win32process.GetWindowThreadProcessId(hwnd)
_wmi = win32com.client.GetObject('winmgmts:')
# collect all the running processes
processes = _wmi.ExecQuery(f'Select Name from win32_process where ProcessId = {pid}')
for p in processes:
if p.Name != "TextInputHost.exe":
if(win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps):
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
else:
print("🟢 Cached text input host hwnd:", hwnd)
self.textInputHostHWND = hwnd
self.INTLOOPTIME = 2
else:
if win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps and hwnd != self.textInputHostHWND:
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
if not legacyMethod:
win32gui.EnumWindows(winEnumHandler, 0)
else:
hwnd = win32gui.GetForegroundWindow()
if(compareFullScreenRects(win32gui.GetWindowRect(hwnd), self.full_screen_rect, newMethod)):
if(win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps):
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
return fullscreen
except Exception as e:
report(e)
return False
def mainClockLoop(self):
global isRDPRunning, numOfNotifs
EnableHideOnFullScreen = not(getSettings("DisableHideOnFullScreen"))
DisableHideWithTaskbar = getSettings("DisableHideWithTaskbar")
EnableHideOnRDP = getSettings("EnableHideOnRDP")
clockOnFirstMon = getSettings("ForceClockOnFirstMonitor")
newMethod = getSettings("NewFullScreenMethod")
notifs = not getSettings("DisableNotifications")
legacyMethod = getSettings("legacyFullScreenMethod")
oldNotifNumber = 0
print(f"🔵 Show/hide loop started with parameters: HideonFS:{EnableHideOnFullScreen}, NotHideOnTB:{DisableHideWithTaskbar}, HideOnRDP:{EnableHideOnRDP}, ClockOn1Mon:{clockOnFirstMon}, NefWSMethod:{newMethod}, DisableNotifications:{notifs}, legacyFullScreenMethod:{legacyMethod}")
if self.isLowCpuMode or clockOnFirstMon:
self.INTLOOPTIME = 15
else:
self.INTLOOPTIME = 2
while True:
self.isRDPRunning = isRDPRunning
isFullScreen = self.theresFullScreenWin(clockOnFirstMon, newMethod, legacyMethod)
for i in range(self.INTLOOPTIME):
if (not(isFullScreen) or not(EnableHideOnFullScreen)) and not self.clockShouldBeHidden:
if notifs:
if isFocusAssist:
self.callInMainSignal.emit(self.label.enableFocusAssistant)
elif numOfNotifs > 0:
if oldNotifNumber != numOfNotifs:
self.callInMainSignal.emit(self.label.enableNotifDot)
else:
self.callInMainSignal.emit(self.label.disableClockIndicators)
oldNotifNumber = numOfNotifs
if self.autoHide and not(DisableHideWithTaskbar):
mousePos = getMousePos()
if (mousePos.y()+1 == self.screenGeometry.y()+self.screenGeometry.height()) and self.screenGeometry.x() < mousePos.x() and self.screenGeometry.x()+self.screenGeometry.width() > mousePos.x():
self.refresh.emit()
elif (mousePos.y() <= self.screenGeometry.y()+self.screenGeometry.height()-self.preferedHeight):
self.hideSignal.emit()
else:
if(self.isRDPRunning and EnableHideOnRDP):
self.hideSignal.emit()
else:
self.refresh.emit()
else:
self.hideSignal.emit()
time.sleep(0.2)
time.sleep(0.2)
def updateTextLoop(self) -> None:
global timeStr
while True:
self.label.setText(timeStr)
time.sleep(0.1)
def showCalendar(self):
self.keyboard.press(Key.cmd)
self.keyboard.press('n')
self.keyboard.release('n')
self.keyboard.release(Key.cmd)
if self.hideClockWhenClicked:
print("🟡 Hiding clock because clicked!")
self.clockShouldBeHidden = True
def showClockOn10s(self: Clock):
time.sleep(10)
print("🟢 Showing clock because 10s passed!")
self.clockShouldBeHidden = False
KillableThread(target=showClockOn10s, args=(self,), name=f"Temporary: 10s thread").start()
def showDesktop(self):
self.keyboard.press(Key.cmd)
self.keyboard.press('d')
self.keyboard.release('d')
self.keyboard.release(Key.cmd)
def focusOutEvent(self, event: QFocusEvent) -> None:
self.refresh.emit()
def refreshandShow(self):
if(self.shouldBeVisible):
self.show()
self.raise_()
if(self.lastTheme >= 0): # If the color is not customized
theme = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "SystemUsesLightTheme", 1)
if(theme != self.lastTheme):
if (theme == 0 or self.forceDarkTheme) and not self.forceLightTheme:
self.lastTheme = 0
self.label.setStyleSheet(f"padding: {self.getPx(1)}px;padding-right: {self.getPx(3)}px;margin-right: {self.getPx(12)}px;padding-left: {self.getPx(5)}px; color: white;")#background-color: rgba({self.bgcolor}%)")
self.label.bgopacity = 0.1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
else:
self.lastTheme = 1
self.label.setStyleSheet(f"padding: {self.getPx(1)}px;padding-right: {self.getPx(3)}px;margin-right: {self.getPx(12)}px;padding-left: {self.getPx(5)}px; color: black;")#background-color: rgba({self.bgcolor}%)")
self.label.bgopacity = .5
self.fontfamilies = [element.replace("Segoe UI Variable Display Semib", "Segoe UI Variable Display") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
self.font.setWeight(QFont.Weight.ExtraLight)
self.label.setFont(self.font)
def closeEvent(self, event: QCloseEvent) -> None:
self.shouldBeVisible = False
try:
print(f"🟡 Closing clock on {self.win32screen}")
self.loop0.kill()
self.loop1.kill()
self.loop2.kill()
except AttributeError:
pass
event.accept()
return super().closeEvent(event)
def showEvent(self, event: QShowEvent) -> None:
return super().showEvent(event)
class Label(QLabel):
clicked = Signal()
def __init__(self, text, parent):
super().__init__(text, parent=parent)
self.setMouseTracking(True)
self.backgroundwidget = QWidget(self)
self.color = "255, 255, 255"
self.installEventFilter(self)
self.bgopacity = 0.1
self.backgroundwidget.setContentsMargins(0, self.window().prefMargins, 0, self.window().prefMargins)
self.backgroundwidget.setStyleSheet(f"background-color: rgba(127, 127, 127, 0.01);border-top: {self.getPx(1)}px solid rgba({self.color},0);margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};")
self.backgroundwidget.show()
if self.window().transparentBackground:
colorOffset = .01
else:
colorOffset = 0
self.showBackground = QVariantAnimation()
self.showBackground.setStartValue(0+colorOffset) # Not 0 to prevent white flashing on the border
self.showBackground.setEndValue(self.bgopacity)
self.showBackground.setDuration(100)
self.showBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.showBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: {self.getPx(1)}px solid rgba({self.color}, {opacity+colorOffset});margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};"))
self.hideBackground = QVariantAnimation()
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(0+colorOffset) # Not 0 to prevent white flashing on the border
self.hideBackground.setDuration(100)
self.hideBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.hideBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: {self.getPx(1)}px solid rgba({self.color}, {opacity+colorOffset});margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};"))
self.setAutoFillBackground(True)
self.backgroundwidget.setGeometry(0, 0, self.width(), self.height())
self.opacity=QGraphicsOpacityEffect(self)
self.opacity.setOpacity(1.00)
self.backgroundwidget.setGraphicsEffect(self.opacity)
self.focusassitant = True
self.focusAssitantLabel = QPushButton(self)
self.focusAssitantLabel.move(self.width(), 0)
self.focusAssitantLabel.setAttribute(Qt.WA_TransparentForMouseEvents)
self.focusAssitantLabel.setStyleSheet("background: transparent; margin: none; padding: none;")
self.focusAssitantLabel.resize(self.getPx(30), self.height())
self.focusAssitantLabel.setIcon(QIcon(getPath(f"moon_{getTaskbarIconMode()}.png")))
self.focusAssitantLabel.setIconSize(QSize(self.getPx(16), self.getPx(16)))
accColors = getColors()
self.notifdot = True
self.notifDotLabel = QLabel("", self)
self.notifDotLabel.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.notifDotLabel.setObjectName("notifIndicator")
self.notifDotLabel.setStyleSheet(f"font-size: 8pt;font-family: \"Segoe UI Variable Display\";border-radius: {self.getPx(8)}px;padding: 0px;padding-bottom: {self.getPx(2)}px;padding-left: {self.getPx(3)}px;padding-right: {self.getPx(2)}px;margin: 0px;border:0px;")
self.disableClockIndicators()
def enableFocusAssistant(self):
if not self.focusassitant:
if self.notifdot:
self.disableClockIndicators()
self.focusassitant = True
self.setContentsMargins(self.getPx(5), self.getPx(2), self.getPx(43), self.getPx(2))
self.focusAssitantLabel.move(self.width()-self.contentsMargins().right(), 0)
self.focusAssitantLabel.setFixedWidth(self.getPx(30))
self.focusAssitantLabel.setFixedHeight(self.height())
self.focusAssitantLabel.setIconSize(QSize(self.getPx(16), self.getPx(16)))
self.focusAssitantLabel.setIcon(QIcon(getPath(f"moon_{getTaskbarIconMode()}.png")))
self.focusAssitantLabel.show()
def enableNotifDot(self):
self.notifDotLabel.setText(str(numOfNotifs))
if not self.notifdot:
self.notifdot = True
self.setContentsMargins(self.getPx(5), self.getPx(2), self.getPx(43), self.getPx(2))
topBottomPadding = (self.height()-self.getPx(16))/2 # top-bottom margin
leftRightPadding = (self.getPx(30)-self.getPx(16))/2 # left-right margin
self.notifDotLabel.move(int(self.width()-self.contentsMargins().right()+leftRightPadding), int(topBottomPadding))
self.notifDotLabel.resize(self.getPx(16), self.getPx(16))
self.notifDotLabel.setStyleSheet(f"font-size: 8pt;font-family: \"Segoe UI Variable Display\";border-radius: {self.getPx(8)}px;padding: 0px;padding-bottom: {self.getPx(2)}px;padding-left: {self.getPx(3)}px;padding-right: {self.getPx(2)}px;margin: 0px;border:0px;")
self.notifDotLabel.show()
def disableClockIndicators(self):
if self.focusassitant:
self.focusassitant = False
self.setContentsMargins(self.getPx(6), self.getPx(2), self.getPx(13), self.getPx(2))
self.focusAssitantLabel.hide()
if self.notifdot:
self.notifdot = False
self.setContentsMargins(self.getPx(6), self.getPx(2), self.getPx(13), self.getPx(2))
self.notifDotLabel.hide()
def getPx(self, i: int) -> int:
return round(i*(self.screen().logicalDotsPerInch()/96))
def enterEvent(self, event: QEvent, r=False) -> None:
width = self.width()
self.showBackground.setStartValue(.01)
self.showBackground.setEndValue(self.bgopacity) # Not 0 to prevent white flashing on the border
# Same geometry whether or not the clock is on the left
self.backgroundwidget.move(0, 2)
self.backgroundwidget.resize(width, self.height()-4)
self.showBackground.start()
if not r:
self.enterEvent(event, r=True)
return super().enterEvent(event)
def leaveEvent(self, event: QEvent) -> None:
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(.01) # Not 0 to prevent white flashing on the border
self.hideBackground.start()
return super().leaveEvent(event)
def getTextUsedSpaceRect(self):
text = self.text().strip()
if len(text.split("\n"))>=3:
mult = 0.633333333333333333
elif len(text.split("\n"))==2:
mult = 1
else:
mult = 1.5
return self.fontMetrics().boundingRect(text).width()*mult
def mousePressEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(0.7)
self.opacity.setOpacity(0.60)
self.backgroundwidget.setGraphicsEffect(self.opacity)
return super().mousePressEvent(ev)
def mouseReleaseEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(1)
self.opacity.setOpacity(1.00)
self.backgroundwidget.setGraphicsEffect(self.opacity)
if(ev.button() == Qt.RightButton):
mousePos = getMousePos()
if(i.contextMenu().height() != 480):
mousePos.setY(self.window().y()-(i.contextMenu().height()+5))
else:
if getSettings("HideTaskManagerButton"):
mousePos.setY(self.window().y()-int(260*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
else:
mousePos.setY(self.window().y()-int(370*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
i.execMenu(mousePos)
else:
self.clicked.emit()
return super().mouseReleaseEvent(ev)
def paintEvent(self, event: QPaintEvent) -> None:
w = self.minimumSizeHint().width()
try:
mw = int(getSettingsValue("ClockFixedWidth"))
if mw > w:
w = mw
except Exception as e:
report(e)
if w<self.window().getPx(self.window().preferedwidth) and not self.window().clockOnTheLeft:
self.move(self.window().getPx(self.window().preferedwidth)-w+self.getPx(2), 0)
self.resize(w, self.height())
else:
self.move(0, 0)
self.resize(w, self.height())
return super().paintEvent(event)
def resizeEvent(self, event: QResizeEvent) -> None:
if self.focusassitant:
self.focusassitant = False
self.enableFocusAssistant()
elif self.notifdot:
self.notifdot = False
self.enableNotifDot()
else:
self.notifdot = True
self.focusassitant = True
self.disableClockIndicators()
return super().resizeEvent(event)
def window(self) -> Clock:
return super().window()
# Start of main script
QApplication.setAttribute(Qt.AA_DisableHighDpiScaling)
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
mController: MouseController = None
sw: SettingsWindow = None
i: TaskbarIconTray = None
st: KillableThread = None # Will be defined on loadClocks
KillableThread(target=resetRestartCount, daemon=True, name="Main: Restart counter").start()
KillableThread(target=timeStrThread, daemon=True, name="Main: Locale string loader").start()
loadClocks()
print(f"🟢 Loaded clocks in {time.time()-FirstTime}")
tdir = tempfile.TemporaryDirectory()
tempDir = tdir.name
sw = SettingsWindow() # Declare settings window
i = TaskbarIconTray(app)
mController = MouseController()
app.primaryScreenChanged.connect(lambda: os.startfile(sys.executable))
app.screenAdded.connect(lambda: os.startfile(sys.executable))
app.screenRemoved.connect(lambda: os.startfile(sys.executable))
signal = RestartSignal()
showNotif = InfoSignal()
showWarn = InfoSignal()
killSignal = InfoSignal()
showNotif.infoSignal.connect(lambda a, b: showMessage(a, b))
showWarn.infoSignal.connect(lambda a, b: wanrUserAboutUpdates(a, b))
killSignal.infoSignal.connect(lambda: app.quit())
signal.restartSignal.connect(lambda: restartClocks("checkLoop"))
KillableThread(target=updateChecker, daemon=True, name="Main: Updater").start()
KillableThread(target=isElevenClockRunningThread, daemon=True, name="Main: Instance controller").start()
if not getSettings("EnableLowCpuMode"): KillableThread(target=checkIfWokeUpThread, daemon=True, name="Main: Sleep listener").start()
if not getSettings("EnableLowCpuMode"): KillableThread(target=wnfDataThread, daemon=True, name="Main: WNF Data listener").start()
print("🔵 Low cpu mode is set to", str(getSettings("EnableLowCpuMode"))+". DisableNotifications is set to", getSettings("DisableNotifications"))
rdpThread = KillableThread(target=checkRDP, daemon=True, name="Main: Remote desktop controller")
if getSettings("EnableHideOnRDP"):
rdpThread.start()
globals.tempDir = tempDir # Register global variables
globals.old_stdout = old_stdout # Register global variables
globals.buffer = buffer # Register global variables
globals.app = app # Register global variables
globals.sw = sw # Register global variables
globals.trayIcon = i # Register global variables
globals.loadTimeFormat = loadTimeFormat # Register global functions
globals.updateIfPossible = updateIfPossible # Register global functions
globals.restartClocks = restartClocks # Register global functions
globals.closeClocks = closeClocks # Register global functions
if not(getSettings("Updated3.21Already")) and not(getSettings("EnableSilentUpdates")):
setSettings("ForceClockOnFirstMonitor", True)
setSettings("Updated3.21Already", True)
msg = QFramelessDialog(parent=None, closeOnClick=False)
msg.setAutoFillBackground(True)
msg.setStyleSheet(sw.styleSheet())
msg.setAttribute(QtCore.Qt.WA_StyledBackground)
msg.setObjectName("QMessageBox")
msg.setTitle("ElevenClock Updater")
msg.setText(f"""<b>ElevenClock has updated to version {versionName} successfully.</b>
<br><br>This update brings:<br>
<ul><li>The ability to specify a clock minimum width</li>
<li> The ability to search through the settings</li>
<li> Fixed an aesthetic issue with the seconds</li>
<li> Added a button to reset ElevenClock</li>
<li> Fixed an issue where ElevenClock would crash when clicking the right-click menu</li>
<li> Added Nynorsk</li>
<li> Some bugfixing and other improvements</li></ul>""")
msg.addButton("Ok", QDialogButtonBox.ButtonRole.ApplyRole, lambda: msg.close())
msg.addButton("Full changelog", QDialogButtonBox.ButtonRole.ResetRole, lambda: os.startfile("https://github.com/martinet101/ElevenClock/releases"))
def settNClose():
sw.show()
msg.close()
msg.addButton("Settings", QDialogButtonBox.ButtonRole.ActionRole, lambda: settNClose())
msg.setDefaultButtonRole(QDialogButtonBox.ButtonRole.ApplyRole, sw.styleSheet())
msg.setWindowTitle("ElevenClock has updated!")
msg.show()
showSettings = False
if "--settings" in sys.argv or showSettings:
sw.show()
if not getSettings("DefaultPrefsLoaded"):
setSettings("AlreadyInstalled", True)
setSettings("NewFullScreenMethod", True)
setSettings("ForceClockOnFirstMonitor", True)
showMessage("Welcome to ElevenClock", "You can customize Elevenclock from the ElevenClock Settings. You can search them on the start menu or right-clicking on any clock -> ElevenClock Settings", uBtn=False)
print("🟢 Default settings loaded")
setSettings("DefaultPrefsLoaded", True)
showWelcomeWizard = False
if showWelcomeWizard or "--welcome" in sys.argv:
import welcome
ww = welcome.WelcomeWindow()
print(f"🟢 Loaded everything in {time.time()-FirstTime}")
if "--quit-on-loaded" in sys.argv: # This is a testing feature to test if the script can load successfully
sys.exit(0)
app.exec_()
sys.exit(0)
except Exception as e:
import webbrowser, traceback, platform
if not "versionName" in locals() and not "versionName" in globals():
versionName = "Unknown"
if not "version" in locals() and not "version" in globals():
version = "Unknown"
os_info = f"" + \
f" OS: {platform.system()}\n"+\
f" Version: {platform.win32_ver()}\n"+\
f" OS Architecture: {platform.machine()}\n"+\
f" APP Architecture: {platform.architecture()[0]}\n"+\
f" APP Version: {versionName}\n"+\
f" APP Version Code: {version}\n"+\
f" Program: ElevenClock"+\
"\n\n-----------------------------------------------------------------------------------------"
traceback_info = "Traceback (most recent call last):\n"
try:
for line in traceback.extract_tb(e.__traceback__).format():
traceback_info += line
traceback_info += f"\n{type(e).__name__}: {str(e)}"
except:
traceback_info += "\nUnable to get traceback"
traceback_info += str(type(e))
traceback_info += ": "
traceback_info += str(e)
webbrowser.open(("https://www.somepythonthings.tk/error-report/?appName=ElevenClock&errorBody="+os_info.replace('\n', '{l}').replace(' ', '{s}')+"{l}{l}{l}{l}ElevenClock Log:{l}"+str("\n\n\n\n"+traceback_info).replace('\n', '{l}').replace(' ', '{s}')).replace("#", "|=|"))
print(traceback_info)
sys.exit(1)
|
import lxml.html
musicUrl= "http://books.toscrape.com/catalogue/category/books/music_14/index.html"
doc = lxml.html.parse(musicUrl)
#base element
articles = doc.xpath("//*[@id='default']/div/div/div/div/section/div[2]/ol/li[1]/article")[0]
#individual element inside base
title = articles.xpath("//h3/a/text()")
price = articles.xpath("//div[2]/p[contains(@class,'price_color')]/text()")
availability = articles.xpath("//div[2]/p[2][contains(@class,'availability')]/text()[normalize-space()]")
imageUrl = articles.xpath("//div[1][contains(@class,'image_container')]/a/img/@src")
starRating = articles.xpath("//p[contains(@class,'star-rating')]/@class")
#cleaning and formatting
stock = list(map(lambda stock:stock.strip(),availability))
images = list(map(lambda img:img.replace('../../../..','http://books.toscrape.com'),imageUrl))
rating = list(map(lambda rating:rating.replace('star-rating ',''),starRating))
print(title)
print(price)
print(stock)
print(images)
print(rating)
#Merging all
dataset = zip(title,price,stock,images,rating)
print(list(dataset))
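# A follow-up sketch (not part of the original snippet): the same relative XPaths can be
# applied per product card to scrape every article on the page instead of only the first.
# The class name 'product_pod' and the @title attribute are assumptions about the page markup.
def scrape_page(url):
    page = lxml.html.parse(url)
    rows = []
    for article in page.xpath("//article[contains(@class,'product_pod')]"):
        rows.append({
            'title': article.xpath(".//h3/a/@title"),
            'price': article.xpath(".//p[contains(@class,'price_color')]/text()"),
            'rating': [c.replace('star-rating ', '') for c in article.xpath(".//p[contains(@class,'star-rating')]/@class")],
        })
    return rows
# Example: print(scrape_page(musicUrl))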
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-monitor-query"
PACKAGE_PPRINT_NAME = "Azure Monitor Query"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
"Development Status :: 4 - Beta",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
'samples',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.monitor',
]),
install_requires=[
'msrest>=0.6.19',
'azure-core<2.0.0,>=1.12.0',
],
extras_require={
":python_version<'3.0'": ['azure-monitor-nspkg'],
}
)
|
from .utils import *
from .funcs import *
def test_unit():
storage = Storage()
@op(storage)
def f(x:int) -> int:
return x + 1
@superop(storage)
def f_twice(x:int) -> int:
return f(f(x))
with run(storage, autocommit=True):
f_twice(42)
cg = storage.call_graph_st
nodes = cg.get_nodes()
assert nodes == [f_twice.op.qualified_name, f.op.qualified_name]
assert cg.get_neighbors(node=nodes[0]) == [f.op.qualified_name]
assert cg.get_callers(node=f.op.qualified_name) == [f_twice.op.qualified_name]
### now, check that we detect invalidation of previous version of calling superop
@op(storage, version='1')
def f(x:int) -> int:
return x - 1
# this should not work
try:
@superop(storage)
def f_twice(x:int) -> int:
return f(f(x))
assert False
except SynchronizationError:
assert True
except:
assert False
# this should work
try:
@superop(storage, version='1')
def f_twice(x:int) -> int:
return f(f(x))
assert True
except SynchronizationError:
assert False
except:
assert False
|
import pytest
from src.conanbuilder.remote import Remote
@pytest.fixture
def remote():
return Remote("myName", "myUrl")
def test_default_values(remote):
assert remote.name == "myName"
assert remote.url == "myUrl"
assert remote.verify_ssl is True
assert remote.priority == 0
assert remote.force is False
assert remote.login is False
|
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
# settings value
@register.simple_tag
def settings_value(name):
defaults = {
'SITE_HEADER': '<b>Map</b>Ground',
'SITE_TITLE': 'MapGround'
}
if name in defaults:
return mark_safe(getattr(settings, name, defaults[name]))
else:
return ''
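# Usage sketch (assuming this module is registered as a template tag library and loaded
# in the template with {% load %}):
#   {% settings_value "SITE_HEADER" %}  -> settings.SITE_HEADER, or the default '<b>Map</b>Ground'
#   {% settings_value "SECRET_KEY" %}   -> '' (only whitelisted names are exposed)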
|
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import tensorflow as tf
import tensorflow_datasets as tfds
np.random.seed(0)
ds, info = tfds.load('emnist', split='test', shuffle_files=False, with_info=True) # horribly slow
print(info)
plt.figure(figsize=(10, 10))
i = 0
for example in ds:
image = example["image"]
label = example["label"]
plt.subplot(5, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image.numpy().squeeze(), cmap='gray')
plt.title(int(label))
i += 1
if i >= 25: break
pml.savefig("emnist-data.pdf")
plt.show()
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@itemprop='name']",
'price' : "//div[@class='div-new-price']/span[@class='new-price']",
'category' : "//span[@class='item']/a[@itemprop='url']/span[@itemprop='title']",
'description' : "//div[@class='block-template-content']/div[@class='clearfix mt2x']",
'images' : "//div[@class='dsi-img full-cover ']/@data-image-hoverattribute",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : "//div[@class='infos prod-detail-brand']/a[@class='font-semibold brand-name']",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'lingo.vn'
allowed_domains = ['lingo.vn']
start_urls = ['http://lingo.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=[r'/[\w-]+-p\d+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=[r'/[\w-]+-c\d+/($|\?page=\d+$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
import numpy as np
from sympy import Symbol
def interpolate_cubic(p1, p2, k_traj, t):
'''
Computes a smooth cubic polynomial between 2 N-dimensional points
Input:
p1: Nx1 numpy array, the first point
p2: Nx1 numpy array, the second point
k_traj: int, number of samples along the trajectory
t: Scalar, time needed to traverse the polynomial from point 1 to point 2
Returns:
traj: (N+1) x k_traj matrix with all interpolated position points for each axis + timesteps
dtraj: (N+1) x k_traj matrix with all interpolated velocities for each axis + timesteps
ddtraj: (N+1) x k_traj matrix with all interpolated accelerations for each axis + timesteps
dddtraj: (N+1) x k_traj matrix with all interpolated jerks for each axis + timesteps
'''
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_third_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_quintic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_fifth_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_septic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_seventh_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_nonic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_ninth_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_trapezoid(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_trapezoid_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_minimum_jerk_derivative(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_minimum_jerk_derivative_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def get_normalized_first_degree_polynomial(k_traj):
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
for i in range(k_traj):
t = tau[i]
stau[i] = t
dstau_dtau[i] = 1
ddstau_ddtau[i] = 0
dddstau_dddtau[i] = 0
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_third_degree_polynomial(k_traj):
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
for i in range(k_traj):
t = tau[i]
stau[i] = -2 * (t ** 3) + 3 * (t ** 2)
dstau_dtau[i] = -6 * (t ** 2) + 6 * t
ddstau_ddtau[i] = -12 * t + 6
dddstau_dddtau[i] = -12
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_fifth_degree_polynomial(k_traj):
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
for i in range(k_traj):
t = tau[i]
stau[i] = 6 * (t ** 5) - 15 * (t ** 4) + 10 * (t ** 3)
dstau_dtau[i] = 30 * (t ** 4) - 60 * (t ** 3) + 30 * (t ** 2)
ddstau_ddtau[i] = 120 * (t ** 3) - 180 * (t ** 2) + 60 * t
dddstau_dddtau[i] = 360 * (t ** 2) - 360 * t + 60
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_seventh_degree_polynomial(k_traj):
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
for i in range(k_traj):
t = tau[i]
stau[i] = -20 * (t ** 7) + 70 * (t ** 6) - 84 * (t ** 5) + 35 * (t ** 4)
dstau_dtau[i] = -140 * (t ** 6) + 420 * (t ** 5) - 420 * (t ** 4) + 140 * (t ** 3)
ddstau_ddtau[i] = -840 * (t ** 5) + 2100 * (t ** 4) - 1680 * (t ** 3) + 420 * (t ** 2)
dddstau_dddtau[i] = -4200 * (t ** 4) + 8400 * (t ** 3) - 5040 * (t ** 2) + 840 * t
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_ninth_degree_polynomial(k_traj):
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
for i in range(1, k_traj):
t = tau[i]
stau[i] = 70 * (t ** 9) - 315 * (t ** 8) + 540 * (t ** 7) - 420 * (t ** 6) + 126 * (t ** 5)
dstau_dtau[i] = 630 * (t ** 8) - 2520 * (t ** 7) + 3780 * (t ** 6) - 2520 * (t ** 5) + 630 * (t ** 4)
ddstau_ddtau[i] = 5040 * (t ** 7) - 17640 * (t ** 6) + 22680 * (t ** 5) - 12600 * (t ** 4) + 2520 * (t ** 3)
dddstau_dddtau[i] = 35280 * (t ** 6) - 105840 * (t ** 5) + 113400 * (t ** 4) - 50400 * (t ** 3) + 7560 * (
t ** 2)
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_trapezoid_polynomial(k_traj):
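# Trapezoidal velocity profile: accelerate over the first 10% of the normalized time,
# cruise at constant velocity v_m, then decelerate over the last 10%; v_m is chosen so
# that the integrated profile (the position s) reaches exactly 1 at tau = 1.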
t_acc = 1 / 10.
t_ct = 1 - 2 * t_acc
v_m = 1.0 / (t_acc + t_ct)
x = t_acc
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
for i in range(k_traj):
t = tau[i]
if 0 <= t <= x:
res = 0.5 * v_m * (t ** 2) / t_acc
vel = v_m * t / t_acc
elif x < t <= 1 - x:
res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * (t - t_acc)
vel = v_m
elif t > 1 - x:
res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * t_ct + v_m * (t - t_acc - t_ct) - 0.5 * v_m / t_acc * (
t - t_acc - t_ct) ** 2
vel = v_m - v_m / t_acc * (t - t_acc - t_ct)
else:
res = None
vel = None
stau[i] = res
dstau_dtau[i] = vel
for i in range(tau.size - 2):
dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])
for i in range(tau.size - 2):
ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])
for i in range(tau.size - 2):
dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_minimum_jerk_derivative_polynomial(k_traj):
x = (1 - np.sqrt(0.5)) / 2
tau = np.linspace(0, 1, k_traj)
stau = np.linspace(0, 1, k_traj)
dstau_dtau = np.linspace(0, 0, k_traj)
ddstau_ddtau = np.linspace(0, 0, k_traj)
dddstau_dddtau = np.linspace(0, 0, k_traj)
res = None
for i in range(k_traj - 1):
t = tau[i]
if 0 <= t <= x:
res = 16 * (t ** 4)
elif x < t <= 0.5:
res = -16 * (t ** 4) + 128 * x * (t ** 3) - 192 * (x ** 2) * (t ** 2) + 128 * (x ** 3) * t - 32 * (x ** 4)
elif 0.5 < t <= 1 - x:
res = 1 + 16 * ((1 - t) ** 4) - 128 * x * ((1 - t) ** 3) + 192 * (x ** 2) * ((1 - t) ** 2) - 128 * (
x ** 3) * (1 - t) + 32 * (x ** 4)
elif 1 - x < t <= 1:
res = 1 - 16 * (1 - t) ** 4
stau[i] = res
for i in range(tau.size - 2):
dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])
for i in range(tau.size - 2):
ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])
for i in range(tau.size - 2):
dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])
return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_cubic_polynomial_coefficients():
# Kinematic equations for a cubic polynomial
x0 = [1, 0, 0, 0]
xt = [1, 1, pow(1, 2), pow(1, 3)]
v0 = [0, 1, 0, 0]
vt = [0, 1, 2 * 1, 3 * pow(1, 2)]
# Solve polynomial coefficients
a = np.array([x0, xt, v0, vt], dtype='float')
b = np.array([[0], [1], [0], [0]], dtype='float')
polynomial = np.linalg.solve(a, b)
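# For these boundary conditions the solution is [0, 0, 3, -2],
# i.e. s(t) = 3*t**2 - 2*t**3, which matches get_normalized_third_degree_polynomial above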
return polynomial
def get_normalized_quintic_polynomial_coefficients():
# Kinematic equations for a quintic polynomial
x0 = [1, 0, 0, 0, 0, 0]
xt = [1, 1, pow(1, 2), pow(1, 3), pow(1, 4), pow(1, 5)]
v0 = [0, 1, 0, 0, 0, 0]
vt = [0, 1, 2 * 1, 3 * pow(1, 2), 4 * pow(1, 3), 5 * pow(1, 4)]
a0 = [0, 0, 2, 0, 0, 0]
at = [0, 0, 2, 6 * 1, 12 * pow(1, 2), 20 * pow(1, 3)]
# Solve polynomial coefficients
a = np.array([x0, xt, v0, vt, a0, at], dtype='float')
b = np.array([[0], [1], [0], [0], [0], [0]], dtype='float')
polynomial = np.linalg.solve(a, b)
return polynomial
def get_normalized_septic_polynomial_coefficients():
# Kinematic equations for a septic (7th-degree) polynomial
x0 = [1, 0, 0, 0, 0, 0, 0, 0]
xt = [1, 1, pow(1, 2), pow(1, 3), pow(1, 4), pow(1, 5), pow(1, 6), pow(1, 7)]
v0 = [0, 1, 0, 0, 0, 0, 0, 0]
vt = [0, 1, 2 * 1, 3 * pow(1, 2), 4 * pow(1, 3), 5 * pow(1, 4), 6 * pow(1, 5), 7 * pow(1, 6)]
a0 = [0, 0, 2, 0, 0, 0, 0, 0]
at = [0, 0, 2, 6 * 1, 12 * pow(1, 2), 20 * pow(1, 3), 30 * pow(1, 4), 42 * pow(1, 5)]
j0 = [0, 0, 0, 6, 0, 0, 0, 0]
jt = [0, 0, 0, 6, 24 * 1, 60 * pow(1, 2), 120 * pow(1, 3), 210 * pow(1, 4)]
# Solve polynomial coefficients
a = np.array([x0, xt, v0, vt, a0, at, j0, jt], dtype='float')
b = np.array([[0], [1], [0], [0], [0], [0], [0], [0]], dtype='float')
polynomial = np.linalg.solve(a, b)
return polynomial
def get_normalized_nonic_polynomial_coefficients():
# Kinematic boundary conditions for a nonic (9th-degree) polynomial:
# position, velocity, acceleration, jerk and snap at t=0 and t=1
x0 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
xt = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
v0 = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
vt = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
a0 = [0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
at = [0, 0, 2, 6, 12, 20, 30, 42, 56, 72]
j0 = [0, 0, 0, 6, 0, 0, 0, 0, 0, 0]
jt = [0, 0, 0, 6, 24, 60, 120, 210, 336, 504]
s0 = [0, 0, 0, 0, 24, 0, 0, 0, 0, 0]
st = [0, 0, 0, 0, 24, 120, 360, 840, 1680, 3024]
# Solve polynomial coefficients
a = np.array([x0, xt, v0, vt, a0, at, j0, jt, s0, st], dtype='float')
b = np.array([[0], [1], [0], [0], [0], [0], [0], [0], [0], [0]], dtype='float')
polynomial = np.linalg.solve(a, b)
return polynomial
def interpolate_quint_2(p1, p2, dp1, dp2, ddp1, ddp2, k_traj, T):
'''
Computes a smooth quintic polynomial between 2 N-dimensional points
Input:
p1: Nx1 numpy array the first point
p2: Nx1 numpy array the second point
dp1: Nx1 numpy array of the required velocities at the first point
dp2: Nx1 numpy array of the required velocities at the second point
ddp1: Nx1 numpy array of the required accelerations the first point
ddp2: Nx1 numpy array of the required accelerations the second point
T: Scalar which denotes the time needed to traverse the polynomal from point 1 to point 2
k_traj: int, number of samples along the trajectory
Returns:
traj: (N+1) x (Txf) matrix with all interpolated position points for each axis + timesteps
dtraj: (N+1) x (Txf) matrix with all interpolated velocities for each axis + timesteps
ddtraj: (N+1) x (Txf) matrix with all interpolated accelerations for each axis + timesteps
'''
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(dp1) == np.ndarray and type(dp2) == np.ndarray
assert type(ddp1) == np.ndarray and type(ddp2) == np.ndarray
assert type(k_traj) == int and (type(T) == float or type(T) == int)
# Kinematic equations for a quintic polynomial
x0 = [1, 0, 0, 0, 0, 0]
xT = [1, T, pow(T, 2), pow(T, 3), pow(T, 4), pow(T, 5)]
v0 = [0, 1, 0, 0, 0, 0]
vT = [0, 1, 2 * T, 3 * pow(T, 2), 4 * pow(T, 3), 5 * pow(T, 4)]
a0 = [0, 0, 2, 0, 0, 0]
aT = [0, 0, 2, 6 * T, 12 * pow(T, 2), 20 * pow(T, 3)]
# Kinematic matrix
A = np.array([x0, xT, v0, vT, a0, aT], dtype='float')
# Interpolate
traj_list = []
dtraj_list = []
ddtraj_list = []
t = Symbol('t')
tv = np.linspace(0, T, k_traj)
for i in range(len(p1)):
B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]], [ddp1[i]], [ddp2[i]]], dtype='float')
x = np.linalg.solve(A, B)
traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3) + x[4, 0] * pow(t, 4) + x[
5, 0] * pow(t, 5)
dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2) + 4 * x[4, 0] * pow(t, 3) + 5 * x[
5, 0] * pow(t, 4)
ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t + 12 * x[4, 0] * pow(t, 2) + 20 * x[5, 0] * pow(t, 3)
traj_ = [traj.subs(t, tv_) for tv_ in tv]
dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]
ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
return traj, dtraj, ddtraj
def interpolate_cubic_2(p1, p2, k_traj, T, dp1=np.zeros((6, 1)), dp2=np.zeros((6, 1))):
'''
Computes a smooth cubic polynomial between 2 N-dimensional points
Input:
p1: Nx1 numpy array, the first point
p2: Nx1 numpy array, the second point
dp1: Nx1 numpy array of the required velocities at the first point
dp2: Nx1 numpy array of the required velocities at the second point
k_traj: int, number of samples along the trajectory
T: Scalar, time needed to traverse the polynomial from point 1 to point 2
Returns:
traj: (N+1) x k_traj matrix with all interpolated position points for each axis + timesteps
dtraj: (N+1) x k_traj matrix with all interpolated velocities for each axis + timesteps
ddtraj: (N+1) x k_traj matrix with all interpolated accelerations for each axis + timesteps
'''
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(dp1) == np.ndarray and type(dp2) == np.ndarray
assert type(k_traj) == int and (type(T) == float or type(T) == int)
# Kinematic equations for a cubic polynomial
x0 = [1, 0, 0, 0]
xT = [1, T, pow(T, 2), pow(T, 3)]
v0 = [0, 1, 0, 0]
vT = [0, 1, 2 * T, 3 * pow(T, 2)]
# Kinematic matrix
A = np.array([x0, xT, v0, vT], dtype='float')
traj_list = []
dtraj_list = []
ddtraj_list = []
t = Symbol('t')
tv = np.linspace(0, T, k_traj)
for i in range(len(p1)):
B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]]], dtype='float')
x = np.linalg.solve(A, B)
traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3)
dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2)
ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t
traj_ = [traj.subs(t, tv_) for tv_ in tv]
dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]
ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
traj = np.array(traj_list)
dtraj = np.array(dtraj_list)
ddtraj = np.array(ddtraj_list)
return traj, dtraj, ddtraj
def interpolate_viapoints(p, v1, vn, k_traj, t):
'''
Computes a smooth cubic polynomial through M N-dimensional via points
Input:
p: MxN numpy array containing all points
v1: Nx1 numpy array of the required velocities at the first point
vn: Nx1 numpy array of the required velocities at the last point
t: Mx1 numpy array of the timesteps at which the points should be reached
k_traj: int, number of samples per spline segment
Returns:
traj: (N+1) x (Txf) matrix with all interpolated position points for each axis + timesteps
dtraj: (N+1) x (Txf) matrix with all interpolated velocities for each axis + timesteps
ddtraj: (N+1) x (Txf) matrix with all interpolated accelerations for each axis + timesteps
'''
assert type(p) == np.ndarray and type(k_traj) == int
# Compute time interval matrix
h = list(np.zeros((len(t) - 1, 1)))
for i in range(len(t) - 1):
h[i] = t[i + 1] - t[i]
# Compute A(h) matrix
A = np.zeros((len(h) - 1, len(h) - 1))
for i in range(len(h) - 1):
for j in range(len(h) - 1):
if i == j:
A[i][j] = 2 * (h[i] + h[i + 1])
if i == j + 1:
A[i][j] = h[i + 1]
if j == i + 1:
A[i][j] = h[i]
# Compute known B(p0,p1,h,v1,vn) matrix
B = np.zeros((len(h) - 1, len(p[0])))
for i in range(len(h) - 1):
B[i] = (3 / (h[i] * h[i + 1])) * (
pow(h[i], 2) * (np.subtract(p[i + 2], p[i + 1])) + pow(h[i + 1], 2) * (np.subtract(p[i + 1], p[i])))
B[0] = B[0] - np.dot(h[1], v1)
B[-1] = B[-1] - np.dot(h[-2], vn)
# Solve for all unknown velocities of intermediate knots
x = np.linalg.solve(A, B)
vel = [v1.copy()]
[vel.append(x[i]) for i in range(len(x))]
vel.append(vn.copy())
# Compute N-1 polynomials using computed velocities
traj = [[0], [0], [0], [0], [0], [0], [0]]
dtraj = [[0], [0], [0], [0], [0], [0], [0]]
ddtraj = [[0], [0], [0], [0], [0], [0], [0]]
for i in range(len(p) - 1):
traj_, dtraj_, ddtraj_ = interpolate_cubic_2(p[i], p[i + 1], k_traj, float(h[i]), vel[i], vel[i + 1])
for j in range(len(traj) - 1):
traj[j].extend(traj_[j])
dtraj[j].extend(dtraj_[j])
ddtraj[j].extend(ddtraj_[j])
traj[-1].extend(traj_[-1] + traj[-1][-1])
dtraj[-1].extend(dtraj_[-1] + dtraj[-1][-1])
ddtraj[-1].extend(ddtraj_[-1] + ddtraj[-1][-1])
traj = np.asarray(np.delete(traj, 0, 1))
dtraj = np.asarray(np.delete(dtraj, 0, 1))
ddtraj = np.asarray(np.delete(ddtraj, 0, 1))
return traj, dtraj, ddtraj
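# Minimal usage sketch (illustrative, not part of the original module): interpolate a cubic
# trajectory between two 3-D points, sampled at 100 points over 2 seconds.
if __name__ == "__main__":
    start = np.array([0.0, 0.0, 0.0])
    goal = np.array([1.0, 2.0, 3.0])
    traj, dtraj, ddtraj, dddtraj = interpolate_cubic(start, goal, 100, 2.0)
    # Each of the first 3 rows holds one axis; the last row holds the timestamps.
    print(traj.shape)   # (4, 100)
    print(traj[:, -1])  # final sample reaches the goal point at t = 2.0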
|
from model.Sender import Sender
from model.SenderType import SenderType
import logging
import math
import numpy as np
class NoobSender(Sender):
def __init__(self, id, deliveryRate, debug=True):
super().__init__(id, SenderType.Noob, deliveryRate=deliveryRate, debug=debug)
def getNumberOfPacketsToCreateForTimeStep(self, timeStep):
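# Spread a possibly fractional deliveryRate evenly over integer time steps:
# e.g. a rate of 2.5 packets/step yields 2, 3, 2, 3, ... so the long-run average stays 2.5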
num = math.floor(timeStep * self.deliveryRate) - math.floor((timeStep - 1) * self.deliveryRate)
# print(num)
# randomness
# if self.debug:
# logging.info(f"Sender #{self.id} creating {numberOfPackets} packets at {timeStep}")
# return math.floor( num * np.random.uniform(0.5, 1.1))
return num
def onTimeStepStart(self, timeStep):
"""To be called at the beginning of a timeStep
Args:
timeStep ([type]): [description]
"""
pass
def onTimeStepEnd(self, timeStep):
"""To be called at the end of a timeStep
Args:
timeStep ([type]): [description]
"""
pass
def onACK(self, packet):
super().onACK(packet)
# packet loss conditions:
# 1. ACK out of order.
# 2.
# if self.debug:
# logging.info(f"{self.getName()}: got ack for packet {packet.getPacketNumber()}")
pass
|
'''
-----------------------------------------------------------------------
Additional Documentation
Made by Zachary A Brader, Kieran Coito, Pedro Goncalves Mokarzel
while attending University of Washington Bothell
Made in 03/09/2020
Based on instruction in CSS 458,
taught by professor Johnny Lin
Notes:
- Written for Python 3.7.3.
- No executable
- Modules necessary: numpy, random, and matplotlib.pyplot
- External necessities: variables.py, cashier.py, customer.py, and
equal_distribution_line
- Creates line environment for the use of mode
- Holds lists with relevant to the line
- Holds cashiers and customers
- Used equal_distribution_line as a base for other lines
- Line will give a customer to cashier that looks like it will go the
fastest
=======================================================================
'''
# =======================================================================
# ============================= Imports==================================
# =======================================================================
import numpy as np
import random as r
import matplotlib.pyplot as plt
import variables as v
from cashier import cashier
from customer import customer
from equal_distribution_line import equal_distribution_line
# =======================================================================
# ================================= Class ===============================
# =======================================================================
class customer_selection_line(equal_distribution_line):
'''
Inherits equal_distribution_line
Line acts such that each customer chooses the line they expect to be the best
'''
# List of customers in queue
# Implemented
customer_list = 0
# Array to keep track of automated cashier
# Implemented
automated_cashier_tracker = 0
# Maintain cost of maintenance for all lines
# Implemented
cost_for_maintenance = 0
# Not implemented
time_step = 0
# Number of cashiers in system
# implemented
number_of_cashiers = 0
# Total number of customers processed by the line
# Initialization implemented
total_number_of_customers = 0
# Customers currently being served
# implemented
customers_being_served = 0
# Total number of customers current line
# Implemented
customers_waiting_to_queue = 0
# Customers that have left the system at point of simulation
# Implemented
customers_that_left = 0
# Implementation
total_number_of_checked_items = 0
total_number_of_items_in_system = 0
def rotate_customers(self):
''' Move waiting customers from the line into the cashiers' queues
Each customer joins the queue they consider will be the fastest
Precondition:
- Customers and cashier related lists created
Postcondition:
- Removal of customers in the environment list, and then the addition to queues
'''
# number_of_customers_entering_queue = int(np.random.rand()*(self.number_of_cashiers-1)) +1
# test = []
# for i in range(1000):
# test.append(int(rej()*self.number_of_cashiers))
# plt.hist(test)
# plt.show()
for individual_cashier_iterator in range(len(self.cashier_list)):
if (len(self.customer_list) > 0):
# Updates waiting queue:
smallest_cashier = self.cashier_list[0]
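# Hand the next waiting customer to the cashier whose queue compares smallest
# (this assumes the cashier class defines rich comparisons, e.g. by expected wait time)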
for cashier_iterator in self.cashier_list:
if(smallest_cashier > cashier_iterator):
smallest_cashier = cashier_iterator
smallest_cashier.add_customer_to_queue(self.customer_list.pop())
self.customers_waiting_to_queue = self.customers_waiting_to_queue - 1
# self.cashier_list.sort()
|
__author__ = 'Danyang'
import logging
import sys
class LoggerFactory(object):
def getConsoleLogger(self, cls_name, level=logging.DEBUG):
lgr = logging.getLogger(cls_name)
lgr.setLevel(level)
if not lgr.handlers:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
lgr.addHandler(ch)
return lgr
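# Usage sketch (illustrative): loggers are cached by name, and a handler is only attached once.
#   logger = LoggerFactory().getConsoleLogger("my.module")
#   logger.debug("hello")  # -> "<timestamp> - my.module - DEBUG - hello" on stdout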
|
import datetime
def yesterday(today=None):
    # Resolve the default at call time; a default of datetime.now() in the signature
    # would be evaluated only once, when the module is imported.
    if today is None:
        today = datetime.datetime.now()
    yesterday = today - datetime.timedelta(days=1)
    yesterday_timestamp = int(yesterday.timestamp()) * 1000
    return yesterday_timestamp
def extractDate(name, prefix, fileType):
prefixLen = len(prefix)
fileTypeLen = len(fileType)
return name[prefixLen+1:-fileTypeLen]
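# Usage sketch (illustrative): timestamps are returned in milliseconds, and extractDate()
# strips a known prefix plus one separator character and the file extension from a filename.
#   yesterday()                                             # e.g. 1704067200000
#   extractDate("report_2024-01-01.csv", "report", ".csv")  # -> "2024-01-01"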
|
from datetime import datetime
from ctyped.types import CRef
from .base import _ApiResourceBase
from .stats import CurrentApplicationAchievements
from .user import User
class Application(_ApiResourceBase):
"""Exposes methods to get application data.
Aliased as ``steampak.SteamApplication``.
.. code-block:: python
from steampak import SteamApplication
# We use `Spacewar` app ID. (This game is provided with SDK).
my_app = SteamApplication(480)
"""
def __init__(self, app_id, *args, **kwargs):
"""
:param int|None app_id: Application (game) ID.
"""
client = self.get_client()
self._iface = client.apps
self._iface_list = client.app_list
super().__init__(*args, **kwargs)
if app_id is not None: # Might be None for current app.
self.app_id = app_id
def __str__(self):
return self.name
@property
def owned(self):
"""``True`` if user owns the current app.
.. warning::
Only use this member if you need to check ownership of a game related to yours, a demo for example.
:rtype: bool
"""
return self._iface.get_is_subscribed(self.app_id)
@property
def installed(self):
"""``True`` if app is installed (not necessarily owned).
:rtype: bool
"""
return self._iface.get_is_installed(self.app_id)
@property
def name(self):
"""Application name, or None on error.
.. warning::
Restricted interface can only be used by approved apps.
:rtype: str
"""
return self._get_str(self._iface_list.get_name, [self.app_id])
@property
def install_dir(self):
"""Returns application installation path.
.. note::
If fails this falls back to a restricted interface, which can only be used by approved apps.
:rtype: str
"""
max_len = 500
directory = self._get_str(self._iface.get_install_dir, [self.app_id], max_len=max_len)
if not directory:
# Fallback to restricted interface (can only be used by approved apps).
directory = self._get_str(self._iface_list.get_install_dir, [self.app_id], max_len=max_len)
return directory
@property
def purchase_time(self):
"""Date and time of app purchase.
:rtype: datetime
"""
ts = self._iface.get_purchase_time(self.app_id)
return datetime.utcfromtimestamp(ts)
@property
def build_id(self):
"""Application Build ID.
This may change at any time based on backend updates.
.. warning::
Restricted interface can only be used by approved apps.
:rtype: int
"""
return self._iface_list.get_build_id(self.app_id)
class InstalledApplications(_ApiResourceBase):
"""Exposes methods to get data on installed applications.
Interface can be accessed through ``api.apps.installed``.
.. warning::
Restricted interface can only be used by approved apps.
"""
def __init__(self, *args, **kwargs):
self._iface = self.get_client().app_list
super().__init__(*args, **kwargs)
def __len__(self):
"""Returns a number of currently installed applications.
:rtype: int
"""
return self._iface.get_installed_count()
def __call__(self):
"""Generator. Returns Application objects, representing currently installed applications.
:rtype: tuple(int, Application)
:return:
"""
max_count = len(self)
apps_ids = CRef.carray(int, size=max_count)
total = self._iface.get_installed(apps_ids, max_count)
for app_id in apps_ids:
yield app_id, Application(app_id)
def __iter__(self):
return iter(self())
class Dlc(Application):
"""Exposes methods to get downloadable content (DLC) data.
Aliased as ``steampak.SteamDlc``.
.. code-block:: python
from steampak import SteamDlc
# We use `Spacewar` DLC app ID. (Spacewar game is provided with SDK).
my_dlc = SteamDlc(110902)
Current application DLCs are available through ``CurrentApplication.dlcs``.
"""
def __init__(self, app_id):
self._iface = self.get_client().apps
super(Dlc, self).__init__(app_id)
self._name = None
self._available = None
@property
def installed(self):
"""``True`` if the user owns the DLC & if the DLC is installed.
:rtype: bool
"""
return self._iface.get_is_dlc_installed(self.app_id)
def install(self):
"""Installs DLC (for optional DLCs)."""
self._iface.dlc_install(self.app_id)
def uninstall(self):
"""Uninstalls DLC (for optional DLCs)."""
self._iface.dlc_uninstall(self.app_id)
def get_download_progress(self):
"""Returns tuple with download progress (for optional DLCs):
(bytes_downloaded, bytes_total)
:rtype: tuple
"""
downloaded = CRef.cint()
total = CRef.cint()
result = self._iface.get_dlc_download_progress(self.app_id, downloaded, total)
if not result:
return 0, 0
return int(downloaded), int(total)
@property
def name(self):
"""DLC name.
:rtype: str
"""
# Fallback to parent data if necessary.
return self._name or super().name
@property
def available(self):
"""True if DLC is available.
:rtype: bool
"""
return self._available
class CurrentApplicationDlcs(_ApiResourceBase):
"""Exposes methods to get downloadable content (DLC) data
for current application.
"""
def __init__(self, *args, **kwargs):
self._iface = self.get_client().apps
super().__init__(*args, **kwargs)
def __len__(self):
"""Returns a number of current application .
:rtype: int
:return:
"""
return self._iface.get_dlc_count()
def __call__(self):
"""Generator. Returns Dlc objects.
:rtype: tuple(int, Dlc)
:return:
"""
max_len = 300
for idx in range(len(self)):
app_id = CRef.cint()
available = CRef.cbool()
name = CRef.carray(str, size=max_len)
if not self._iface.get_dlc_by_index(idx, app_id, available, name, max_len):
continue
app_id = int(app_id)
dlc = Dlc(app_id)
# Populate data.
dlc._name = str(name)
dlc._available = bool(available)
yield app_id, dlc
def __iter__(self):
return iter(self())
class CurrentApplication(Application):
"""Exposes methods to get current application data.
Interface can be accessed through ``api.apps.current``.
.. code-block:: python
from steampak import SteamApi
api = SteamApi(LIBRARY_PATH, app_id=APP_ID)
print(api.apps.current.language_current)
"""
dlcs: CurrentApplicationDlcs = None
"""Interface to DLCs of current application.
.. code-block:: python
for dlc_id, dlc in api.apps.current.dlcs():
print('%s: %s' % (dlc_id, dlc.name))
"""
achievements: CurrentApplicationAchievements = None
"""Current application (game) achievements.
.. code-block:: python
for ach_name, ach in api.apps.current.achievements():
print('%s: %s' % (ach_name, ach.title))
"""
def __init__(self, *args, **kwargs):
self._iface = self.get_client().apps
self._iface_utils = self.get_client().utils
super().__init__(None, *args, **kwargs)
self.dlcs = CurrentApplicationDlcs()
self.achievements = CurrentApplicationAchievements()
@property
def app_id(self):
# Overridden to support parent class methods.
return self._iface_utils.get_app_id()
@property
def beta_name(self):
"""Current beta branch name, 'public' is the default branch.
:rtype: str
"""
return self._get_str(self._iface.get_name_beta, [])
@property
def build_id(self):
"""Current application Build ID.
This may change at any time based on backend updates.
.. warning::
Restricted interface can only be used by approved apps.
:rtype: int
"""
return self._iface.get_current_build_id()
@property
def language_current(self):
"""Current game language.
E.g.: english
:rtype: str
"""
return self._iface.get_current_language()
@property
def language_available(self):
"""List of available game languages.
E.g.: ['english', 'russian']
:rtype: list[str]
"""
return self._iface.get_available_languages().split(',')
@property
def vac_banned(self):
"""``True`` if the current app is banned by BIsVACBanned.
:rtype: bool
"""
return self._iface.get_is_vac_banned()
@property
def mode_cybercafe(self):
"""``True`` if the current app supports Valve Cybercafe Program.
:rtype: bool
"""
return self._iface.get_is_cybercafe()
@property
def mode_free_weekend(self):
"""``True`` if the user is subscribed to the current app through a free weekend.
Will return ``False`` for users who have a retail or other type of license.
.. note::
Before using, please ask your Valve technical contact how to package and secure your free weekend.
:rtype: bool
"""
return self._iface.get_is_free_weekend()
@property
def low_violence(self):
"""``True`` if the current app is low violence.
:rtype: bool
"""
return self._iface.get_is_low_violence()
@property
def owned(self):
"""``True`` if user owns the current app.
:rtype: bool
"""
return self._iface.get_is_owned()
@property
def owner(self):
"""Owner user. If different from current user, app is borrowed.
:rtype: User
"""
return User(self._iface.get_owner())
def mark_corrupt(self, only_files_missing=False):
"""Signal Steam that game files seems corrupt or missing.
:param bool only_files_missing: Set it to True if only files are missing.
:rtype: bool
"""
return self._iface.mark_corrupt(only_files_missing)
class Applications(_ApiResourceBase):
"""Exposes methods to get applications data."""
installed: InstalledApplications = None
"""Interface to installed applications.
.. code-block:: python
for app_id, app in api.apps.installed:
print('%s: %s' % (app_id, app.name))
"""
current: CurrentApplication = None
"""Interface to current application.
.. code-block:: python
print(api.apps.current.language_current)
"""
def __init__(self, *args, **kwargs):
self._iface = self.get_client().apps
super().__init__(*args, **kwargs)
self.installed = InstalledApplications()
self.current = CurrentApplication()
|
import mem_profile
import random
import time
names = ['John', 'Corey', 'Adam', 'Steve', 'Rick', 'Thomas']
majors = ['Math', 'Engineering', 'CompSci', 'Arts', 'Business']
print('Memory (Before): {}Mb'.format(mem_profile.memory_usage_psutil()))
def people_list(num_people):
result = []
for i in range(num_people):
person = {
'id': i,
'name': random.choice(names),
'major': random.choice(majors)
}
result.append(person)
return result
def people_generator(num_people):
for i in range(num_people):
person = {
'id': i,
'name': random.choice(names),
'major': random.choice(majors)
}
yield person
# t1 = time.perf_counter()
# people = people_list(1000000)
# t2 = time.perf_counter()
t1 = time.perf_counter()
people = people_generator(1000000)
t2 = time.perf_counter()
print('Memory (After) : {}Mb'.format(mem_profile.memory_usage_psutil()))
print('Took {} Seconds'.format(t2 - t1))
|
"""`jupytext` as a command line tool"""
import argparse
import glob
import json
import os
import re
import shlex
import subprocess
import sys
import warnings
from copy import copy
from tempfile import NamedTemporaryFile
from .combine import combine_inputs_with_outputs
from .compare import NotebookDifference, compare, test_round_trip_conversion
from .config import load_jupytext_config, notebook_formats
from .formats import (
_BINARY_FORMAT_OPTIONS,
_VALID_FORMAT_OPTIONS,
JUPYTEXT_FORMATS,
check_auto_ext,
check_file_version,
long_form_multiple_formats,
long_form_one_format,
short_form_one_format,
)
from .header import recursive_update
from .jupytext import create_prefix_dir, read, reads, write, writes
from .kernels import find_kernel_specs, get_kernel_spec, kernelspec_from_language
from .languages import _SCRIPT_EXTENSIONS
from .paired_paths import (
InconsistentPath,
base_path,
find_base_path_and_format,
full_path,
paired_paths,
)
from .pairs import latest_inputs_and_outputs, read_pair, write_pair
from .version import __version__
def system(*args, **kwargs):
"""Execute the given bash command"""
kwargs.setdefault("stdout", subprocess.PIPE)
proc = subprocess.Popen(args, **kwargs)
out, _ = proc.communicate()
if proc.returncode:
raise SystemExit(proc.returncode)
return out.decode("utf-8")
def str2bool(value):
"""Parse Yes/No/Default string
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse"""
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
if value.lower() in ("no", "false", "f", "n", "0"):
return False
if value.lower() in ("d", "default", ""):
return None
raise argparse.ArgumentTypeError("Expected: (Y)es/(T)rue/(N)o/(F)alse/(D)efault")
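# Illustrative mapping (not part of the original module), mirroring the branches above:
#   str2bool("Yes") -> True      str2bool("t") -> True
#   str2bool("No")  -> False     str2bool("0") -> False
#   str2bool("default") -> None  str2bool("") -> None
#   anything else raises argparse.ArgumentTypeError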
def parse_jupytext_args(args=None):
"""Command line parser for jupytext"""
parser = argparse.ArgumentParser(
description="Jupyter Notebooks as Markdown Documents, Julia, Python or R Scripts",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Input
parser.add_argument(
"notebooks",
help="One or more notebook(s). "
"Notebook is read from stdin when this argument is empty.",
nargs="*",
)
parser.add_argument(
"--from",
dest="input_format",
help="Jupytext format for the input(s). Inferred from the "
"file extension and content when missing.",
)
# Destination format & act on metadata
parser.add_argument(
"--to",
dest="output_format",
help=(
"The destination format: 'ipynb', 'markdown' or 'script', or a file extension: "
"'md', 'Rmd', 'jl', 'py', 'R', ..., 'auto' (script extension matching the notebook language), "
"or a combination of an extension and a format name, e.g. {} ".format(
", ".join(
{
f"md:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".md"
}
)
)
+ " or {}. ".format(
", ".join(
{
f"py:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".py"
}
)
)
+ "The default format for scripts is the 'light' format, "
"which uses few cell markers (none when possible). "
"Alternatively, a format compatible with many editors is the "
"'percent' format, which uses '# %%%%' as cell markers. "
"The main formats (markdown, light, percent) preserve "
"notebooks and text documents in a roundtrip. Use the "
"--test and and --test-strict commands to test the roundtrip on your files. "
"Read more about the available formats at "
"https://jupytext.readthedocs.io/en/latest/formats.html"
),
)
# Destination file
parser.add_argument(
"-o",
"--output",
help="Destination file. Defaults to the original file, "
"with prefix/suffix/extension changed according to "
"the destination format. "
"Use '-' to print the notebook on stdout.",
)
parser.add_argument(
"--update",
action="store_true",
help="Preserve the output cells when the destination "
"notebook is an .ipynb file that already exists",
)
parser.add_argument(
"--set-formats",
type=str,
help="Turn the notebook or text document to one or more alternative representations "
"with e.g. '--set-formats ipynb,py:light'. "
"The --set-formats option also triggers the creation/update of all paired files",
)
# Action: convert(default)/version/list paired paths/sync/apply/test
action = parser.add_mutually_exclusive_group()
action.add_argument(
"--sync",
"-s",
help="Synchronize the content of the paired representations of "
"the given notebook. Input cells are taken from the file that "
"was last modified, and outputs are read from the ipynb file, "
"if present.",
action="store_true",
)
action.add_argument(
"--paired-paths",
"-p",
help="List the locations of the alternative representations for this notebook.",
action="store_true",
)
parser.add_argument(
"--format-options",
"--opt",
action="append",
help="Set format options with e.g. "
"'--opt comment_magics=true' or '--opt notebook_metadata_filter=-kernelspec'.",
)
parser.add_argument(
"--update-metadata",
default={},
type=json.loads,
help="Update the notebook metadata with the desired dictionary. "
"Argument must be given in JSON format. For instance, if you "
"want to activate a pairing in the generated file, use e.g. "
"""--update-metadata '{"jupytext":{"formats":"ipynb,py:light"}}' """
"See also the --opt and --set-formats options for other ways "
"to operate on the Jupytext metadata.",
)
parser.add_argument(
"--use-source-timestamp",
help="Set the modification timestamp of the output file(s) equal"
"to that of the source file, and keep the source file and "
"its timestamp unchanged.",
action="store_true",
)
parser.add_argument(
"--warn-only",
"-w",
action="store_true",
help="Only issue a warning and continue processing other notebooks "
"when the conversion of a given notebook fails",
)
action.add_argument(
"--test",
action="store_true",
help="Test that the notebook is stable under a round trip conversion, "
"up to the expected changes",
)
action.add_argument(
"--test-strict",
action="store_true",
help="Test that the notebook is strictly stable under a round trip conversion",
)
parser.add_argument(
"--stop",
"-x",
dest="stop_on_first_error",
action="store_true",
help="In --test mode, stop on first round trip conversion error, and report stack traceback",
)
# Pipe notebook inputs into other commands
parser.add_argument(
"--pipe",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and read the notebook back. For instance, reformat "
"your notebook with: "
"'jupytext notebook.ipynb --pipe black' "
"If you want to reformat it and sync the paired representation, execute: "
"'jupytext notebook.ipynb --sync --pipe black' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --pipe 'black {}'",
)
parser.add_argument(
"--diff",
"-d",
action="store_true",
help="Show the differences between (the inputs) of two notebooks",
)
parser.add_argument(
"--diff-format",
help="The text format used to show differences in --diff",
)
parser.add_argument(
"--check",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and test that the returned value is non zero. For "
"instance, test that your notebook is pep8 compliant with: "
"'jupytext notebook.ipynb --check flake8' "
"or run pytest on your notebook with: "
"'jupytext notebook.ipynb --check pytest' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --check 'pytest {}'",
)
parser.add_argument(
"--pipe-fmt",
default="auto:percent",
help="The format in which the notebook should be piped to other programs, "
"when using the --pipe and/or --check commands.",
)
# Execute the notebook
parser.add_argument(
"--set-kernel",
"-k",
type=str,
help="Set the kernel with the given name on the notebook. "
"Use '--set-kernel -' to set a kernel matching the current "
"environment on Python notebooks, and matching the notebook "
"language otherwise (get the list of available kernels with "
"'jupyter kernelspec list')",
)
parser.add_argument(
"--execute",
action="store_true",
help="Execute the notebook with the given kernel. In the "
"--pre-commit-mode, the notebook is executed only if a code "
"cell changed, or if some execution outputs are missing "
"or not ordered.",
)
parser.add_argument(
"--run-path",
type=str,
help="Execute the notebook at the given path (defaults to the notebook parent directory)",
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help="Quiet mode: do not comment about files being updated or created",
)
parser.add_argument(
"--show-changes",
action="store_true",
help="Display the diff for each output file",
)
action.add_argument(
"--version",
"-v",
action="store_true",
help="Show jupytext's version number and exit",
)
parser.add_argument(
"--pre-commit",
action="store_true",
help="Ignore the notebook argument, and instead apply Jupytext "
"on the notebooks found in the git index, which have an "
"extension that matches the (optional) --from argument.",
)
parser.add_argument(
"--pre-commit-mode",
action="store_true",
help="This is a mode that is compatible with the pre-commit framework. "
"In this mode, --sync won't use timestamp but instead will "
"determines the source notebook as the element of the pair "
"that is added to the git index. An alert is raised if multiple inconsistent representations are "
"in the index. It also raises an alert after updating the paired files or outputs if those "
"files need to be added to the index. Finally, filepaths that aren't in the source format "
"you are trying to convert from are ignored.",
)
return parser.parse_args(args)
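# Hedged usage sketch (assumption: called programmatically; the file name is a placeholder):
#   args = parse_jupytext_args(["notebook.ipynb", "--to", "py:percent", "--execute"])
#   args.notebooks == ["notebook.ipynb"]
#   args.output_format == "py:percent"  # dest of --to
#   args.execute is True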
def jupytext(args=None):
"""Entry point for the jupytext script"""
args = parse_jupytext_args(args)
def log(text):
if not args.quiet:
sys.stdout.write(text + "\n")
if args.version:
log(__version__)
return 0
if args.pre_commit:
warnings.warn(
"The --pre-commit argument is deprecated. "
"Please consider switching to the pre-commit.com framework "
"(let us know at https://github.com/mwouts/jupytext/issues "
"if that is an issue for you)",
DeprecationWarning,
)
if args.notebooks:
raise ValueError(
"--pre-commit takes notebooks from the git index. Do not pass any notebook here."
)
args.notebooks = notebooks_in_git_index(args.input_format)
log("[jupytext] Notebooks in git index are:")
for nb_file in args.notebooks:
log(nb_file)
# Read notebook from stdin
if not args.notebooks:
if not args.pre_commit:
args.notebooks = ["-"]
if args.set_formats is not None:
# Replace empty string with None
args.update_metadata = recursive_update(
args.update_metadata, {"jupytext": {"formats": args.set_formats or None}}
)
args.sync = True
if args.paired_paths:
if len(args.notebooks) != 1:
raise ValueError("--paired-paths applies to a single notebook")
print_paired_paths(args.notebooks[0], args.input_format)
return 1
if args.run_path:
args.execute = True
if (
(args.test or args.test_strict)
and not args.output_format
and not args.output
and not args.sync
):
raise ValueError("Please provide one of --to, --output or --sync")
if (
not args.output_format
and not args.output
and not args.sync
and not args.pipe
and not args.diff
and not args.check
and not args.update_metadata
and not args.format_options
and not args.set_kernel
and not args.execute
):
raise ValueError(
"Please provide one of --to, --output, --set-formats, --sync, --pipe, --diff, "
"--check, --update-metadata, --format-options, --set-kernel or --execute"
)
if args.diff:
if (
len(args.notebooks) != 2
or args.output_format
or args.output
or args.sync
or args.pipe
or args.check
or args.update_metadata
or args.format_options
or args.set_kernel
or args.execute
):
raise ValueError(
"Please provide two notebooks after 'jupytext --diff'.\n"
"NB: Use --show-changes if you wish to see the changes in "
"a notebook being updated by Jupytext."
)
nb_file1, nb_file2 = args.notebooks
nb1 = read(nb_file1)
nb2 = read(nb_file2)
def fmt_if_not_ipynb(nb):
fmt = nb.metadata["jupytext"]["text_representation"]
if fmt["extension"] == ".ipynb":
return None
return short_form_one_format(fmt)
diff_fmt = (
args.diff_format or fmt_if_not_ipynb(nb1) or fmt_if_not_ipynb(nb2) or "md"
)
diff = compare(
writes(nb2, diff_fmt),
writes(nb1, diff_fmt),
nb_file2,
nb_file1,
return_diff=True,
)
sys.stdout.write(diff)
return
if args.output and len(args.notebooks) != 1:
raise ValueError("Please input a single notebook when using --output")
# Warn if '--to' is used in place of '--output'
if (
not args.output
and args.output_format
and "." in args.output_format
# a suffix is expected to start with one of these characters #901
and not args.output_format.startswith((".", "-", "_"))
and "//" not in args.output_format
):
def single_line(msg, *args, **kwargs):
return f"[warning] {msg}\n"
warnings.formatwarning = single_line
warnings.warn(
"You might have passed a file name to the '--to' option, "
"when a format description was expected. Maybe you want to use the '-o' option instead?"
)
if args.input_format:
args.input_format = long_form_one_format(args.input_format)
if args.output_format:
args.output_format = long_form_one_format(args.output_format)
set_format_options(args.output_format, args.format_options)
# Wildcard extension on Windows #202
notebooks = []
for pattern in args.notebooks:
if "*" in pattern or "?" in pattern:
# Exclude the .jupytext.py configuration file
notebooks.extend(glob.glob(pattern, recursive=True))
else:
notebooks.append(pattern)
# Count how many file have round-trip issues when testing
exit_code = 0
for nb_file in notebooks:
if not args.warn_only:
exit_code += jupytext_single_file(nb_file, args, log)
else:
try:
exit_code += jupytext_single_file(nb_file, args, log)
except Exception as err:
sys.stderr.write(f"[jupytext] Error: {str(err)}\n")
return exit_code
def jupytext_single_file(nb_file, args, log):
"""Apply the jupytext command, with given arguments, to a single file"""
if nb_file == "-" and args.sync:
msg = "Missing notebook path."
if args.set_formats is not None and os.path.isfile(args.set_formats):
msg += f" Maybe you mean 'jupytext --sync {args.set_formats}' ?"
raise ValueError(msg)
nb_dest = None
if args.output:
nb_dest = args.output
elif nb_file == "-":
nb_dest = "-"
else:
try:
bp = base_path(nb_file, args.input_format)
except InconsistentPath:
if args.pre_commit_mode:
log(
"[jupytext] Ignoring unmatched input path {}{}".format(
nb_file,
f" for format {args.input_format}" if args.input_format else "",
)
)
return 0
raise
if args.output_format:
nb_dest = full_path(bp, args.output_format)
config = load_jupytext_config(os.path.abspath(nb_file))
# Just acting on metadata / pipe => save in place
save_in_place = not nb_dest and not args.sync
if save_in_place:
nb_dest = nb_file
if nb_dest == "-":
args.quiet = True
# I. ### Read the notebook ###
fmt = copy(args.input_format) or {}
if not fmt:
ext = os.path.splitext(nb_file)[1]
if ext:
fmt = {"extension": ext}
if fmt:
set_format_options(fmt, args.format_options)
log(
"[jupytext] Reading {}{}".format(
nb_file if nb_file != "-" else "stdin",
f" in format {short_form_one_format(fmt)}" if "extension" in fmt else "",
)
)
notebook = read(nb_file, fmt=fmt, config=config)
if "extension" in fmt and "format_name" not in fmt:
text_representation = notebook.metadata.get("jupytext", {}).get(
"text_representation", {}
)
if text_representation.get("extension") == fmt["extension"]:
fmt["format_name"] = text_representation["format_name"]
# Compute actual extension when using script/auto, and update nb_dest if necessary
dest_fmt = args.output_format
if dest_fmt and dest_fmt["extension"] == ".auto":
dest_fmt = check_auto_ext(dest_fmt, notebook.metadata, "--to")
if not args.output and nb_file != "-":
nb_dest = full_path(base_path(nb_file, args.input_format), dest_fmt)
# Set the kernel
set_kernel = args.set_kernel
if (
(not set_kernel)
and args.execute
and notebook.metadata.get("kernelspec", {}).get("name") is None
):
set_kernel = "-"
if set_kernel:
if set_kernel == "-":
language = (
notebook.metadata.get("jupytext", {}).get("main_language")
or notebook.metadata["kernelspec"]["language"]
)
if not language:
raise ValueError(
"Cannot infer a kernel as notebook language is not defined"
)
kernelspec = kernelspec_from_language(language)
else:
try:
kernelspec = get_kernel_spec(set_kernel)
except KeyError as err:
raise KeyError(
"Please choose a kernel name among {}".format(
find_kernel_specs().keys()
)
) from err
kernelspec = {
"name": args.set_kernel,
"language": kernelspec.language,
"display_name": kernelspec.display_name,
}
log("[jupytext] Setting kernel {}".format(kernelspec.get("name")))
args.update_metadata["kernelspec"] = kernelspec
# Are we updating a text file that has a metadata filter? #212
if args.update_metadata or args.format_options:
if (
notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter")
== "-all"
):
notebook.metadata.get("jupytext", {}).pop("notebook_metadata_filter")
# Update the metadata
if args.update_metadata:
log(
"[jupytext] Updating notebook metadata with '{}'".format(
json.dumps(args.update_metadata)
)
)
if (
"kernelspec" in args.update_metadata
and "main_language" in notebook.metadata.get("jupytext", {})
):
notebook.metadata["jupytext"].pop("main_language")
recursive_update(notebook.metadata, args.update_metadata)
# Read paired notebooks, except if the pair is being created
nb_files = [nb_file, nb_dest]
if args.sync:
formats = notebook_formats(
notebook, config, nb_file, fallback_on_current_fmt=False
)
set_prefix_and_suffix(fmt, formats, nb_file)
if args.set_formats is None:
try:
notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(
notebook, fmt, config, formats, nb_file, log, args.pre_commit_mode
)
nb_files = [inputs_nb_file, outputs_nb_file]
except NotAPairedNotebook as err:
sys.stderr.write("[jupytext] Warning: " + str(err) + "\n")
return 0
except InconsistentVersions as err:
sys.stderr.write("[jupytext] Error: " + str(err) + "\n")
return 1
else:
nb_files = [nb_file]
# II. ### Apply commands onto the notebook ###
# Pipe the notebook into the desired commands
prefix = None if nb_file == "-" else os.path.splitext(os.path.basename(nb_file))[0]
for cmd in args.pipe or []:
notebook = pipe_notebook(
notebook, cmd, args.pipe_fmt, prefix=prefix, warn_only=args.warn_only
)
# and/or test the desired commands onto the notebook
for cmd in args.check or []:
pipe_notebook(
notebook,
cmd,
args.pipe_fmt,
update=False,
prefix=prefix,
warn_only=args.warn_only,
)
if (
args.execute
and args.pre_commit_mode
and execution_counts_are_in_order(notebook)
and not code_cells_have_changed(notebook, nb_files)
):
log(
f"[jupytext] Execution of {shlex.quote(nb_file)} "
f"skipped as code cells have not changed and outputs are present."
)
args.execute = False
# Execute the notebook
if args.execute:
kernel_name = notebook.metadata.get("kernelspec", {}).get("name")
log(f"[jupytext] Executing notebook with kernel {kernel_name}")
if nb_dest is not None and nb_dest != "-":
nb_path = os.path.dirname(nb_dest)
elif nb_file != "-":
nb_path = os.path.dirname(nb_file)
else:
nb_path = None
run_path = args.run_path or nb_path
if args.run_path and not os.path.isdir(run_path):
# is this a relative directory?
for base_dir in [nb_path, os.getcwd()]:
try_path = os.path.join(base_dir, run_path)
if os.path.isdir(try_path):
run_path = try_path
break
if not os.path.isdir(run_path):
raise ValueError(f"--run-path={args.run_path} is not a valid path")
if run_path:
resources = {"metadata": {"path": run_path}}
else:
resources = {}
try:
from nbconvert.preprocessors import ExecutePreprocessor
exec_proc = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)
exec_proc.preprocess(notebook, resources=resources)
except (ImportError, RuntimeError) as err:
if args.pre_commit_mode:
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that you have listed 'nbconvert' and 'ipykernel' "
"under 'additional_dependencies' in the jupytext hook."
) from err
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that 'nbconvert' and 'ipykernel' are installed."
) from err
# III. ### Possible actions ###
# a. Test round trip conversion
if args.test or args.test_strict:
try:
# Round trip from an ipynb document
if fmt["extension"] == ".ipynb":
test_round_trip_conversion(
notebook,
dest_fmt,
update=args.update,
allow_expected_differences=not args.test_strict,
stop_on_first_error=args.stop_on_first_error,
)
# Round trip from a text file
else:
with open(nb_file, encoding="utf-8") as fp:
org_text = fp.read()
# If the destination is not ipynb, we convert to/back that format
if dest_fmt["extension"] != ".ipynb":
dest_text = writes(notebook, fmt=dest_fmt)
notebook = reads(dest_text, fmt=dest_fmt)
text = writes(notebook, fmt=fmt, config=config)
if args.test_strict:
compare(text, org_text)
else:
# we ignore the YAML header in the comparison #414
comment = _SCRIPT_EXTENSIONS.get(fmt["extension"], {}).get(
"comment", ""
)
# white spaces between the comment char and the YAML delimiters are allowed
if comment:
comment = comment + r"\s*"
yaml_header = re.compile(
r"^{comment}---\s*\n.*\n{comment}---\s*\n".format(
comment=comment
),
re.MULTILINE | re.DOTALL,
)
compare(
re.sub(yaml_header, "", text), re.sub(yaml_header, "", org_text)
)
except (NotebookDifference, AssertionError) as err:
sys.stdout.write(f"{nb_file}: {str(err)}")
return 1
return 0
# b. Output to the desired file or format
untracked_files = 0
def lazy_write(path, fmt=None, action=None, update_timestamp_only=False):
"""Write the notebook only if it has changed"""
if path == "-":
write(notebook, "-", fmt=fmt)
return
nonlocal untracked_files
if update_timestamp_only:
modified = False
else:
_, ext = os.path.splitext(path)
fmt = copy(fmt or {})
fmt = long_form_one_format(fmt, update={"extension": ext})
new_content = writes(notebook, fmt=fmt, config=config)
diff = None
if not new_content.endswith("\n"):
new_content += "\n"
if not os.path.isfile(path):
modified = True
else:
with open(path, encoding="utf-8") as fp:
current_content = fp.read()
modified = new_content != current_content
if modified and args.show_changes:
diff = compare(
new_content,
current_content,
"",
"",
return_diff=True,
)
if modified:
# The text representation of the notebook has changed, we write it on disk
if action is None:
message = f"[jupytext] Updating {shlex.quote(path)}"
else:
message = "[jupytext] Writing {path}{format}{action}".format(
path=shlex.quote(path),
format=" in format " + short_form_one_format(fmt)
if fmt and "format_name" in fmt
else "",
action=action,
)
if diff is not None:
message += " with this change:\n" + diff
log(message)
create_prefix_dir(path, fmt)
with open(path, "w", encoding="utf-8") as fp:
fp.write(new_content)
# Otherwise, we only update the timestamp of the text file to make sure
# they remain more recent than the ipynb file, for compatibility with the
# Jupytext contents manager for Jupyter
if args.use_source_timestamp:
log(
f"[jupytext] Setting the timestamp of {shlex.quote(path)} equal to that of {shlex.quote(nb_file)}"
)
os.utime(path, (os.stat(path).st_atime, os.stat(nb_file).st_mtime))
elif not modified and not path.endswith(".ipynb"):
log(f"[jupytext] Updating the timestamp of {shlex.quote(path)}")
os.utime(path, None)
if args.pre_commit:
system("git", "add", path)
if args.pre_commit_mode and is_untracked(path):
log(
f"[jupytext] Error: the git index is outdated.\n"
f"Please add the paired notebook with:\n"
f" git add {shlex.quote(path)}"
)
untracked_files += 1
return
if nb_dest:
if nb_dest == nb_file and not dest_fmt:
dest_fmt = fmt
# Test consistency between dest name and output format
if dest_fmt and nb_dest != "-":
base_path(nb_dest, dest_fmt)
# Describe what jupytext is doing
if save_in_place:
action = ""
elif os.path.isfile(nb_dest) and args.update:
if not nb_dest.endswith(".ipynb"):
raise ValueError("--update is only for ipynb files")
action = " (destination file updated)"
check_file_version(notebook, nb_file, nb_dest)
notebook = combine_inputs_with_outputs(notebook, read(nb_dest), fmt=fmt)
elif os.path.isfile(nb_dest):
suggest_update = (
" [use --update to preserve cell outputs and ids]"
if nb_dest.endswith(".ipynb")
else ""
)
action = f" (destination file replaced{suggest_update})"
else:
action = ""
formats = notebook.metadata.get("jupytext", {}).get("formats")
formats = long_form_multiple_formats(formats)
if formats:
try:
base_path_out, _ = find_base_path_and_format(nb_dest, formats)
except InconsistentPath:
# Drop 'formats' if the destination is not part of the paired notebooks
formats = {}
notebook.metadata.get("jupytext", {}).pop("formats")
lazy_write(nb_dest, fmt=dest_fmt, action=action)
nb_dest_in_pair = formats and any(
os.path.exists(alt_path) and os.path.samefile(nb_dest, alt_path)
for alt_path, _ in paired_paths(nb_file, fmt, formats)
)
if (
nb_dest_in_pair
and os.path.isfile(nb_file)
and not nb_file.endswith(".ipynb")
and os.path.isfile(nb_dest)
and nb_dest.endswith(".ipynb")
):
# If the destination is an ipynb file and is in the pair, then we
# update the original text file timestamp, as required by our Content Manager
# Otherwise Jupyter will refuse to open the paired notebook #335
# NB: An alternative is --use-source-timestamp
lazy_write(nb_file, update_timestamp_only=True)
# c. Synchronize paired notebooks
elif args.sync:
write_pair(nb_file, formats, lazy_write)
return untracked_files
def notebooks_in_git_index(fmt):
"""Return the list of modified and deleted ipynb files in the git index that match the given format"""
git_status = system("git", "status", "--porcelain")
re_modified = re.compile(r"^[AM]+\s+(?P<name>.*)", re.MULTILINE)
modified_files_in_git_index = re_modified.findall(git_status)
files = []
for nb_file in modified_files_in_git_index:
if nb_file.startswith('"') and nb_file.endswith('"'):
nb_file = nb_file[1:-1]
try:
base_path(nb_file, fmt)
files.append(nb_file)
except InconsistentPath:
continue
return files
def is_untracked(filepath):
"""Check whether a file was created or modified and needs to be added to the git index"""
if not filepath:
return False
output = system("git", "ls-files", filepath).strip()
if output == "":
return True
output = system("git", "diff", filepath).strip()
if output != "":
return True
return False
def print_paired_paths(nb_file, fmt):
"""Display the paired paths for this notebook"""
notebook = read(nb_file, fmt=fmt)
formats = notebook.metadata.get("jupytext", {}).get("formats")
if formats:
for path, _ in paired_paths(nb_file, fmt, formats):
if path != nb_file:
sys.stdout.write(path + "\n")
def set_format_options(fmt, format_options):
"""Apply the desired format options to the format description fmt"""
if not format_options:
return
for opt in format_options:
try:
key, value = opt.split("=")
except ValueError as err:
raise ValueError(
"Format options are expected to be of the form key=value, not '{}'".format(
opt
)
) from err
if key not in _VALID_FORMAT_OPTIONS:
raise ValueError(
"'{}' is not a valid format option. Expected one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)
)
)
if key in _BINARY_FORMAT_OPTIONS:
value = str2bool(value)
fmt[key] = value
def set_prefix_and_suffix(fmt, formats, nb_file):
"""Add prefix and suffix information from jupytext.formats if format and path matches"""
for alt_fmt in long_form_multiple_formats(formats):
if alt_fmt["extension"] == fmt["extension"] and fmt.get(
"format_name"
) == alt_fmt.get("format_name"):
try:
base_path(nb_file, alt_fmt)
fmt.update(alt_fmt)
return
except InconsistentPath:
continue
class NotAPairedNotebook(ValueError):
"""An error raised when a notebook is not a paired notebook"""
class InconsistentVersions(ValueError):
"""An error raised when two paired files in the git index contain inconsistent representations"""
def file_in_git_index(path):
if not os.path.isfile(path):
return False
return system("git", "status", "--porcelain", path).strip().startswith(("M", "A"))
def git_timestamp(path):
if not os.path.isfile(path):
return None
# Files that are in the git index are considered most recent
if file_in_git_index(path):
return float("inf")
# Return the commit timestamp
try:
git_ts_str = system("git", "log", "-1", "--pretty=%ct", path).strip()
except SystemExit as err:
if err.code == 128:
# git not initialized
git_ts_str = ""
else:
raise
if git_ts_str:
return float(git_ts_str)
# The file is not in the git index
return get_timestamp(path)
def get_timestamp(path):
if not os.path.isfile(path):
return None
return os.lstat(path).st_mtime
def load_paired_notebook(notebook, fmt, config, formats, nb_file, log, pre_commit_mode):
"""Update the notebook with the inputs and outputs of the most recent paired files"""
if not formats:
raise NotAPairedNotebook(f"{shlex.quote(nb_file)} is not a paired notebook")
formats = long_form_multiple_formats(formats)
_, fmt_with_prefix_suffix = find_base_path_and_format(nb_file, formats)
fmt.update(fmt_with_prefix_suffix)
def read_one_file(path, fmt):
if path == nb_file:
return notebook
log(f"[jupytext] Loading {shlex.quote(path)}")
return read(path, fmt=fmt, config=config)
if pre_commit_mode and file_in_git_index(nb_file):
# We raise an error if two representations of this notebook in the git index are inconsistent
nb_files_in_git_index = sorted(
(
(alt_path, alt_fmt)
for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)
if file_in_git_index(alt_path)
),
key=lambda x: 0 if x[1]["extension"] != ".ipynb" else 1,
)
if len(nb_files_in_git_index) > 1:
path0, fmt0 = nb_files_in_git_index[0]
with open(path0, encoding="utf-8") as fp:
text0 = fp.read()
for alt_path, alt_fmt in nb_files_in_git_index[1:]:
nb = read(alt_path, fmt=alt_fmt, config=config)
alt_text = writes(nb, fmt=fmt0, config=config)
if alt_text != text0:
diff = compare(alt_text, text0, alt_path, path0, return_diff=True)
raise InconsistentVersions(
f"{shlex.quote(alt_path)} and {shlex.quote(path0)} are inconsistent.\n"
+ diff
+ f"\nPlease revert JUST ONE of the files with EITHER\n"
f" git reset {shlex.quote(alt_path)} && git checkout -- {shlex.quote(alt_path)}\nOR\n"
f" git reset {shlex.quote(path0)} && git checkout -- {shlex.quote(path0)}\n"
)
inputs, outputs = latest_inputs_and_outputs(
nb_file, fmt, formats, git_timestamp if pre_commit_mode else get_timestamp
)
notebook = read_pair(inputs, outputs, read_one_file)
return notebook, inputs.path, outputs.path
def exec_command(command, input=None, capture=False, warn_only=False):
"""Execute the desired command, and pipe the given input into it"""
assert isinstance(command, list)
sys.stdout.write("[jupytext] Executing {}\n".format(" ".join(command)))
process = subprocess.Popen(
command,
**(
dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE)
if input is not None
else {}
),
)
out, err = process.communicate(input=input)
if out and not capture:
sys.stdout.write(out.decode("utf-8"))
if err:
sys.stderr.write(err.decode("utf-8"))
if process.returncode:
msg = f"The command '{' '.join(command)}' exited with code {process.returncode}"
hint = (
"" if warn_only else " (use --warn-only to turn this error into a warning)"
)
sys.stderr.write(
f"[jupytext] {'Warning' if warn_only else 'Error'}: {msg}{hint}\n"
)
if not warn_only:
raise SystemExit(process.returncode)
return out
def pipe_notebook(
notebook, command, fmt="py:percent", update=True, prefix=None, warn_only=False
):
"""Pipe the notebook, in the desired representation, to the given command. Update the notebook
with the returned content if desired."""
if command in ["black", "flake8", "autopep8"]:
command = command + " -"
elif command in ["pytest", "unittest"]:
command = command + " {}"
fmt = long_form_one_format(
fmt, notebook.metadata, auto_ext_requires_language_info=False
)
fmt = check_auto_ext(fmt, notebook.metadata, "--pipe-fmt")
text = writes(notebook, fmt)
command = shlex.split(command)
if "{}" in command:
if prefix is not None:
prefix = prefix + (" " if " " in prefix else "_")
tmp_file_args = dict(
mode="w+",
encoding="utf8",
prefix=prefix,
suffix=fmt["extension"],
delete=False,
)
try:
tmp = NamedTemporaryFile(**tmp_file_args)
except TypeError:
# NamedTemporaryFile does not have an 'encoding' argument on pypy
tmp_file_args.pop("encoding")
tmp = NamedTemporaryFile(**tmp_file_args)
try:
tmp.write(text)
tmp.close()
exec_command(
[cmd if cmd != "{}" else tmp.name for cmd in command],
capture=update,
warn_only=warn_only,
)
if not update:
return notebook
piped_notebook = read(tmp.name, fmt=fmt)
finally:
os.remove(tmp.name)
else:
cmd_output = exec_command(
command, text.encode("utf-8"), capture=update, warn_only=warn_only
)
if not update:
return notebook
if not cmd_output:
sys.stderr.write(
"[jupytext] The command '{}' had no output. As a result, the notebook is empty. "
"Is this expected? If not, use --check rather than --pipe for this command.".format(
command
)
)
piped_notebook = reads(cmd_output.decode("utf-8"), fmt)
if fmt["extension"] != ".ipynb":
piped_notebook = combine_inputs_with_outputs(piped_notebook, notebook, fmt)
# Remove jupytext / text_representation entry
if "jupytext" in notebook.metadata:
piped_notebook.metadata["jupytext"] = notebook.metadata["jupytext"]
else:
piped_notebook.metadata.pop("jupytext", None)
return piped_notebook
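# Minimal sketch (assumption, not an official jupytext API example): reformat an
# in-memory notebook with black through pipe_notebook, as the CLI does for --pipe.
#   nb = read("notebook.ipynb")
#   nb = pipe_notebook(nb, "black", fmt="py:percent")  # " -" is appended for black above
#   write(nb, "notebook.ipynb")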
def execution_counts_are_in_order(notebook):
"""Returns True if all the code cells have an execution count, ordered from 1 to N with no missing number"""
expected_execution_count = 1
for cell in notebook.cells:
if cell.cell_type == "code":
if cell.execution_count != expected_execution_count:
return False
expected_execution_count += 1
return True
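# Illustrative examples of the check above (code cells only):
#   execution counts [1, 2, 3] -> True
#   execution counts [1, 3] or [None, 1] -> False (missing or out-of-order numbers)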
def code_cells_have_changed(notebook, nb_files):
"""The source for the code cells has not changed"""
for nb_file in nb_files:
if not os.path.exists(nb_file):
return True
nb_ref = read(nb_file)
# Are the new code cells equals to those in the file?
ref = [cell.source for cell in nb_ref.cells if cell.cell_type == "code"]
new = [cell.source for cell in notebook.cells if cell.cell_type == "code"]
if ref != new:
return True
return False
|
"""
Django settings for spencer project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#-js))k7nx&)biw-=pso3u*o%&w@_wngqw0kq1l3ckhh5(52s@'
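# A minimal hardening sketch (assumption, not part of the generated project): read the
# key from the environment in production instead of hard-coding it, e.g.
#   import os
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)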
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'roles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spencer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'spencer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'spencer',
'USER': 'spencer_django',
'PASSWORD': '9Ag91LaQjR$n',
'HOST': '',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
from exercises.structures.src.treasure_map import TreasureMap
tm = TreasureMap()
tm.populate_map()
def test_beach_key():
assert tm.map['beach'] == 'sandy shore'.casefold()
def test_coast_key():
assert tm.map['coast'] == 'ocean reef'.casefold()
def test_volcano_key():
assert tm.map['volcano'] == 'hot lava'.casefold()
def test_x_key():
assert tm.map['x'] == 'marks the spot'.casefold()
|
# from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
import re  # needed by check_email / check_password below
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
#id = db.Column(db.Integer, primary_key=True )
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
#id = db.Sequence('id', start=1, increment=1)
descripcion = db.Column(db.String(1000))
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
link = db.Column(db.String(200),nullable=False)
titulo = db.Column(db.String(200),nullable=False)
autor = db.Column(db.String(200),nullable=False)
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
id = db.Column(db.Integer, primary_key=True )
fecha = db.Column(db.Date)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
comentario = db.Column(db.String(1000))
class Guarda(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
#token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn
#data = request.get_json()
token = request.headers.get('token')  # None if the header is missing
#token = data['token']
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,*args, **kwargs)
return decorated
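# Illustrative client-side sketch (assumption: the `requests` package and a BASE_URL
# placeholder): protected routes expect the JWT in a 'token' header.
#   import requests
#   token = requests.post(BASE_URL + '/login', json={'nickOcorreo': 'dani', 'password': '...'}).json()['token']
#   requests.get(BASE_URL + '/protected', headers={'token': token})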
@app.route('/unprotected')
def unprotected():
return jsonify({'message': 'Puede entrar tol mundo'})
@app.route('/protected')
@token_required
def protected(current_user):
print(current_user)
return jsonify({'message': 'Puedes entrar si puedes'})
# Registration route
@app.route('/register', methods=['POST'])
def add_data():
data= request.get_json()
#nick = request.form.get("nick")
#password = request.form.get("password")
#e_mail = request.form.get("e_mail")
user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
nick = Usuario.query.filter_by(nick=data['nick']).first()
if user: # if this returns something, the e-mail already exists
return jsonify({'error': 'Existe correo'}) # JSON error: e-mail already exists
if nick:
return jsonify({'error': 'Existe nick'})
#if (check_email(e_mail) == True and check_password(data['password']) == True ):
register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
db.session.add(register)
db.session.commit()
token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
# auth = request.authorization # use this instead if you authenticate with HTTP auth
data= request.get_json()
if '@' in data['nickOcorreo']:
user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
else:
user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
if not user:
return jsonify({'error': 'No existe ese usuario'}) # error: user does not exist
if not check_password_hash(user.password, data['password']):
return jsonify({'error': 'Mal contraseña'}) # error: wrong password
token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
result = db.session.execute(s)
seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
temas = db.session.execute(tema)
vector = []
for row in temas:
vector += row
for row in result:
fila = {
"nick": current_user,
"nombre_de_usuario":row[0],
"descripcion":row[1],
"link":row[2],
"foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
"nsiguiendo": seguidos,
"nseguidores": seguidores,
"nposts": nposts,
"tematicas": vector
#"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
}
return fila
@app.route('/display/<filename>')
def foto(filename):
return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
data= request.get_json()
user = Usuario.query.filter_by(nick=current_user).first()
user.Nombre_de_usuario = data['nombre_de_usuario']
print(data['nombre_de_usuario'])
print(data['descripcion'])
print(data['link'])
print(data['tematicas'])
user.descripcion = data['descripcion']
user.link = data['link']
tematicas = data['tematicas']
for temas in tematicas:
tema = Prefiere.query.filter_by(tema=temas).first()
if not tema:
tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
db.session.add(tema)
#db.session.commit()
#cambia_foto
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
user = Usuario.query.filter_by(nick=current_user).first()
if request.files['nueva_foto'] is not None: #data['cambia_foto']:
file = request.files['nueva_foto']
print(request.files['nueva_foto'])
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
user.foto_de_perfil = filename
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
data= request.get_json()
publicacion = Publicacion(descripcion=data['descripcion'],Usuario_Nicka=current_user) # get the generated id
db.session.add(publicacion)
db.session.commit()
tematicas = data['tematicas']
for temas in tematicas:
temita = Tematica.query.filter_by(tema=temas).first()
if temita:
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
db.session.add(nuevo)
db.session.commit()
if (data['tipo']=="1"): # articulo
print("xd")
guardarPDF(request.files['pdf'], publicacion.id)
elif(data['tipo']=="2"): # recomendacion
recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
db.session.add(recomendacion)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
def guardarPDF(pdf,_id):
propia = Propia.query.filter_by(id=_id).first() or Propia(id=_id) # create the row if it does not exist yet
if pdf is not None:
file = pdf
print(pdf)
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
propia.pdf = filename
db.session.add(propia)
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
data= request.get_json()
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resulta = db.session.execute(a)
#s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)
s=select(Publicacion).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
for r in results:
for i in range(data['id']-8,data['id']):
a = select([Propia.id, Propia.pdf]).where((Propia.id == r.id))
resulta = db.session.execute(a)
Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
fila = {
"id": r.id,
"nick": current_user,
"descripcion":r.descripcion,
"timestamp":r.timestamp,
"pdf": 'http://51.255.50.207:5000/display2/' + a.pdf,
"nlikes": Gustas,
"ncomentarios": Comentarios,
"nguardados": Guardados,
"usuario": resulta.nombre_de_usuario
}
return fila
@app.route('/display2/<filename>')
def pdf(filename):
return redirect(url_for('static', filename='pdf/' + filename),code = 301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
#data= request.get_json()
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resultb = db.session.execute(a)
Nombre_de_usuario = ""
for b in resultb:
Nombre_de_usuario=b.Nombre_de_usuario
#s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)
s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
# for record in results:
# print("\n", record)
vector0 =""
vector1 = []
vector2 = []
for r in results:
print(str(r[0]))
vector0 = vector0 + ","+ str(r[0])
vector1.append(str(r.descripcion))
vector2.append(str(r.timestamp))
# for r in results:
# for b in resultb:
# a = select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id))
# resulta = db.session.execute(a)
# for a in resultaa:
# Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
# Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
# Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
print(vector0)
fila = {
"id": vector0,
#"link": a.link,
#"titulo": a.titulo,
#"autor": a.autor,
"nick": current_user,
"descripcion": vector1,
"timestamp": vector2,
#"nlikes": Gustas,
#"ncomentarios": Comentarios,
#"nguardados": Guardados,
"usuario": Nombre_de_usuario
}
return fila
def check_email(email):
    regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if(re.search(regex,email)):
return True
else:
return False
# Passwords must be between 8 and 32 characters long.
def check_password(password):
    # Require at least one digit, one lower-case, one upper-case and one special character.
    regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|\\]).{8,32}$'
if(re.search(regex,password)):
return True
else:
return False
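# Illustrative checks (assumed behaviour of the validators above, for documentation only):
#   check_email("user.name@example.com")  -> True
#   check_email("not-an-email")           -> False
#   check_password("Abcdef1!")            -> True   (upper, lower, digit, symbol, 8+ chars)
#   check_password("abcdefgh")            -> False  (missing digit, upper case and symbol)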
if __name__ == '__main__':
app.run(debug=True)
|
c = get_config()
# If the master config file uses syntax that's invalid in Python 3, we'll skip
# it and just use the factory defaults.
try:
load_subconfig('ipython_config.py', profile='default')
except Exception:
pass
else:
# We reset exec_lines in case they're not compatible with Python 3.
c.InteractiveShellApp.exec_lines = []
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, Dict, Iterable, Union
import numpy as np
from sklearn.metrics import matthews_corrcoef
from fastestimator.trace.meta._per_ds import per_ds
from fastestimator.trace.trace import Trace
from fastestimator.util.data import Data
from fastestimator.util.traceability_util import traceable
from fastestimator.util.util import to_number
@per_ds
@traceable()
class MCC(Trace):
"""A trace which computes the Matthews Correlation Coefficient for a given set of predictions.
    This is a preferable metric to accuracy or F1 score since it automatically corrects for class imbalances and does
    not depend on the choice of target class (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6941312/). The ideal value
    is 1; a value of 0 means the predictions are completely uncorrelated with the true data, and a value less than
    zero implies anti-correlation (you should invert your classifier predictions to do better).
Args:
true_key: Name of the key that corresponds to ground truth in the batch dictionary.
pred_key: Name of the key that corresponds to predicted score in the batch dictionary.
mode: What mode(s) to execute this Trace in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Trace in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
output_name: What to call the output from this trace (for example in the logger output).
per_ds: Whether to automatically compute this metric individually for every ds_id it runs on, in addition to
computing an aggregate across all ds_ids on which it runs. This is automatically False if `output_name`
contains a "|" character.
**kwargs: Additional keyword arguments that pass to sklearn.metrics.matthews_corrcoef()
Raises:
ValueError: One of ["y_true", "y_pred"] argument exists in `kwargs`.
"""
def __init__(self,
true_key: str,
pred_key: str,
mode: Union[None, str, Iterable[str]] = ("eval", "test"),
ds_id: Union[None, str, Iterable[str]] = None,
output_name: str = "mcc",
per_ds: bool = True,
**kwargs) -> None:
MCC.check_kwargs(kwargs)
super().__init__(inputs=(true_key, pred_key), mode=mode, outputs=output_name, ds_id=ds_id)
self.kwargs = kwargs
self.y_true = []
self.y_pred = []
self.per_ds = per_ds
@property
def true_key(self) -> str:
return self.inputs[0]
@property
def pred_key(self) -> str:
return self.inputs[1]
def on_epoch_begin(self, data: Data) -> None:
self.y_true = []
self.y_pred = []
def on_batch_end(self, data: Data) -> None:
y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
if y_true.shape[-1] > 1 and y_true.ndim > 1:
y_true = np.argmax(y_true, axis=-1)
if y_pred.shape[-1] > 1 and y_pred.ndim > 1:
y_pred = np.argmax(y_pred, axis=-1)
else:
y_pred = np.round(y_pred)
assert y_pred.size == y_true.size
self.y_true.extend(y_true)
self.y_pred.extend(y_pred)
def on_epoch_end(self, data: Data) -> None:
data.write_with_log(self.outputs[0], matthews_corrcoef(y_true=self.y_true, y_pred=self.y_pred, **self.kwargs))
@staticmethod
def check_kwargs(kwargs: Dict[str, Any]) -> None:
"""Check if `kwargs` has any blacklist argument and raise an error if it does.
Args:
            kwargs: Keyword arguments to be examined.
Raises:
ValueError: One of ["y_true", "y_pred"] argument exists in `kwargs`.
"""
blacklist = ["y_true", "y_pred"]
illegal_kwarg = [x for x in blacklist if x in kwargs]
if illegal_kwarg:
raise ValueError(
f"Arguments {illegal_kwarg} cannot exist in kwargs, since FastEstimator will later directly use them in"
" sklearn.metrics.matthews_corrcoef()")
|
from django.contrib import admin
from . import models
admin.site.register(models.Bark)
|
"""Utilities for ImageNet data preprocessing & prediction decoding.
"""
import json
import keras.utils.data_utils as data_utils
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
# Arguments
        preds: Numpy array of shape `(samples, 5)` where each row holds the
            top-5 predicted class indices for a sample.
        top: Integer, how many top-guesses to return (at most 5).
# Returns
A list of lists of top class prediction tuples
`(class_name, class_description)`.
One list of tuples per sample in batch input.
# Raises
ValueError: In case of invalid shape of the `preds` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 5:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 5)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = data_utils.get_file(
'imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models',
file_hash='c2c37ea517e94d9795004a39431a14cb')
with open(fpath) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred[:min(top, 5)]
        # Cast to int so float-typed arrays still index the JSON class mapping correctly.
        result = [tuple(CLASS_INDEX[str(int(i))]) for i in top_indices]
results.append(result)
return results
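# Illustrative usage (an assumption about the intended input format, since this
# variant differs from the stock Keras helper): each row of `preds` is expected
# to already contain the top-5 predicted class indices, e.g.
#   decode_predictions(np.array([[1, 2, 3, 4, 5]]), top=3)
# would return one list of three (wnid, class_name) tuples for that sample.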
|
import copy
from lto.accounts.ecdsa.account_factory_ecdsa import AccountFactoryECDSA
import base58
import pytest
from lto.transactions.anchor import Anchor
class TestAccountECDSA():
factory = AccountFactoryECDSA('L')
seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
account = factory.create()
def test_make_key(self):
assert self.factory._MakeKey(self.seed).to_string() == (b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%')
#@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_address(self):
assert self.factory.create_address(self.account.public_key) == self.account.address
@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_sign_keys(self):
private_key, public_key, key_type = self.factory.create_sign_keys(self.seed)
assert self.account.public_key == public_key
assert self.account.private_key == private_key
assert key_type == 'secp256k1'
@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_from_public(self):
seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
account = AccountFactoryECDSA('T').create_from_seed(seed)
account2 = AccountFactoryECDSA('T').create_from_public_key(account.public_key)
# object
assert account.address == account2.address
assert account.public_key == account2.public_key
# bytes
public_key = b"5\xcf4\xeb\xe0\xd5,s\x00t\xc6to\x8b\xd0\x0e\xf8N\xe6\xa1\x1d\x13\x18s+\x11\x82\x7fR\x8d='\x03!a\x13H\xca=]\x8aV\xf71\x16C\x0c\x9ad{\x14z\x8e1\x9dg\x8b\xb2\xf2\x9e\x0fo\xa7\x9d"
account3 = AccountFactoryECDSA('T').create_from_public_key(public_key)
assert account.address == account3.address
assert account.public_key == account3.public_key
# b58 str
account4 = AccountFactoryECDSA('T').create_from_public_key(base58.b58encode(public_key))
assert account.address == account4.address
assert account.public_key == account4.public_key
@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_from_private_key(self):
seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
account = AccountFactoryECDSA('T').create_from_seed(seed)
account2 = AccountFactoryECDSA('T').create_from_private_key(account.private_key)
# object
assert account.address == account2.address
assert account.private_key == account2.private_key
assert account.public_key == account2.public_key
# bytes
private_key = b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%'
account3 = AccountFactoryECDSA('T').create_from_private_key(private_key)
assert account.address == account3.address
assert account.private_key == account3.private_key
assert account.public_key == account3.public_key
# b58 str
account4 = AccountFactoryECDSA('T').create_from_private_key(base58.b58encode(private_key))
assert account.address == account4.address
assert account.private_key == account4.private_key
assert account.public_key == account4.public_key
def test_verify_random_account_signed_transaction(self):
account = self.factory.create()
transaction = Anchor('rtrtrtr')
transaction.sign_with(account)
cloned_tx = copy.copy(transaction)
cloned_tx.proofs = []
message = cloned_tx.to_binary()
assert account.verify_signature(message, transaction.proofs[0]) is True
|
import subprocess
from consolemenu.items import ExternalItem
class CommandItem(ExternalItem):
"""
A menu item to execute a console command
"""
def __init__(self, text, command, arguments=None, menu=None, should_exit=False):
"""
:ivar str command: The console command to be executed
:ivar list[str] arguments: An optional list of string arguments to be passed to the command
:ivar int exit_status: the exit status of the command, None if it hasn't been run yet
"""
super(CommandItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
self.command = command
if arguments:
self.arguments = arguments
else:
self.arguments = []
self.exit_status = None
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True)
def get_return(self):
"""
:return: the exit status of the command
:rtype: int
"""
return self.exit_status
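# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the original module): attach
# a CommandItem to a ConsoleMenu and read back its exit status afterwards.
if __name__ == "__main__":
    from consolemenu import ConsoleMenu

    item = CommandItem("Print working directory", "pwd", should_exit=True)
    menu = ConsoleMenu("Demo", "Run a shell command from a menu")
    menu.append_item(item)
    menu.show()
    print("exit status:", item.get_return())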
|
from tests.base import BaseTestCase
from binpacking.solver.data_structure.solution import Solution
from binpacking.solver.statistics import Statistics, StatisticIteration, StatisticFitness
class StatisticsTest(BaseTestCase):
def test_statistics(self) -> None:
iteration = StatisticIteration()
fitness = StatisticFitness()
statistics = Statistics()
statistics.add_statistic(iteration)
statistics.add_statistic(fitness)
expected_size = 4
sol = Solution(expected_size)
sol.set_fitness(float(42))
r = statistics.run(sol)
self.assertTrue(r['iteration'] == 0)
r = statistics.run(sol)
self.assertTrue(r['iteration'] == 1)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Database']
class Database(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bucket: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an Athena database.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
hoge_bucket = aws.s3.Bucket("hogeBucket")
hoge_database = aws.athena.Database("hogeDatabase",
name="database_name",
bucket=hoge_bucket.bucket)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: Name of s3 bucket to save the results of the query execution.
:param pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']] encryption_configuration: The encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. An `encryption_configuration` block is documented below.
:param pulumi.Input[bool] force_destroy: A boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable.
:param pulumi.Input[str] name: Name of the database to create.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['encryption_configuration'] = encryption_configuration
__props__['force_destroy'] = force_destroy
__props__['name'] = name
super(Database, __self__).__init__(
'aws:athena/database:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
bucket: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'Database':
"""
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: Name of s3 bucket to save the results of the query execution.
:param pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']] encryption_configuration: The encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. An `encryption_configuration` block is documented below.
:param pulumi.Input[bool] force_destroy: A boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable.
:param pulumi.Input[str] name: Name of the database to create.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket"] = bucket
__props__["encryption_configuration"] = encryption_configuration
__props__["force_destroy"] = force_destroy
__props__["name"] = name
return Database(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def bucket(self) -> pulumi.Output[str]:
"""
Name of s3 bucket to save the results of the query execution.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="encryptionConfiguration")
def encryption_configuration(self) -> pulumi.Output[Optional['outputs.DatabaseEncryptionConfiguration']]:
"""
The encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. An `encryption_configuration` block is documented below.
"""
return pulumi.get(self, "encryption_configuration")
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
A boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable.
"""
return pulumi.get(self, "force_destroy")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the database to create.
"""
return pulumi.get(self, "name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
import unittest
from selenium import webdriver
class Typos(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path = r'./chromedriver.exe')
driver = self.driver
driver.get('http://the-internet.herokuapp.com/')
driver.find_element_by_link_text('Typos').click()
    def test_find_typo(self):
        driver = self.driver
        correct_text = 'Sometimes you\'ll see a typo, other times you won\'t.'
        paragraph_to_check = driver.find_element_by_css_selector('#content > div > p:nth-child(3)')
        text_to_check = paragraph_to_check.text
        print(text_to_check)
        tries = 1
        # Keep refreshing until the typo-free sentence is displayed.
        while text_to_check != correct_text:
            driver.refresh()
            tries += 1
            paragraph_to_check = driver.find_element_by_css_selector('#content > div > p:nth-child(3)')
            text_to_check = paragraph_to_check.text
        self.assertEqual(text_to_check, correct_text)
        print(f'it took {tries} tries to find the typo-free text')
def tearDown(self):
self.driver.close()
if __name__ == '__main__':
unittest.main()
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'isValid' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def isValid(s):
    # Count each character, then count how often each frequency occurs.
    freq = {i: s.count(i) for i in set(s)}
    fv = list(freq.values())
    ffreq = {v: fv.count(v) for v in set(fv)}
    print("s:", s, "\nfreq:", freq, "\nfv:", fv, "\nffreq:", ffreq)
    if len(ffreq) > 2:
        # More than two distinct frequencies can never be fixed by one removal.
        return "NO"
    elif len(ffreq) <= 1:
        # All characters already occur the same number of times.
        return "YES"
    else:
        mx = max(ffreq)
        mn = min(ffreq)
        print("mx:", mx, " mn:", mn)
        if mn == 1 and ffreq[mn] == 1:
            # A single character occurring once can simply be removed.
            return "YES"
        if mx - mn > 1:
            return "NO"
        # Frequencies differ by exactly one: valid only if exactly one character
        # has the higher frequency (remove one occurrence of it).
        return "YES" if ffreq[mx] == 1 else "NO"
if __name__ == '__main__':
    s = input()
    result = isValid(s)
    # Write the answer to stdout instead of the Windows-only 'CON' device.
    sys.stdout.write(result + '\n')
|
#
# Account information
#
# Copy this file to account.py and fill in the real values for the Minecraft account.
#
#
#
#
account = {
"user" : 'your@login.com',
"password" : 'your_password',
"master" : 'minecraft_name_who_the_bot_will_listen_to',
"host" : 'exampleserver.whatever.com',
"version" : '1.16.5',
}
#
# List of world locations you can use in commands
#
locations = {
"minedrop": [29,13,-19],
"farmdrop": [42.5,89,-15.5],
"minecenter": [20.5,12,-23.5],
}
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib import messages
from django.template import Context
from .models import Court, CourtManager, SelectedCourt
from apps.users.models import User
from datetime import datetime
from decimal import Decimal
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
return render(request, "courts/index.html")
def main(request):
context = {
'court' : Court.objects.all()
}
return render(request, "courts/main.html", context)
def court(request, courtid):
context = {
'one_court' : Court.objects.get(id=courtid)
}
return render(request, "courts/courts.html", context)
def select(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/index.html", context)
context = {
'courts' : Court.objects.all()
}
return render(request, "courts/select.html", context)
"""
This is logic that checks the times that a court has been reserved.
"""
def schedule(request):
    if 'user_id' not in request.session:
        context = {
            'message' : "Please login"
        }
        return render(request, "courts/index.html", context)
    usr = User(id=request.session['user_id'])
    crt = Court.objects.get(id=request.POST['courtid'])
    intime = request.POST['timein']
    outtime = request.POST['timeout']
    dform = "%Y-%m-%d %H:%M"
    if intime > outtime:
        context = {
            'courts' : Court.objects.all(),
            'message': "End date/time is earlier than begin date/time."
        }
    elif intime <= datetime.now().strftime(dform):
        context = {
            'courts' : Court.objects.all(),
            'message': "Begin date/time is in the past."
        }
    else:
        diff = datetime.strptime(outtime, dform) - datetime.strptime(intime, dform)
        # Use total_seconds() so reservations spanning more than a day are measured correctly.
        hours = diff.total_seconds() / 3600
        if 0 < hours < 4:
            total_price = Decimal(hours) * crt.price
            SelectedCourt.objects.create(user=usr, court=crt, timein=intime, timeout=outtime, total_price=total_price)
            context = {
                'courts' : Court.objects.all()
            }
        else:
            context = {
                'courts' : Court.objects.all(),
                'message': "Scheduled time is too long."
            }
    return render(request, "courts/select.html", context)
"""
This presents a dashboard which shows court reservations.
"""
def dashboard(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/index.html", context)
usr = User(id=request.session['user_id'])
context = {
'court_times' : SelectedCourt.objects.filter(user=usr)
}
return render(request, "courts/dashboard.html", context)
def search(request):
return render(request, "courts/search.html")
def searchzip(request):
    return HttpResponse("HELLO WORLD")
|
import unittest
import os
import numpy as np
import pandas as pd
from scipy.signal import StateSpace
import matplotlib.pyplot as plt
import mshoot
def cfun(xdf, ydf):
"""
:param ydf: DataFrame, model states
:param ydf: DataFrame, model outputs
:return: float
"""
qout = ydf['qout'].values
c = np.sum(qout ** 2) / qout.size
return c
class TestMPC(unittest.TestCase):
def setUp(self):
fmupath = os.path.join('resources', 'fmus', 'R1C1', 'R1C1.fmu')
parameters = {'C': 1e6, 'R': 0.01}
self.model = mshoot.SimFMU(
fmupath,
outputs=['qout', 'Tr'],
states=['heatCapacitor.T'],
parameters=parameters,
verbose=False)
def tearDown(self):
pass
def test_mpc(self):
# Inputs
t = np.arange(0, 3600 * 10, 3600)
inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
inp['q'] = np.full(t.size, 0)
inp['Tout'] = np.full(t.size, 273.15)
# Bounds
ubounds = [(0., 4000.)]
xbounds = [(293.15, 296.15)]
# Initial state
x0 = [293.65]
# Optimization
mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
u, xctr, xemu, yemu, uhist = mpc.optimize(
model=self.model,
inp_ctr=inp.copy(),
inp_emu=inp.copy(),
free=['q'],
ubounds=ubounds,
xbounds=xbounds,
x0=x0,
ynominal=[4000., 293.15],
step=1,
horizon=3
)
# ax = u.plot(title='u')
# ax.set_ylim(0, 4000)
# ax = xemu.plot(title='xemu')
# ax.set_ylim(292.15, 296.15)
# plt.show()
# Assert the solution is correct
self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3) # Ideally, should be even closer
# Validate emulation with optimized control
inp['q'] = u['q']
yvld, xvld = self.model.simulate(inp, x0)
# self.assertTrue(((yvld - yemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
# * FMU results might be shifted in time by one time step.
# The reason is unknown, but FMU- or pyFMI-specific.
def test_mpc_inp_clb(self):
# Inputs
t = np.arange(0, 3600 * 10, 3600)
inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
inp['q'] = np.full(t.size, 0)
inp['Tout'] = np.full(t.size, 273.15)
# Bounds
ubounds = [(0., 4000.)]
xbounds = [(293.15, 296.15)]
# Initial state
x0 = [293.65]
# Input callback function
def inp_clb(index):
return inp.loc[index]
# Optimization
mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
u, xctr, xemu, yemu, uhist = mpc.optimize(
model=self.model,
inp_ctr=None,
inp_clb=inp_clb,
inp_emu=inp.copy(),
free=['q'],
ubounds=ubounds,
xbounds=xbounds,
x0=x0,
ynominal=[4000., 293.15],
step=1,
horizon=3
)
# Assert the solution is correct
self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3) # Ideally, should be even closer
# Validate emulation with optimized control
inp['q'] = u['q']
yvld, xvld = self.model.simulate(inp, x0)
# self.assertTrue(((yvld - yemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
# * FMU results might be shifted in time by one time step.
# The reason is unknown, but FMU- or pyFMI-specific.
# def test_2_inputs(self):
# """THE SOLVER HAS PROBLEMS WITH GETTING THE RIGHT SOLUTION. (?)"""
# # Inputs
# t = np.arange(0, 3600 * 10, 3600)
# inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
# inp['q'] = np.full(t.size, 0)
# inp['Tout'] = np.full(t.size, 273.15)
# # Bounds
# ubounds = [(0., 10000.), (272.15, 275.)] # <-- Solver should try to yield Tout = 275
# xbounds = [(293.15, 296.15)]
# # Initial state
# x0 = [293.65]
# # Optimization
# mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
# u, xctr, xemu, yemu, uhist = mpc.optimize(
# model=self.model,
# inp=inp,
# free=['q', 'Tout'],
# ubounds=ubounds,
# xbounds=xbounds,
# x0=x0,
# unominal=[4000., 273.15],
# ynominal=[4000., 293.15],
# step=1,
# horizon=4
# )
# ax = u.plot(title='u', subplots=True)
# ax = xemu.plot(title='xemu')
# plt.show()
# # Assert the solution is correct
# self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.01)
# # Validate emulation with optimized control
# inp['q'] = u['q']
# yvld, xvld = self.model.simulate(inp, x0)
# # self.assertTrue((yvld - yemu < 1e-3).all().all()) # Might not be true for FMUs *
# # self.assertTrue((xvld - xemu < 1e-3).all().all()) # Might not be true for FMUs *
# # * FMU results might be shifted in time by one time step.
# # The reason is unknown, but FMU- or pyFMI-specific.
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
The main user-facing module of ``edges-cal``.
This module contains wrappers around lower-level functions in other modules, providing
a one-stop interface for everything related to calibration.
"""
from __future__ import annotations
import attr
import h5py
import numpy as np
import tempfile
import warnings
import yaml
from abc import ABCMeta, abstractmethod
from astropy.convolution import Gaussian1DKernel, convolve
from copy import copy
from edges_io import io
from edges_io.logging import logger
from functools import lru_cache
from hashlib import md5
from matplotlib import pyplot as plt
from pathlib import Path
from scipy.interpolate import InterpolatedUnivariateSpline as Spline
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from . import DATA_PATH
from . import modelling as mdl
from . import receiver_calibration_func as rcf
from . import reflection_coefficient as rc
from . import s11_correction as s11
from . import tools
from . import types as tp
from . import xrfi
from .cached_property import cached_property
from .tools import EdgesFrequencyRange, FrequencyRange
class S1P:
def __init__(
self,
s1p: tp.PathLike | io.S1P,
f_low: float | None = None,
f_high: float | None = None,
switchval: int | None = None,
):
"""
An object representing the measurements of a VNA.
The measurements are read in via a .s1p file
Parameters
----------
s1p : str, Path or :class:`io.S1P`
            The path to a valid .s1p file containing VNA measurements, or an
            :class:`io.S1P` instance.
f_low, f_high : float
The minimum/maximum frequency to keep.
switchval : int
The standard value of the switch for the component.
"""
try:
s1p = Path(s1p)
self.s1p = io.S1P(s1p)
except TypeError:
if isinstance(s1p, io.S1P):
self.s1p = s1p
else:
raise TypeError(
"s1p must be a path to an s1p file, or an io.S1P object"
)
self.load_name = self.s1p.kind
self.repeat_num = self.s1p.repeat_num
spec = self.s1p.s11
f = self.s1p.freq
self.freq = FrequencyRange(f, f_low, f_high)
self.s11 = spec[self.freq.mask]
self._switchval = switchval
@cached_property
def switchval(self):
"""The standard value of the switch for the component."""
if self._switchval is not None:
return self._switchval * np.ones_like(self.freq.freq)
else:
return None
# For backwards compatibility
VNA = S1P
class _S11Base(metaclass=ABCMeta):
default_nterms = {
"ambient": 37,
"hot_load": 37,
"open": 105,
"short": 105,
"AntSim2": 55,
"AntSim3": 55,
"AntSim4": 55,
"lna": 37,
}
def __init__(
self,
*,
load_s11: Union[io._S11SubDir, io.ReceiverReading],
f_low: Optional[float] = None,
f_high: Optional[float] = None,
n_terms: Optional[int] = None,
model_type: tp.Modelable = "fourier",
):
"""
A class representing relevant switch corrections for a load.
Parameters
----------
load_s11 : :class:`io._S11SubDir`
An instance of the basic ``io`` S11 folder.
f_low : float
Minimum frequency to use. Default is all frequencies.
f_high : float
Maximum frequency to use. Default is all frequencies.
        n_terms : int
            The number of terms to use in fitting a model to the S11 (used to both
            smooth and interpolate the data). Must be odd.
        model_type : str
            The type of model to fit to the S11 measurements (default "fourier").
"""
self.load_s11 = load_s11
self.base_path = self.load_s11.path
try:
self.load_name = getattr(self.load_s11, "load_name")
except AttributeError:
self.load_name = None
self.run_num = self.load_s11.run_num
switchvals = {"open": 1, "short": -1, "match": 0}
for name in self.load_s11.STANDARD_NAMES:
setattr(
self,
name.lower(),
S1P(
s1p=self.load_s11.children[name.lower()],
f_low=f_low,
f_high=f_high,
switchval=switchvals.get(name.lower()),
),
)
# Expose one of the frequency objects
self.freq = self.open.freq
self._nterms = int(n_terms) if n_terms is not None else None
self.model_type = model_type
@cached_property
def n_terms(self):
"""Number of terms to use (by default) in modelling the S11.
Raises
------
ValueError
If n_terms is even.
"""
res = self._nterms or self.default_nterms.get(self.load_name, None)
if not (isinstance(res, int) and res % 2):
raise ValueError(
f"n_terms must be odd for S11 models. For {self.load_name} got "
f"n_terms={res}."
)
return res
@classmethod
@abstractmethod
def from_path(cls, **kwargs):
pass # pragma: no cover
@cached_property
@abstractmethod
def measured_load_s11_raw(self):
pass # pragma: no cover
@cached_property
def corrected_load_s11(self) -> np.ndarray:
"""The measured S11 of the load, corrected for internal switch."""
return self.measured_load_s11_raw
@lru_cache()
def get_corrected_s11_model(
self,
n_terms: int | None = None,
model_type: tp.Modelable | None = None,
):
"""Generate a callable model for the S11 correction.
This should closely match :method:`s11_correction`.
Parameters
----------
n_terms : int
Number of terms used in the fourier-based model. Not necessary if
`load_name` is specified in the class.
Returns
-------
callable :
A function of one argument, f, which should be a frequency in the same units
as `self.freq.freq`.
Raises
------
ValueError
If n_terms is not an integer, or not odd.
"""
n_terms = n_terms or self.n_terms
model_type = mdl.get_mdl(model_type or self.model_type)
model = model_type(
n_terms=n_terms,
transform=mdl.UnitTransform(range=[self.freq.min, self.freq.max]),
)
emodel = model.at(x=self.freq.freq)
cmodel = mdl.ComplexMagPhaseModel(mag=emodel, phs=emodel)
s11_correction = self.corrected_load_s11
return cmodel.fit(ydata=s11_correction)
@cached_property
def s11_model(self) -> callable:
"""The S11 model."""
return self.get_corrected_s11_model()
def plot_residuals(
self,
fig=None,
ax=None,
color_abs="C0",
color_diff="g",
label=None,
title=None,
decade_ticks=True,
ylabels=True,
) -> plt.Figure:
"""
Make a plot of the residuals of the S11 model and the correction data.
Residuals obtained via :func:`get_corrected_s11_model`
Returns
-------
fig :
Matplotlib Figure handle.
"""
if fig is None or ax is None or len(ax) != 4:
fig, ax = plt.subplots(
4, 1, sharex=True, gridspec_kw={"hspace": 0.05}, facecolor="w"
)
if decade_ticks:
for axx in ax:
axx.xaxis.set_ticks(
[50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180],
minor=[],
)
axx.grid(True)
ax[-1].set_xlabel("Frequency [MHz]")
corr = self.corrected_load_s11
model = self.s11_model(self.freq.freq)
ax[0].plot(
self.freq.freq, 20 * np.log10(np.abs(model)), color=color_abs, label=label
)
if ylabels:
ax[0].set_ylabel(r"$|S_{11}|$")
ax[1].plot(self.freq.freq, np.abs(model) - np.abs(corr), color_diff)
if ylabels:
ax[1].set_ylabel(r"$\Delta |S_{11}|$")
ax[2].plot(
self.freq.freq, np.unwrap(np.angle(model)) * 180 / np.pi, color=color_abs
)
if ylabels:
ax[2].set_ylabel(r"$\angle S_{11}$")
ax[3].plot(
self.freq.freq,
np.unwrap(np.angle(model)) - np.unwrap(np.angle(corr)),
color_diff,
)
if ylabels:
ax[3].set_ylabel(r"$\Delta \angle S_{11}$")
        if title is None:
            title = f"{self.load_name} Reflection Coefficient Models"
        if title:
            fig.suptitle(title, fontsize=14)
if label:
ax[0].legend()
return fig
class LoadS11(_S11Base):
def __init__(self, *, internal_switch: s11.InternalSwitch, **kwargs):
"""S11 for a lab calibration load.
Parameters
----------
internal_switch : :class:`s11.InternalSwitch`
The internal switch state corresponding to the load.
Other Parameters
----------------
Passed through to :class:`_S11Base`.
"""
assert isinstance(internal_switch, s11.InternalSwitch)
self.internal_switch = internal_switch
super().__init__(**kwargs)
@classmethod
def from_path(
cls,
load_name: str,
path: tp.PathLike,
run_num_load: int = 1,
run_num_switch: int = 1,
repeat_num_load: int = None,
repeat_num_switch: int = None,
resistance: float = 50.166,
model_internal_switch: mdl.Model = attr.NOTHING,
**kwargs,
):
"""
Create a new object from a given path and load name.
Parameters
----------
load_name : str
The name of the load to create.
path : str or Path
The path to the overall calibration observation.
        run_num_load : int
            The run number to use for the load S11 (default 1).
        run_num_switch : int
            The run number to use for the switching-state S11 (default 1).
kwargs
All other arguments are passed through to the constructor of
:class:`LoadS11`.
Returns
-------
s11 : :class:`LoadS11`
The S11 of the load.
"""
antsim = load_name.startswith("AntSim")
path = Path(path)
if not antsim:
load_name = io.LOAD_ALIASES[load_name]
s11_load_dir = (io.AntSimS11 if antsim else io.LoadS11)(
path / "S11" / f"{load_name}{run_num_load:02}", repeat_num=repeat_num_load
)
internal_switch = s11.InternalSwitch(
data=io.SwitchingState(
path / "S11" / f"SwitchingState{run_num_switch:02}",
repeat_num=repeat_num_switch,
),
resistance=resistance,
model=model_internal_switch,
)
return cls(load_s11=s11_load_dir, internal_switch=internal_switch, **kwargs)
@cached_property
def measured_load_s11_raw(self):
"""The measured S11 of the load, calculated from raw internal standards."""
return rc.de_embed(
self.open.switchval,
self.short.switchval,
self.match.switchval,
self.open.s11,
self.short.s11,
self.match.s11,
self.external.s11,
)[0]
@cached_property
def corrected_load_s11(self) -> np.ndarray:
"""The measured S11 of the load, corrected for the internal switch."""
return rc.gamma_de_embed(
self.internal_switch.s11_model(self.freq.freq),
self.internal_switch.s12_model(self.freq.freq),
self.internal_switch.s22_model(self.freq.freq),
self.measured_load_s11_raw,
)
class LNA(_S11Base):
def __init__(
self, load_s11: io.ReceiverReading, resistance: float = 50.009, **kwargs
):
"""A special case of :class:`SwitchCorrection` for the LNA.
Parameters
----------
load_s11 : :class:`io.ReceiverReading`
The Receiver Reading S11 measurements.
resistance : float
The resistance of the receiver.
kwargs :
All other arguments passed to :class:`SwitchCorrection`.
"""
super().__init__(load_s11=load_s11, **kwargs)
self.resistance = resistance
self.load_name = "lna"
self.repeat_num = self.load_s11.repeat_num
@classmethod
def from_path(
cls,
path: Union[str, Path],
repeat_num: Optional[int] = None,
run_num: int = 1,
**kwargs,
):
"""
Create an instance from a given path.
Parameters
----------
path : str or Path
Path to overall Calibration Observation.
        repeat_num : int
            The repeat number of the ReceiverReading to use (default latest available).
        run_num : int
            The run number of the ReceiverReading to use (default 1).
kwargs
All other arguments passed through to :class:`SwitchCorrection`.
Returns
-------
lna : :class:`LNA`
The LNA object.
"""
path = Path(path)
load_s11 = io.ReceiverReading(
path=path / "S11" / f"ReceiverReading{run_num:02}",
repeat_num=repeat_num,
fix=False,
)
return cls(load_s11=load_s11, **kwargs)
@cached_property
def external(self):
"""VNA S11 measurements for the load."""
return S1P(
self.load_s11.children["receiverreading"],
f_low=self.freq.freq.min(),
f_high=self.freq.freq.max(),
)
@cached_property
def measured_load_s11_raw(self):
"""Measured S11 of of the LNA."""
# Models of standards
oa, sa, la = rc.agilent_85033E(
self.freq.freq, self.resistance, match_delay=True
)
# Correction at switch
return rc.de_embed(
oa, sa, la, self.open.s11, self.short.s11, self.match.s11, self.external.s11
)[0]
class LoadSpectrum:
def __init__(
self,
spec_obj: List[io.Spectrum],
resistance_obj: io.Resistance,
switch_correction: Optional[LoadS11] = None,
f_low: float = 40.0,
f_high: Optional[float] = None,
ignore_times_percent: float = 5.0,
rfi_removal: str = "1D2D",
rfi_kernel_width_time: int = 16,
rfi_kernel_width_freq: int = 16,
rfi_threshold: float = 6,
cache_dir: Optional[Union[str, Path]] = None,
t_load: float = 300.0,
t_load_ns: float = 400.0,
):
"""A class representing a measured spectrum from some Load.
Parameters
----------
spec_obj : :class:`io.Spectrum`
The base Spectrum object defining the on-disk spectra.
resistance_obj : :class:`io.Resistance`
The base Resistance object defining the on-disk resistance measurements.
switch_correction : :class:`SwitchCorrection`
A `SwitchCorrection` for this particular load. If not given, will be
constructed automatically.
f_low : float
Minimum frequency to keep.
f_high : float
Maximum frequency to keep.
        ignore_times_percent : float
            Must be between 0 and 100. Percentage of time-samples in a file to reject
            from the start of the file.
rfi_removal : str
Either '1D', '2D' or '1D2D'. If given, will perform median and mean-filtered
xRFI over either the
2D waterfall, or integrated 1D spectrum. The latter is usually reasonable
for calibration sources, while the former is good for field data. "1D2D"
is a hybrid approach in which the variance per-frequency is determined
from the 2D data, but filtering occurs only over frequency.
rfi_kernel_width_time : int
The kernel width for the detrending of data for
RFI removal in the time dimension (only used if `rfi_removal` is "2D").
rfi_kernel_width_freq : int
The kernel width for the detrending of data for
RFI removal in the frequency dimension.
rfi_threshold : float
The threshold (in equivalent standard deviation units) above which to
flag data as RFI.
cache_dir : str or Path
An alternative directory in which to load/save cached reduced files. By
default, the same as the path to the .mat files. If you don't have
write permission there, it may be useful to use an alternative path.
t_load
Fiducial guess for the temperature of the internal load.
t_load_ns
Fiducial guess for the temperature of the internal load + noise source.
"""
self.spec_obj = spec_obj
self.resistance_obj = resistance_obj
self.load_name = self.spec_obj[0].load_name
assert (
self.load_name == self.resistance_obj.load_name
), "spec and resistance load_name must be the same"
self.spec_files = (spec_obj.path for spec_obj in self.spec_obj)
self.resistance_file = self.resistance_obj.path
self.run_num = self.spec_obj[0].run_num
self.cache_dir = Path(cache_dir or ".")
self.rfi_kernel_width_time = rfi_kernel_width_time
self.rfi_kernel_width_freq = rfi_kernel_width_freq
self.rfi_threshold = rfi_threshold
assert rfi_removal in [
"1D",
"2D",
"1D2D",
False,
None,
], "rfi_removal must be either '1D', '2D', '1D2D, or False/None"
self.rfi_removal = rfi_removal
self.switch_correction = switch_correction
self.ignore_times_percent = ignore_times_percent
self.freq = EdgesFrequencyRange(f_low=f_low, f_high=f_high)
self.t_load = t_load
self.t_load_ns = t_load_ns
@classmethod
def from_load_name(
cls,
load_name: str,
direc: Union[str, Path],
run_num: Optional[int] = None,
filetype: Optional[str] = None,
**kwargs,
):
"""Instantiate the class from a given load name and directory.
Parameters
----------
load_name : str
The load name (one of 'ambient', 'hot_load', 'open' or 'short').
direc : str or Path
The top-level calibration observation directory.
run_num : int
The run number to use for the spectra.
filetype : str
The filetype to look for (acq or h5).
kwargs :
All other arguments to :class:`LoadSpectrum`.
Returns
-------
:class:`LoadSpectrum`.
"""
direc = Path(direc)
spec = io.Spectrum.from_load(
load=load_name, direc=direc / "Spectra", run_num=run_num, filetype=filetype
)
res = io.Resistance.from_load(
load=load_name,
direc=direc / "Resistance",
run_num=run_num,
filetype=filetype,
)
return cls(spec_obj=spec, resistance_obj=res, **kwargs)
@cached_property
def averaged_Q(self) -> np.ndarray:
"""Ratio of powers averaged over time.
Notes
-----
The formula is
.. math:: Q = (P_source - P_load)/(P_noise - P_load)
"""
# TODO: should also get weights!
spec = self._ave_and_var_spec[0]["Q"]
if self.rfi_removal == "1D":
flags, _ = xrfi.xrfi_medfilt(
spec, threshold=self.rfi_threshold, kf=self.rfi_kernel_width_freq
)
spec[flags] = np.nan
return spec
@property
def variance_Q(self) -> np.ndarray:
"""Variance of Q across time (see averaged_Q)."""
return self._ave_and_var_spec[1]["Q"]
@property
def averaged_spectrum(self) -> np.ndarray:
"""T* = T_noise * Q + T_load."""
return self.averaged_Q * self.t_load_ns + self.t_load
@property
def variance_spectrum(self) -> np.ndarray:
"""Variance of uncalibrated spectrum across time (see averaged_spectrum)."""
return self.variance_Q * self.t_load_ns ** 2
@property
def ancillary(self) -> dict:
"""Ancillary measurement data."""
return [d.data["meta"] for d in self.spec_obj]
@property
def averaged_p0(self) -> np.ndarray:
"""Power of the load, averaged over time."""
return self._ave_and_var_spec[0]["p0"]
@property
def averaged_p1(self) -> np.ndarray:
"""Power of the noise-source, averaged over time."""
return self._ave_and_var_spec[0]["p1"]
@property
def averaged_p2(self) -> np.ndarray:
"""Power of the load plus noise-source, averaged over time."""
return self._ave_and_var_spec[0]["p2"]
@property
def variance_p0(self) -> np.ndarray:
"""Variance of the load, averaged over time."""
return self._ave_and_var_spec[1]["p0"]
@property
def variance_p1(self) -> np.ndarray:
"""Variance of the noise-source, averaged over time."""
return self._ave_and_var_spec[1]["p1"]
@property
def variance_p2(self) -> np.ndarray:
"""Variance of the load plus noise-source, averaged over time."""
return self._ave_and_var_spec[1]["p2"]
@property
def n_integrations(self) -> int:
"""The number of integrations recorded for the spectrum (after ignoring)."""
return self._ave_and_var_spec[2]
def _get_integrated_filename(self):
"""Determine a unique filename for the reduced data of this instance."""
params = (
self.rfi_threshold,
self.rfi_kernel_width_time,
self.rfi_kernel_width_freq,
self.rfi_removal,
self.ignore_times_percent,
self.freq.min,
self.freq.max,
self.t_load,
self.t_load_ns,
tuple(path.name for path in self.spec_files),
)
hsh = md5(str(params).encode()).hexdigest()
return self.cache_dir / f"{self.load_name}_{hsh}.h5"
@cached_property
def _ave_and_var_spec(self) -> Tuple[Dict, Dict, int]:
"""Get the mean and variance of the spectra."""
fname = self._get_integrated_filename()
kinds = ["p0", "p1", "p2", "Q"]
if fname.exists():
logger.info(
f"Reading in previously-created integrated {self.load_name} spectra..."
)
means = {}
variances = {}
with h5py.File(fname, "r") as fl:
for kind in kinds:
means[kind] = fl[kind + "_mean"][...]
variances[kind] = fl[kind + "_var"][...]
n_integrations = fl.attrs.get("n_integrations", 0)
return means, variances, n_integrations
logger.info(f"Reducing {self.load_name} spectra...")
spectra = self.get_spectra()
means = {}
variances = {}
for key, spec in spectra.items():
# Weird thing where there are zeros in the spectra.
spec[spec == 0] = np.nan
mean = np.nanmean(spec, axis=1)
var = np.nanvar(spec, axis=1)
n_intg = spec.shape[1]
if self.rfi_removal == "1D2D":
nsample = np.sum(~np.isnan(spec), axis=1)
varfilt = xrfi.flagged_filter(
var, size=2 * self.rfi_kernel_width_freq + 1
)
resid = mean - xrfi.flagged_filter(
mean, size=2 * self.rfi_kernel_width_freq + 1
)
flags = np.logical_or(
resid > self.rfi_threshold * np.sqrt(varfilt / nsample),
var - varfilt
> self.rfi_threshold * np.sqrt(2 * varfilt ** 2 / (nsample - 1)),
)
mean[flags] = np.nan
var[flags] = np.nan
means[key] = mean
variances[key] = var
if not self.cache_dir.exists():
self.cache_dir.mkdir()
with h5py.File(fname, "w") as fl:
logger.info(f"Saving reduced spectra to cache at {fname}")
for kind in kinds:
fl[kind + "_mean"] = means[kind]
fl[kind + "_var"] = variances[kind]
fl.attrs["n_integrations"] = n_intg
return means, variances, n_intg
def get_spectra(self) -> dict:
"""Read all spectra and remove RFI.
Returns
-------
dict :
A dictionary with keys being different powers (p1, p2, p3, Q), and values
being ndarrays.
"""
spec = self._read_spectrum()
if self.rfi_removal == "2D":
for key, val in spec.items():
# Need to set nans and zeros to inf so that median/mean detrending
# can work.
val[np.isnan(val)] = np.inf
if key != "Q":
val[val == 0] = np.inf
flags, _ = xrfi.xrfi_medfilt(
val,
threshold=self.rfi_threshold,
kt=self.rfi_kernel_width_time,
kf=self.rfi_kernel_width_freq,
)
val[flags] = np.nan
spec[key] = val
return spec
def _read_spectrum(self) -> dict:
"""
Read the contents of the spectrum files into memory.
Removes a starting percentage of times, and masks out certain frequencies.
Returns
-------
dict :
A dictionary of the contents of the file. Usually p0, p1, p2 (un-normalised
powers of source, load, and load+noise respectively), and ant_temp (the
uncalibrated, but normalised antenna temperature).
"""
data = [spec_obj.data for spec_obj in self.spec_obj]
n_times = sum(len(d["time_ancillary"]["times"]) for d in data)
out = {
"p0": np.empty((len(self.freq.freq), n_times)),
"p1": np.empty((len(self.freq.freq), n_times)),
"p2": np.empty((len(self.freq.freq), n_times)),
"Q": np.empty((len(self.freq.freq), n_times)),
}
index_start_spectra = int((self.ignore_times_percent / 100) * n_times)
for key, val in out.items():
nn = 0
for d in data:
n = len(d["time_ancillary"]["times"])
val[:, nn : (nn + n)] = d["spectra"][key][self.freq.mask]
nn += n
out[key] = val[:, index_start_spectra:]
return out
@cached_property
def thermistor(self) -> np.ndarray:
"""The thermistor readings."""
ary = self.resistance_obj.read()[0]
return ary[int((self.ignore_times_percent / 100) * len(ary)) :]
@cached_property
def thermistor_temp(self):
"""The associated thermistor temperature in K."""
return rcf.temperature_thermistor(self.thermistor["load_resistance"])
@cached_property
def temp_ave(self):
"""Average thermistor temperature (over time and frequency)."""
return np.nanmean(self.thermistor_temp)
def write(self, path=None):
"""
Write a HDF5 file containing the contents of the LoadSpectrum.
Parameters
----------
path : str
Directory into which to save the file, or full path to file.
If a directory, filename will be <load_name>_averaged_spectrum.h5.
Default is current directory.
"""
path = Path(path or ".")
# Allow to pass in a directory name *or* full path.
if path.is_dir():
path /= f"{self.load_name}_averaged_spectrum.h5"
with h5py.File(path, "w") as fl:
fl.attrs["load_name"] = self.load_name
fl["freq"] = self.freq.freq
fl["averaged_raw_spectrum"] = self.averaged_spectrum
fl["temperature"] = self.thermistor_temp
def plot(
self, thermistor=False, fig=None, ax=None, xlabel=True, ylabel=True, **kwargs
):
"""
Make a plot of the averaged uncalibrated spectrum associated with this load.
Parameters
----------
thermistor : bool
Whether to plot the thermistor temperature on the same axis.
fig : Figure
Optionally, pass a matplotlib figure handle which will be used to plot.
ax : Axis
Optional, pass a matplotlib Axis handle which will be added to.
xlabel : bool
Whether to make an x-axis label.
ylabel : bool
Whether to plot the y-axis label
kwargs :
All other arguments are passed to `plt.subplots()`.
"""
if fig is None:
fig, ax = plt.subplots(
1, 1, facecolor=kwargs.pop("facecolor", "white"), **kwargs
)
if thermistor:
ax.plot(self.freq.freq, self.thermistor_temp)
if ylabel:
ax.set_ylabel("Temperature [K]")
else:
ax.plot(self.freq.freq, self.averaged_spectrum)
if ylabel:
ax.set_ylabel("$T^*$ [K]")
ax.grid(True)
if xlabel:
ax.set_xlabel("Frequency [MHz]")
class HotLoadCorrection:
_kinds = {"s11": 0, "s12": 1, "s22": 2}
def __init__(
self,
path: Union[str, Path] = ":semi_rigid_s_parameters_WITH_HEADER.txt",
f_low: Optional[float] = None,
f_high: Optional[float] = None,
n_terms: int = 21,
):
"""
Corrections for the hot load.
Measurements required to define the HotLoad temperature, from Monsalve et al.
(2017), Eq. 8+9.
Parameters
----------
path : str or Path, optional
Path to a file containing measurements of the semi-rigid cable reflection
parameters. A preceding colon (:) indicates to prefix with DATA_PATH.
The default file was measured in 2015, but there is also a file included
that can be used from 2017: ":semi_rigid_s_parameters_2017.txt".
f_low, f_high : float
Lowest/highest frequency to retain from measurements.
"""
# Get the path to the S11 file.
if not isinstance(path, Path):
path = DATA_PATH / path[1:] if path[0] == ":" else Path(path)
self.path = path
data = np.genfromtxt(self.path)
f = data[:, 0]
self.freq = FrequencyRange(f, f_low, f_high)
if data.shape[1] == 7: # Original file from 2015
self.data = data[self.freq.mask, 1::2] + 1j * data[self.freq.mask, 2::2]
elif data.shape[1] == 6: # File from 2017
self.data = np.array(
[
data[self.freq.mask, 1] + 1j * data[self.freq.mask, 2],
data[self.freq.mask, 3],
data[self.freq.mask, 4] + 1j * data[self.freq.mask, 5],
]
).T
else:
raise IOError("Semi-Rigid Cable file has wrong data format.")
self.n_terms = int(n_terms)
def _get_model_kind(self, kind):
model = mdl.Polynomial(
n_terms=self.n_terms,
transform=mdl.UnitTransform(range=(self.freq.min, self.freq.max)),
)
model = mdl.ComplexMagPhaseModel(mag=model, phs=model)
return model.fit(xdata=self.freq.freq, ydata=self.data[:, self._kinds[kind]])
@cached_property
def s11_model(self):
"""The reflection coefficient."""
return self._get_model_kind("s11")
@cached_property
def s12_model(self):
"""The transmission coefficient."""
return self._get_model_kind("s12")
@cached_property
def s22_model(self):
"""The reflection coefficient from the other side."""
return self._get_model_kind("s22")
def power_gain(self, freq: np.ndarray, hot_load_s11: LoadS11) -> np.ndarray:
"""
Calculate the power gain.
Parameters
----------
freq : np.ndarray
The frequencies.
hot_load_s11 : :class:`LoadS11`
The S11 of the hot load.
Returns
-------
gain : np.ndarray
The power gain as a function of frequency.
"""
assert isinstance(
hot_load_s11, LoadS11
), "hot_load_s11 must be a switch correction"
assert (
hot_load_s11.load_name == "hot_load"
), "hot_load_s11 must be a hot_load s11"
return self.get_power_gain(
{
"s11": self.s11_model(freq),
"s12s21": self.s12_model(freq),
"s22": self.s22_model(freq),
},
hot_load_s11.s11_model(freq),
)
@staticmethod
def get_power_gain(
semi_rigid_sparams: dict, hot_load_s11: np.ndarray
) -> np.ndarray:
"""Define Eq. 9 from M17.
Parameters
----------
semi_rigid_sparams : dict
A dictionary of reflection coefficient measurements as a function of
frequency for the semi-rigid cable.
hot_load_s11 : array-like
The S11 measurement of the hot_load.
Returns
-------
gain : np.ndarray
The power gain.
"""
rht = rc.gamma_de_embed(
semi_rigid_sparams["s11"],
semi_rigid_sparams["s12s21"],
semi_rigid_sparams["s22"],
hot_load_s11,
)
return (
np.abs(semi_rigid_sparams["s12s21"])
* (1 - np.abs(rht) ** 2)
/ (
(np.abs(1 - semi_rigid_sparams["s11"] * rht)) ** 2
* (1 - np.abs(hot_load_s11) ** 2)
)
)
class Load:
def __init__(
self,
spectrum: LoadSpectrum,
reflections: LoadS11,
hot_load_correction: Optional[HotLoadCorrection] = None,
ambient: Optional[LoadSpectrum] = None,
):
"""Wrapper class containing all relevant information for a given load.
Parameters
----------
spectrum : :class:`LoadSpectrum`
The spectrum for this particular load.
reflections : :class:`SwitchCorrection`
The S11 measurements for this particular load.
hot_load_correction : :class:`HotLoadCorrection`
If this is a hot load, provide a hot load correction.
ambient : :class:`LoadSpectrum`
If this is a hot load, need to provide an ambient spectrum to correct it.
"""
assert isinstance(spectrum, LoadSpectrum), "spectrum must be a LoadSpectrum"
        assert isinstance(reflections, LoadS11), "reflections must be a LoadS11"
assert spectrum.load_name == reflections.load_name
self.spectrum = spectrum
self.reflections = reflections
self.load_name = spectrum.load_name
self.t_load = self.spectrum.t_load
self.t_load_ns = self.spectrum.t_load_ns
if self.load_name == "hot_load":
self._correction = hot_load_correction
self._ambient = ambient
@classmethod
def from_path(
cls,
path: Union[str, Path],
load_name: str,
f_low: Optional[float] = None,
f_high: Optional[float] = None,
reflection_kwargs: Optional[dict] = None,
spec_kwargs: Optional[dict] = None,
):
"""
Define a full :class:`Load` from a path and name.
Parameters
----------
path : str or Path
Path to the top-level calibration observation.
load_name : str
Name of a load to define.
f_low, f_high : float
Min/max frequencies to keep in measurements.
reflection_kwargs : dict
Extra arguments to pass through to :class:`SwitchCorrection`.
spec_kwargs : dict
Extra arguments to pass through to :class:`LoadSpectrum`.
Returns
-------
load : :class:`Load`
The load object, containing all info about spectra and S11's for that load.
"""
if not spec_kwargs:
spec_kwargs = {}
if not reflection_kwargs:
reflection_kwargs = {}
spec = LoadSpectrum.from_load_name(
load_name,
path,
f_low=f_low,
f_high=f_high,
**spec_kwargs,
)
refl = LoadS11.from_path(
load_name,
path,
f_low=f_low,
f_high=f_high,
**reflection_kwargs,
)
return cls(spec, refl)
@property
def s11_model(self):
"""The S11 model."""
return self.reflections.s11_model
@cached_property
def temp_ave(self):
"""The average temperature of the thermistor (over frequency and time)."""
if self.load_name != "hot_load":
return self.spectrum.temp_ave
gain = self._correction.power_gain(self.freq.freq, self.reflections)
        # Eq. 8 of Monsalve et al. (2017): weight the hot-load and ambient
        # temperatures by the power gain of the semi-rigid cable.
return gain * self.spectrum.temp_ave + (1 - gain) * self._ambient.temp_ave
@property
def averaged_Q(self):
"""Averaged power ratio."""
return self.spectrum.averaged_Q
@property
def averaged_spectrum(self):
"""Averaged uncalibrated temperature."""
return self.spectrum.averaged_spectrum
@property
def freq(self):
"""A :class:`FrequencyRange` object corresponding to this measurement."""
return self.spectrum.freq
class CalibrationObservation:
_sources = ("ambient", "hot_load", "open", "short")
def __init__(
self,
path: Union[str, Path],
semi_rigid_path: Union[str, Path] = ":semi_rigid_s_parameters_WITH_HEADER.txt",
f_low: Optional[float] = 40,
f_high: Optional[float] = None,
run_num: Union[None, int, dict] = None,
repeat_num: Union[None, int, dict] = None,
resistance_f: Optional[float] = None,
cterms: int = 5,
wterms: int = 7,
load_kwargs: Optional[dict] = None,
s11_kwargs: Optional[dict] = None,
load_spectra: Optional[dict] = None,
load_s11s: Optional[dict] = None,
compile_from_def: bool = True,
include_previous: bool = False,
internal_switch_kwargs: Optional[Dict[str, Any]] = None,
):
"""
A composite object representing a full Calibration Observation.
This includes spectra of all calibrators, and methods to find the calibration
parameters. It strictly follows Monsalve et al. (2017) in its formalism.
While by default the class uses the calibrator sources ("ambient", "hot_load",
"open", "short"), it can be modified to take other sources by setting
``CalibrationObservation._sources`` to a new tuple of strings.
Parameters
----------
path : str or Path
Path to the directory containing all relevant measurements. It is assumed
that in this directory is an `S11`, `Resistance` and `Spectra` directory.
semi_rigid_path : str or Path, optional
            Path to a file containing S11 measurements for the semi-rigid cable. Used to
correct the hot load S11. Found automatically if not given.
f_low : float
Minimum frequency to keep for all loads (and their S11's). If for some
reason different frequency bounds are desired per-load, one can pass in
full load objects through ``load_spectra``.
f_high : float
Maximum frequency to keep for all loads (and their S11's). If for some
reason different frequency bounds are desired per-load, one can pass in
full load objects through ``load_spectra``.
run_num : int or dict
Which run number to use for the calibrators. Default is to use the last run
for each. Passing an int will attempt to use that run for each source. Pass
a dict mapping sources to numbers to use different combinations.
repeat_num : int or dict
Which repeat number to use for the calibrators. Default is to use the last
repeat for each. Passing an int will attempt to use that repeat for each
source. Pass a dict mapping sources to numbers to use different
combinations.
resistance_f : float
Female resistance (Ohms). Used for the LNA S11.
cterms : int
The number of terms to use for the polynomial fits to the calibration
functions.
wterms : int
The number of terms to use for the polynomial fits to the noise-wave
calibration functions.
load_kwargs : dict
Keyword arguments used to instantiate the calibrator :class:`LoadSpectrum`
objects. See its documentation for relevant parameters. Parameters specified
here are used for _all_ calibrator sources.
s11_kwargs : dict
Keyword arguments used to instantiate the calibrator :class:`LoadS11`
objects. See its documentation for relevant parameters. Parameters specified
here are used for _all_ calibrator sources.
load_spectra : dict
            A dictionary mapping load names of calibration sources (e.g. ambient, short)
to either :class:`LoadSpectrum` instances or dictionaries of keywords to
instantiate those objects. Useful for individually specifying
properties of each load separately. Values in these dictionaries (if
supplied) over-ride those given in ``load_kwargs`` (but values in
``load_kwargs`` are still used if not over-ridden).
load_s11s : dict
            A dictionary mapping load names of calibration sources (e.g. ambient, short)
to :class:`LoadS11` instances or dictionaries of keywords to instantiate
those objects. Useful for individually specifying properties of each load
separately. Values in these dictionaries (if supplied) over-ride those
given in ``s11_kwargs`` (but values in ``s11_kwargs`` are still used if not
over-ridden).
compile_from_def : bool
Whether to attempt compiling a virtual observation from a
``definition.yaml`` inside the observation directory. This is the default
behaviour, but can be turned off to enforce that the current directory
should be used directly.
include_previous : bool
Whether to include the previous observation by default to supplement this
one if required files are missing.
Examples
--------
This will setup an observation with all default options applied:
>>> path = '/CalibrationObservations/Receiver01_25C_2019_11_26_040_to_200MHz'
>>> calobs = CalibrationObservation(path)
To specify some options for constructing the various calibrator load spectra:
        >>> calobs = CalibrationObservation(
        ...     path,
        ...     load_kwargs={"cache_dir": ".", "ignore_times_percent": 50}
        ... )
        But if we typically want 50% of the times ignored, yet in one special case
        we'd like 80%:
        >>> calobs = CalibrationObservation(
        ...     path,
        ...     load_kwargs={"cache_dir": ".", "ignore_times_percent": 50},
        ...     load_spectra={"short": {"ignore_times_percent": 80}}
        ... )
"""
load_spectra = load_spectra or {}
load_s11s = load_s11s or {}
load_kwargs = load_kwargs or {}
s11_kwargs = s11_kwargs or {}
internal_switch_kwargs = internal_switch_kwargs or {}
assert all(name in self._sources for name in load_spectra)
assert all(name in self._sources + ("lna",) for name in load_s11s)
self.io = io.CalibrationObservation(
path,
run_num=run_num,
repeat_num=repeat_num,
fix=False,
compile_from_def=compile_from_def,
include_previous=include_previous,
)
self.compiled_from_def = compile_from_def
self.previous_included = include_previous
self.path = Path(self.io.path)
hot_load_correction = HotLoadCorrection(semi_rigid_path, f_low, f_high)
self.internal_switch = s11.InternalSwitch(
data=self.io.s11.switching_state,
resistance=self.io.definition["measurements"]["resistance_m"][
self.io.s11.switching_state.run_num
],
**internal_switch_kwargs,
)
self._loads = {}
for source in self._sources:
load = load_spectra.get(source, {})
if isinstance(load, dict):
load = LoadSpectrum(
spec_obj=getattr(self.io.spectra, source),
resistance_obj=getattr(self.io.resistance, source),
f_low=f_low,
f_high=f_high,
**{**load_kwargs, **load},
)
# Ensure that we finally have a LoadSpectrum
if not isinstance(load, LoadSpectrum):
raise TypeError("load_spectra must be a dict of LoadSpectrum or dicts.")
refl = load_s11s.get(source, {})
if isinstance(refl, dict):
refl = LoadS11(
load_s11=getattr(self.io.s11, source),
internal_switch=self.internal_switch,
f_low=f_low,
f_high=f_high,
**{**s11_kwargs, **refl},
)
if source == "hot_load":
self._loads[source] = Load(
load,
refl,
hot_load_correction=hot_load_correction,
ambient=self._loads["ambient"].spectrum,
)
else:
self._loads[source] = Load(load, refl)
for name, load in self._loads.items():
setattr(self, name, load)
refl = load_s11s.get("lna", {})
self.lna = LNA(
load_s11=self.io.s11.receiver_reading,
f_low=f_low,
f_high=f_high,
resistance=resistance_f
or self.io.definition["measurements"]["resistance_f"][
self.io.s11.receiver_reading.run_num
],
**{**s11_kwargs, **refl},
)
# We must use the most restricted frequency range available from all available
# sources as well as the LNA.
fmin = max(
sum(
(
[load.spectrum.freq.min, load.reflections.freq.min]
for load in self._loads.values()
),
[],
)
+ [self.lna.freq.min]
)
fmax = min(
sum(
(
[load.spectrum.freq.max, load.reflections.freq.max]
for load in self._loads.values()
),
[],
)
+ [self.lna.freq.max]
)
if fmax <= fmin:
raise ValueError(
"The inputs loads and S11s have non-overlapping frequency ranges!"
)
self.freq = EdgesFrequencyRange(f_low=fmin, f_high=fmax)
# Now make everything actually consistent in its frequency range.
for load in self._loads.values():
load.spectrum.freq = self.freq
self.cterms = cterms
self.wterms = wterms
self.t_load = self.ambient.t_load
self.t_load_ns = self.ambient.t_load_ns
@property
def load_names(self) -> Tuple[str]:
"""Names of the loads."""
return tuple(self._loads.keys())
def new_load(
self,
load_name: str,
run_num: int = 1,
reflection_kwargs: Optional[dict] = None,
spec_kwargs: Optional[dict] = None,
):
"""Create a new load with the given load name.
Uses files inside the current observation.
Parameters
----------
load_name : str
The name of the load ('ambient', 'hot_load', 'open', 'short').
        run_num : int
            Run number to use for both the spectrum and the load's S11.
reflection_kwargs : dict
Keyword arguments to construct the :class:`SwitchCorrection`.
spec_kwargs : dict
Keyword arguments to construct the :class:`LoadSpectrum`.
"""
reflection_kwargs = reflection_kwargs or {}
spec_kwargs = spec_kwargs or {}
# Fill up kwargs with keywords from this instance
if "resistance" not in reflection_kwargs:
reflection_kwargs[
"resistance"
] = self.open.reflections.internal_switch.resistance
for key in [
"ignore_times_percent",
"rfi_removal",
"rfi_kernel_width_freq",
"rfi_kernel_width_time",
"rfi_threshold",
"cache_dir",
"t_load",
"t_load_ns",
]:
if key not in spec_kwargs:
spec_kwargs[key] = getattr(self.open.spectrum, key)
reflection_kwargs["run_num_load"] = run_num
reflection_kwargs["repeat_num_switch"] = self.io.s11.switching_state.repeat_num
reflection_kwargs["run_num_switch"] = self.io.s11.switching_state.run_num
spec_kwargs["run_num"] = run_num
return Load.from_path(
path=self.io.path,
load_name=load_name,
f_low=self.freq.min,
f_high=self.freq.max,
reflection_kwargs=reflection_kwargs,
spec_kwargs=spec_kwargs,
)
def plot_raw_spectra(self, fig=None, ax=None) -> plt.Figure:
"""
Plot raw uncalibrated spectra for all calibrator sources.
Parameters
----------
fig : :class:`plt.Figure`
A matplotlib figure on which to make the plot. By default creates a new one.
ax : :class:`plt.Axes`
A matplotlib Axes on which to make the plot. By default creates a new one.
Returns
-------
fig : :class:`plt.Figure`
The figure on which the plot was made.
"""
if fig is None and ax is None:
fig, ax = plt.subplots(
len(self._sources), 1, sharex=True, gridspec_kw={"hspace": 0.05}
)
for i, (name, load) in enumerate(self._loads.items()):
load.spectrum.plot(
fig=fig, ax=ax[i], xlabel=(i == (len(self._sources) - 1))
)
ax[i].set_title(name)
return fig
def plot_s11_models(self, **kwargs):
"""
Plot residuals of S11 models for all sources.
Returns
-------
dict:
Each entry has a key of the source name, and the value is a matplotlib fig.
"""
out = {
name: source.reflections.plot_residuals(**kwargs)
for name, source in self._loads.items()
}
out.update({"lna": self.lna.plot_residuals(**kwargs)})
return out
@cached_property
def s11_correction_models(self):
"""Dictionary of S11 correction models, one for each source."""
try:
return dict(self._injected_source_s11s)
except (TypeError, AttributeError):
return {
name: source.s11_model(self.freq.freq)
for name, source in self._loads.items()
}
@cached_property
def source_thermistor_temps(self) -> Dict[str, Union[float, np.ndarray]]:
"""Dictionary of input source thermistor temperatures."""
if (
hasattr(self, "_injected_source_temps")
and self._injected_source_temps is not None
):
return self._injected_source_temps
return {k: source.temp_ave for k, source in self._loads.items()}
@cached_property
def _calibration_coefficients(self):
"""The calibration polynomials, evaluated at `freq.freq`."""
if (
hasattr(self, "_injected_averaged_spectra")
and self._injected_averaged_spectra is not None
):
ave_spec = self._injected_averaged_spectra
else:
ave_spec = {
k: source.averaged_spectrum for k, source in self._loads.items()
}
scale, off, Tu, TC, TS = rcf.get_calibration_quantities_iterative(
self.freq.freq_recentred,
temp_raw=ave_spec,
gamma_rec=self.lna_s11,
gamma_ant=self.s11_correction_models,
temp_ant=self.source_thermistor_temps,
cterms=self.cterms,
wterms=self.wterms,
temp_amb_internal=self.t_load,
)
return scale, off, Tu, TC, TS
@cached_property
def C1_poly(self): # noqa: N802
"""`np.poly1d` object describing the Scaling calibration coefficient C1.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~C1` as a direct
function on frequency.
"""
return self._calibration_coefficients[0]
@cached_property
def C2_poly(self): # noqa: N802
"""`np.poly1d` object describing the offset calibration coefficient C2.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~C2` as a direct
function on frequency.
"""
return self._calibration_coefficients[1]
@cached_property
def Tunc_poly(self): # noqa: N802
"""`np.poly1d` object describing the uncorrelated noise-wave parameter, Tunc.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~Tunc` as a direct
function on frequency.
"""
return self._calibration_coefficients[2]
@cached_property
def Tcos_poly(self): # noqa: N802
"""`np.poly1d` object describing the cosine noise-wave parameter, Tcos.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~Tcos` as a direct
function on frequency.
"""
return self._calibration_coefficients[3]
@cached_property
def Tsin_poly(self): # noqa: N802
"""`np.poly1d` object describing the sine noise-wave parameter, Tsin.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~Tsin` as a direct
function on frequency.
"""
return self._calibration_coefficients[4]
def C1(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Scaling calibration parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate C1. By default, the frequencies of this
instance.
"""
if hasattr(self, "_injected_c1") and self._injected_c1 is not None:
return np.array(self._injected_c1)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.C1_poly(fnorm)
def C2(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Offset calibration parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate C2. By default, the frequencies of this
instance.
"""
if hasattr(self, "_injected_c2") and self._injected_c2 is not None:
return np.array(self._injected_c2)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.C2_poly(fnorm)
def Tunc(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Uncorrelated noise-wave parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate Tunc. By default, the frequencies of
            this instance.
"""
if hasattr(self, "_injected_t_unc") and self._injected_t_unc is not None:
return np.array(self._injected_t_unc)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.Tunc_poly(fnorm)
def Tcos(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Cosine noise-wave parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate Tcos. By default, the frequencies of
this instance.
"""
if hasattr(self, "_injected_t_cos") and self._injected_t_cos is not None:
return np.array(self._injected_t_cos)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.Tcos_poly(fnorm)
def Tsin(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Sine noise-wave parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate Tsin. By default, the frequencies of
this instance.
"""
if hasattr(self, "_injected_t_sin") and self._injected_t_sin is not None:
return np.array(self._injected_t_sin)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.Tsin_poly(fnorm)
@cached_property
def lna_s11(self):
"""The corrected S11 of the LNA evaluated at the data frequencies."""
if hasattr(self, "_injected_lna_s11") and self._injected_lna_s11 is not None:
return self._injected_lna_s11
else:
return self.lna.s11_model(self.freq.freq)
def get_linear_coefficients(self, load: Union[Load, str]):
"""
Calibration coefficients a,b such that T = aT* + b (derived from Eq. 7).
Parameters
----------
load : str or :class:`Load`
The load for which to get the linear coefficients.
"""
if isinstance(load, str):
load_s11 = self.s11_correction_models[load]
elif load.load_name in self.s11_correction_models:
load_s11 = self.s11_correction_models[load.load_name]
else:
load_s11 = load.s11_model(self.freq.freq)
return rcf.get_linear_coefficients(
load_s11,
self.lna_s11,
self.C1(self.freq.freq),
self.C2(self.freq.freq),
self.Tunc(self.freq.freq),
self.Tcos(self.freq.freq),
self.Tsin(self.freq.freq),
t_load=self.t_load,
)
def calibrate(self, load: Union[Load, str], q=None, temp=None):
"""
Calibrate the temperature of a given load.
Parameters
----------
load : :class:`Load` or str
The load to calibrate.
Returns
-------
array : calibrated antenna temperature in K, len(f).
"""
load = self._load_str_to_load(load)
a, b = self.get_linear_coefficients(load)
if q is not None:
temp = self.t_load_ns * q + self.t_load
elif temp is None:
temp = load.averaged_spectrum
return a * temp + b
def _load_str_to_load(self, load: Union[Load, str]):
if isinstance(load, str):
try:
load = self._loads[load]
            except KeyError:
raise AttributeError(
"load must be a Load object or a string (one of "
"{ambient,hot_load,open,short})"
)
else:
assert isinstance(
load, Load
), "load must be a Load instance, got the {} {}".format(load, type(Load))
return load
def decalibrate(
self, temp: np.ndarray, load: Union[Load, str], freq: np.ndarray = None
):
"""
Decalibrate a temperature spectrum, yielding uncalibrated T*.
Parameters
----------
temp : array_like
A temperature spectrum, with the same length as `freq.freq`.
load : str or :class:`Load`
The load to calibrate.
freq : array-like
The frequencies at which to decalibrate. By default, the frequencies of the
instance.
Returns
-------
array_like : T*, the normalised uncalibrated temperature.
"""
if freq is None:
freq = self.freq.freq
if freq.min() < self.freq.freq.min():
warnings.warn(
"The minimum frequency is outside the calibrated range "
f"({self.freq.freq.min()} - {self.freq.freq.max()} MHz)"
)
        if freq.max() > self.freq.freq.max():
            warnings.warn("The maximum frequency is outside the calibrated range")
a, b = self.get_linear_coefficients(load)
return (temp - b) / a
def get_K(
self, freq: np.ndarray | None = None
) -> Dict[str, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
"""Get the source-S11-dependent factors of Monsalve (2017) Eq. 7."""
if freq is None:
freq = self.freq.freq
gamma_ants = self.s11_correction_models
else:
gamma_ants = {
name: source.s11_model(freq) for name, source in self._loads.items()
}
lna_s11 = self.lna.s11_model(freq)
return {
name: rcf.get_K(gamma_rec=lna_s11, gamma_ant=gamma_ant)
for name, gamma_ant in gamma_ants.items()
}
def plot_calibrated_temp(
self,
load: Union[Load, str],
bins: int = 2,
fig=None,
ax=None,
xlabel=True,
ylabel=True,
):
"""
Make a plot of calibrated temperature for a given source.
Parameters
----------
load : :class:`~LoadSpectrum` instance
Source to plot.
bins : int
Number of bins to smooth over (std of Gaussian kernel)
fig : Figure
Optionally provide a matplotlib figure to add to.
ax : Axis
Optionally provide a matplotlib Axis to add to.
xlabel : bool
Whether to write the x-axis label
ylabel : bool
Whether to write the y-axis label
Returns
-------
fig :
The matplotlib figure that was created.
"""
load = self._load_str_to_load(load)
if fig is None and ax is None:
fig, ax = plt.subplots(1, 1, facecolor="w")
# binning
temp_calibrated = self.calibrate(load)
if bins > 0:
freq_ave_cal = convolve(
temp_calibrated, Gaussian1DKernel(stddev=bins), boundary="extend"
)
else:
freq_ave_cal = temp_calibrated
freq_ave_cal[np.isinf(freq_ave_cal)] = np.nan
rms = np.sqrt(np.mean((freq_ave_cal - np.mean(freq_ave_cal)) ** 2))
ax.plot(
self.freq.freq,
freq_ave_cal,
label=f"Calibrated {load.spectrum.load_name} [RMS = {rms:.3f}]",
)
temp_ave = self.source_thermistor_temps.get(load.load_name, load.temp_ave)
if not hasattr(temp_ave, "__len__"):
ax.axhline(temp_ave, color="C2", label="Average thermistor temp")
else:
ax.plot(
self.freq.freq,
temp_ave,
color="C2",
label="Average thermistor temp",
)
ax.set_ylim([np.nanmin(freq_ave_cal), np.nanmax(freq_ave_cal)])
if xlabel:
ax.set_xlabel("Frequency [MHz]")
if ylabel:
ax.set_ylabel("Temperature [K]")
plt.ticklabel_format(useOffset=False)
ax.grid()
ax.legend()
return plt.gcf()
def get_load_residuals(self):
"""Get residuals of the calibrated temperature for a each load."""
out = {}
for source in self._sources:
load = self._load_str_to_load(source)
cal = self.calibrate(load)
true = self.source_thermistor_temps[source]
out[source] = cal - true
return out
def get_rms(self, smooth: int = 4):
"""Return a dict of RMS values for each source.
Parameters
----------
smooth : int
The number of bins over which to smooth residuals before taking the RMS.
"""
resids = self.get_load_residuals()
out = {}
for name, res in resids.items():
if smooth > 1:
res = convolve(res, Gaussian1DKernel(stddev=smooth), boundary="extend")
out[name] = np.sqrt(np.nanmean(res ** 2))
return out
def plot_calibrated_temps(self, bins=64, fig=None, ax=None):
"""
Plot all calibrated temperatures in a single figure.
Parameters
----------
bins : int
Number of bins in the smoothed spectrum
Returns
-------
fig :
Matplotlib figure that was created.
"""
if fig is None or ax is None or len(ax) != len(self._sources):
fig, ax = plt.subplots(
len(self._sources),
1,
sharex=True,
gridspec_kw={"hspace": 0.05},
figsize=(10, 12),
)
for i, source in enumerate(self._sources):
self.plot_calibrated_temp(
source,
bins=bins,
fig=fig,
ax=ax[i],
xlabel=i == (len(self._sources) - 1),
)
fig.suptitle("Calibrated Temperatures for Calibration Sources", fontsize=15)
return fig
def write_coefficients(self, path: Optional[str] = None):
"""
        Save a text file with the derived calibration coefficients.
Parameters
----------
path : str
            Directory in which to write the file. The filename starts with
            `calibration_parameters` and includes parameters of the class in the
            filename. By default, current directory.
"""
path = Path(path or ".")
if path.is_dir():
path /= (
f"calibration_parameters_fmin{self.freq.freq.min()}_"
f"fmax{self.freq.freq.max()}_C{self.cterms}_W{self.wterms}.txt"
)
np.savetxt(
path,
[
self.freq.freq,
self.C1(),
self.C2(),
self.Tunc(),
self.Tcos(),
self.Tsin(),
],
)
def plot_coefficients(self, fig=None, ax=None):
"""
Make a plot of the calibration models, C1, C2, Tunc, Tcos and Tsin.
Parameters
----------
fig : Figure
Optionally pass a matplotlib figure to add to.
ax : Axis
            Optionally pass matplotlib axes to plot on. Must have 5 axes.
"""
if fig is None or ax is None:
fig, ax = plt.subplots(
5, 1, facecolor="w", gridspec_kw={"hspace": 0.05}, figsize=(10, 9)
)
labels = [
"Scale ($C_1$)",
"Offset ($C_2$) [K]",
r"$T_{\rm unc}$ [K]",
r"$T_{\rm cos}$ [K]",
r"$T_{\rm sin}$ [K]",
]
for i, (kind, label) in enumerate(
zip(["C1", "C2", "Tunc", "Tcos", "Tsin"], labels)
):
ax[i].plot(self.freq.freq, getattr(self, kind)())
ax[i].set_ylabel(label, fontsize=13)
ax[i].grid()
plt.ticklabel_format(useOffset=False)
if i == 4:
ax[i].set_xlabel("Frequency [MHz]", fontsize=13)
fig.suptitle("Calibration Parameters", fontsize=15)
return fig
def invalidate_cache(self):
"""Invalidate all cached attributes so they must be recalculated."""
if not hasattr(self, "_cached_"):
return
for cache in self._cached_:
del self.__dict__[cache]
def update(self, **kwargs):
"""Update the class in-place, invalidating the cache as well.
Parameters
----------
kwargs :
All parameters to be updated.
"""
self.invalidate_cache()
for k, v in kwargs.items():
setattr(self, k, v)
def write(self, filename: Union[str, Path]):
"""
Write all information required to calibrate a new spectrum to file.
Parameters
----------
filename : path
The filename to write to.
"""
with h5py.File(filename, "w") as fl:
# Write attributes
fl.attrs["path"] = str(self.io.original_path)
fl.attrs["cterms"] = self.cterms
fl.attrs["wterms"] = self.wterms
fl.attrs["switch_path"] = str(self.internal_switch.data.path)
fl.attrs["switch_repeat_num"] = self.internal_switch.data.repeat_num
fl.attrs["switch_resistance"] = self.internal_switch.resistance
fl.attrs["switch_nterms"] = self.internal_switch.n_terms[0]
fl.attrs["switch_model"] = str(self.internal_switch.model)
fl.attrs["t_load"] = self.open.spectrum.t_load
fl.attrs["t_load_ns"] = self.open.spectrum.t_load_ns
fl["C1"] = self.C1_poly.coefficients
fl["C2"] = self.C2_poly.coefficients
fl["Tunc"] = self.Tunc_poly.coefficients
fl["Tcos"] = self.Tcos_poly.coefficients
fl["Tsin"] = self.Tsin_poly.coefficients
fl["frequencies"] = self.freq.freq
fl["lna_s11_real"] = self.lna.s11_model(self.freq.freq).real
fl["lna_s11_imag"] = self.lna.s11_model(self.freq.freq).imag
fl["internal_switch_s11_real"] = np.real(
self.internal_switch.s11_model(self.freq.freq)
)
fl["internal_switch_s11_imag"] = np.imag(
self.internal_switch.s11_model(self.freq.freq)
)
fl["internal_switch_s12_real"] = np.real(
self.internal_switch.s12_model(self.freq.freq)
)
fl["internal_switch_s12_imag"] = np.imag(
self.internal_switch.s12_model(self.freq.freq)
)
fl["internal_switch_s22_real"] = np.real(
self.internal_switch.s22_model(self.freq.freq)
)
fl["internal_switch_s22_imag"] = np.imag(
self.internal_switch.s22_model(self.freq.freq)
)
load_grp = fl.create_group("loads")
for name, load in self._loads.items():
grp = load_grp.create_group(name)
grp.attrs["s11_model"] = yaml.dump(load.s11_model)
grp["averaged_Q"] = load.spectrum.averaged_Q
grp["variance_Q"] = load.spectrum.variance_Q
grp["temp_ave"] = load.temp_ave
grp.attrs["n_integrations"] = load.spectrum.n_integrations
def to_calfile(self):
"""Directly create a :class:`Calibration` object without writing to file."""
return Calibration.from_calobs(self)
def inject(
self,
lna_s11: np.ndarray = None,
source_s11s: Dict[str, np.ndarray] = None,
c1: np.ndarray = None,
c2: np.ndarray = None,
t_unc: np.ndarray = None,
t_cos: np.ndarray = None,
t_sin: np.ndarray = None,
averaged_spectra: Dict[str, np.ndarray] = None,
thermistor_temp_ave: Dict[str, np.ndarray] = None,
) -> CalibrationObservation:
"""Make a new :class:`CalibrationObservation` based on this, with injections.
Parameters
----------
lna_s11
The LNA S11 as a function of frequency to inject.
source_s11s
Dictionary of ``{source: S11}`` for each source to inject.
c1
Scaling parameter as a function of frequency to inject.
c2 : [type], optional
Offset parameter to inject as a function of frequency.
t_unc
Uncorrelated temperature to inject (as function of frequency)
t_cos
Correlated temperature to inject (as function of frequency)
t_sin
Correlated temperature to inject (as function of frequency)
averaged_spectra
Dictionary of ``{source: spectrum}`` for each source to inject.
Returns
-------
:class:`CalibrationObservation`
A new observation object with the injected models.
"""
new = copy(self)
new.invalidate_cache()
new._injected_lna_s11 = lna_s11
new._injected_source_s11s = source_s11s
new._injected_c1 = c1
new._injected_c2 = c2
new._injected_t_unc = t_unc
new._injected_t_cos = t_cos
new._injected_t_sin = t_sin
new._injected_averaged_spectra = averaged_spectra
new._injected_source_temps = thermistor_temp_ave
return new
@attr.s
class _LittleS11:
s11_model: Callable = attr.ib()
@attr.s
class _LittleSpectrum:
averaged_Q: np.ndarray = attr.ib()
variance_Q: np.ndarray = attr.ib()
n_integrations: int = attr.ib()
@attr.s
class _LittleLoad:
reflections: _LittleS11 = attr.ib()
spectrum: _LittleSpectrum = attr.ib()
temp_ave: np.ndarray = attr.ib()
class Calibration:
def __init__(self, filename: Union[str, Path]):
"""
        A class defining an interface to an HDF5 file containing calibration information.
Parameters
----------
filename : str or Path
The path to the calibration file.
"""
self.calfile = Path(filename)
with h5py.File(filename, "r") as fl:
self.calobs_path = fl.attrs["path"]
self.cterms = int(fl.attrs["cterms"])
self.wterms = int(fl.attrs["wterms"])
self.t_load = fl.attrs.get("t_load", 300)
self.t_load_ns = fl.attrs.get("t_load_ns", 400)
self.C1_poly = np.poly1d(fl["C1"][...])
self.C2_poly = np.poly1d(fl["C2"][...])
self.Tcos_poly = np.poly1d(fl["Tcos"][...])
self.Tsin_poly = np.poly1d(fl["Tsin"][...])
self.Tunc_poly = np.poly1d(fl["Tunc"][...])
self.freq = FrequencyRange(fl["frequencies"][...])
self._loads = {}
if "loads" in fl:
lg = fl["loads"]
self.load_names = list(lg.keys())
for name, grp in lg.items():
self._loads[name] = _LittleLoad(
reflections=_LittleS11(
s11_model=yaml.load(
grp.attrs["s11_model"], Loader=yaml.FullLoader
).at(x=self.freq.freq)
),
spectrum=_LittleSpectrum(
averaged_Q=grp["averaged_Q"][...],
variance_Q=grp["variance_Q"][...],
n_integrations=grp.attrs["n_integrations"],
),
temp_ave=grp["temp_ave"][...],
)
self._lna_s11_rl = Spline(self.freq.freq, fl["lna_s11_real"][...])
self._lna_s11_im = Spline(self.freq.freq, fl["lna_s11_imag"][...])
self._intsw_s11_rl = Spline(
self.freq.freq, fl["internal_switch_s11_real"][...]
)
self._intsw_s11_im = Spline(
self.freq.freq, fl["internal_switch_s11_imag"][...]
)
self._intsw_s12_rl = Spline(
self.freq.freq, fl["internal_switch_s12_real"][...]
)
self._intsw_s12_im = Spline(
self.freq.freq, fl["internal_switch_s12_imag"][...]
)
self._intsw_s22_rl = Spline(
self.freq.freq, fl["internal_switch_s22_real"][...]
)
self._intsw_s22_im = Spline(
self.freq.freq, fl["internal_switch_s22_imag"][...]
)
@classmethod
def from_calobs(cls, calobs: CalibrationObservation) -> Calibration:
"""Generate a :class:`Calibration` from an in-memory observation."""
tmp = tempfile.mktemp()
calobs.write(tmp)
return cls(tmp)
def lna_s11(self, freq=None):
"""Get the LNA S11 at given frequencies."""
if freq is None:
freq = self.freq.freq
return self._lna_s11_rl(freq) + 1j * self._lna_s11_im(freq)
def internal_switch_s11(self, freq=None):
"""Get the S11 of the internal switch at given frequencies."""
if freq is None:
freq = self.freq.freq
return self._intsw_s11_rl(freq) + 1j * self._intsw_s11_im(freq)
def internal_switch_s12(self, freq=None):
"""Get the S12 of the internal switch at given frequencies."""
if freq is None:
freq = self.freq.freq
return self._intsw_s12_rl(freq) + 1j * self._intsw_s12_im(freq)
def internal_switch_s22(self, freq=None):
"""Get the S22 of the internal switch at given frequencies."""
if freq is None:
freq = self.freq.freq
return self._intsw_s22_rl(freq) + 1j * self._intsw_s22_im(freq)
def C1(self, freq=None):
"""Evaluate the Scale polynomial at given frequencies."""
if freq is None:
freq = self.freq.freq
return self.C1_poly(self.freq.normalize(freq))
def C2(self, freq=None):
"""Evaluate the Offset polynomial at given frequencies."""
if freq is None:
freq = self.freq.freq
return self.C2_poly(self.freq.normalize(freq))
def Tcos(self, freq=None):
"""Evaluate the cos temperature polynomial at given frequencies."""
if freq is None:
freq = self.freq.freq
return self.Tcos_poly(self.freq.normalize(freq))
def Tsin(self, freq=None):
"""Evaluate the sin temperature polynomial at given frequencies."""
if freq is None:
freq = self.freq.freq
return self.Tsin_poly(self.freq.normalize(freq))
def Tunc(self, freq=None):
"""Evaluate the uncorrelated temperature polynomial at given frequencies."""
if freq is None:
freq = self.freq.freq
return self.Tunc_poly(self.freq.normalize(freq))
def _linear_coefficients(self, freq, ant_s11):
return rcf.get_linear_coefficients(
ant_s11,
self.lna_s11(freq),
self.C1(freq),
self.C2(freq),
self.Tunc(freq),
self.Tcos(freq),
self.Tsin(freq),
self.t_load,
)
def calibrate_temp(self, freq: np.ndarray, temp: np.ndarray, ant_s11: np.ndarray):
"""
Calibrate given uncalibrated spectrum.
Parameters
----------
freq : np.ndarray
The frequencies at which to calibrate
temp : np.ndarray
The temperatures to calibrate (in K).
ant_s11 : np.ndarray
The antenna S11 for the load.
Returns
-------
temp : np.ndarray
The calibrated temperature.
"""
a, b = self._linear_coefficients(freq, ant_s11)
return temp * a + b
def decalibrate_temp(self, freq, temp, ant_s11):
"""
De-calibrate given calibrated spectrum.
Parameters
----------
freq : np.ndarray
The frequencies at which to calibrate
temp : np.ndarray
The temperatures to calibrate (in K).
ant_s11 : np.ndarray
The antenna S11 for the load.
Returns
-------
temp : np.ndarray
            The de-calibrated (uncalibrated) temperature.
Notes
-----
        Using this and then :meth:`calibrate_temp` immediately should be an identity
        operation.
"""
a, b = self._linear_coefficients(freq, ant_s11)
return (temp - b) / a
def calibrate_Q(
self, freq: np.ndarray, q: np.ndarray, ant_s11: np.ndarray
) -> np.ndarray:
"""
Calibrate given power ratio spectrum.
Parameters
----------
freq : np.ndarray
The frequencies at which to calibrate
q : np.ndarray
The power ratio to calibrate.
ant_s11 : np.ndarray
The antenna S11 for the load.
Returns
-------
temp : np.ndarray
The calibrated temperature.
"""
uncal_temp = self.t_load_ns * q + self.t_load
return self.calibrate_temp(freq, uncal_temp, ant_s11)
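# Illustrative sketch only: apply a previously-written calibration file to a new
# power-ratio spectrum Q measured through some antenna. All inputs (the file
# name, freq, q and ant_s11 arrays) are placeholders supplied by the caller.
def _example_apply_calibration(calfile, freq, q, ant_s11):
    cal = Calibration(calfile)
    # Convert the raw three-position-switch power ratio into temperature.
    return cal.calibrate_Q(freq, q, ant_s11)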
def perform_term_sweep(
calobs: CalibrationObservation,
delta_rms_thresh: float = 0,
max_cterms: int = 15,
max_wterms: int = 15,
explore_run_nums: bool = False,
explore_repeat_nums: bool = False,
direc=".",
verbose=False,
) -> CalibrationObservation:
"""For a given calibration definition, perform a sweep over number of terms.
There are options to save _every_ calibration solution, or just the "best" one.
Parameters
----------
calobs: :class:`CalibrationObservation` instance
The definition calibration class. The `cterms` and `wterms` in this instance
should define the *lowest* values of the parameters to sweep over.
delta_rms_thresh : float
The threshold in change in RMS between one set of parameters and the next that
will define where to cut off. If zero, will run all sets of parameters up to
the maximum terms specified.
max_cterms : int
The maximum number of cterms to trial.
max_wterms : int
The maximum number of wterms to trial.
explore_run_nums : bool
Whether to iterate over S11 run numbers to find the best residuals.
explore_repeat_nums : bool
Whether to iterate over S11 repeat numbers to find the best residuals.
direc : str
Directory to write resultant :class:`Calibration` file to.
verbose : bool
Whether to write out the RMS values derived throughout the sweep.
Notes
-----
    When exploring run/repeat nums, run nums are kept constant within a load (i.e. the
    match/short/open etc. all have either run_num=1 or run_num=2 for the same load).
    This is physically motivated.
"""
cterms = range(calobs.cterms, max_cterms)
wterms = range(calobs.wterms, max_wterms)
winner = np.zeros(len(cterms), dtype=int)
s11_keys = ["switching_state", "receiver_reading"] + list(io.LOAD_ALIASES.keys())
if explore_repeat_nums:
# Note that we don't explore run_nums for spectra/resistance, because it's rare
# to have those, and they'll only exist if one got completely botched (and that
# should be set by the user).
rep_num = {
k: range(1, getattr(calobs.io.s11, k).max_repeat_num + 1) for k in s11_keys
}
else:
rep_num = {k: [getattr(calobs.io.s11, k).repeat_num] for k in s11_keys}
rep_num = tools.dct_of_list_to_list_of_dct(rep_num)
if explore_run_nums:
run_num = {
"switching_state": range(
1, calobs.io.s11.get_highest_run_num("SwitchingState") + 1
),
"receiver_reading": range(
1, calobs.io.s11.get_highest_run_num("ReceiverReading") + 1
),
}
else:
run_num = {
"switching_state": [calobs.io.s11.switching_state.run_num],
"receiver_reading": [calobs.io.s11.receiver_reading.run_num],
}
run_num = tools.dct_of_list_to_list_of_dct(run_num)
best_rms = np.inf
for this_rep_num in rep_num:
for this_run_num in run_num:
tmp_run_num = copy(calobs.io.run_num)
tmp_run_num.update(this_run_num)
# Change the base io.CalObs because it will change with rep/run.
calobs.io = io.CalibrationObservation(
path=calobs.io.path,
run_num=tmp_run_num,
repeat_num=this_rep_num,
fix=False,
compile_from_def=calobs.compiled_from_def,
include_previous=calobs.previous_included,
)
calobs.lna = LNA(
calobs.io.s11.receiver_reading,
f_low=calobs.freq.min,
f_high=calobs.freq.max,
resistance=calobs.lna.resistance,
)
# If we're changing anything else, we need to change each load.
for name, load in calobs._loads.items():
load.reflections = LoadS11.from_path(
load_name=name,
path=calobs.io.path,
repeat_num_load=this_rep_num[name],
run_num_switch=this_run_num["switching_state"],
repeat_num_switch=this_rep_num["switching_state"],
)
if verbose:
print(
f"SWEEPING SwSt={calobs.io.s11.switching_state.repeat_num}, "
f"RcvRd={calobs.io.s11.receiver_reading.repeat_num} "
f"[Sw={calobs.io.s11.switching_state.run_num}, "
f"RR={calobs.io.s11.receiver_reading.run_num}, "
f"open={calobs.io.s11.open.run_num}, "
f"short={calobs.io.s11.short.run_num}, "
f"ambient={calobs.io.s11.ambient.run_num}, "
f"hot={calobs.io.s11.hot_load.run_num}]"
)
print("-" * 30)
rms = np.zeros((len(cterms), len(wterms)))
for i, c in enumerate(cterms):
for j, w in enumerate(wterms):
calobs.update(cterms=c, wterms=w)
res = calobs.get_load_residuals()
dof = sum(len(r) for r in res.values()) - c - w
rms[i, j] = np.sqrt(
sum(np.nansum(np.square(x)) for x in res.values()) / dof
)
if verbose:
print(f"Nc = {c:02}, Nw = {w:02}; RMS/dof = {rms[i, j]:1.3e}")
# If we've decreased by more than the threshold, this wterms becomes
# the new winner (for this number of cterms)
if j > 0 and rms[i, j] >= rms[i, j - 1] - delta_rms_thresh:
winner[i] = j - 1
break
if (
i > 0
and rms[i, winner[i]]
>= rms[i - 1, winner[i - 1]] - delta_rms_thresh
):
break
if verbose:
print(
f"Best parameters found for Nc={cterms[i-1]}, "
f"Nw={wterms[winner[i-1]]}, "
f"with RMS = {rms[i-1, winner[i-1]]}."
)
print()
if rms[i - 1, winner[i - 1]] < best_rms:
best_run_combo = (
calobs.io.run_num,
calobs.io.s11.receiver_reading.repeat_num,
calobs.io.s11.switching_state.repeat_num,
)
best_cterms = cterms[i - 1]
best_wterms = wterms[winner[i - 1]]
if verbose and (explore_repeat_nums or explore_run_nums):
print("The very best parameters were found were for:")
print(f"\tSwitchingState Repeat = {best_run_combo[2]}")
print(f"\tReceiverReading Repeat = {best_run_combo[1]}")
print(f"\tRun Numbers = {best_run_combo[0]}")
print(f"\t# C-terms = {best_cterms}")
print(f"\t# W-terms = {best_wterms}")
calobs.update(cterms=best_cterms, wterms=best_wterms)
calobs.io = io.CalibrationObservation(
path=calobs.io.path,
run_num=best_run_combo[0],
repeat_num={
"switching_state": best_run_combo[2],
"receiver_reading": best_run_combo[1],
},
fix=False,
compile_from_def=calobs.compiled_from_def,
include_previous=calobs.previous_included,
)
calobs.lna = LNA(
calobs.io.s11.receiver_reading,
f_low=calobs.freq.min,
f_high=calobs.freq.max,
resistance=calobs.lna.resistance,
)
if direc is not None:
direc = Path(direc)
if not direc.exists():
direc.mkdir(parents=True)
pth = Path(calobs.path).parent.name
pth = str(pth) + f"_c{calobs.cterms}_w{calobs.wterms}.h5"
calobs.write(direc / pth)
return calobs
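# ---------------------------------------------------------------------------
# Minimal end-to-end sketch (illustrative only): the observation path is a
# placeholder and the chosen numbers of terms are arbitrary.
# ---------------------------------------------------------------------------
def _example_calibration_workflow(
    path="/CalibrationObservations/Receiver01_25C_2019_11_26_040_to_200MHz",
):
    calobs = CalibrationObservation(path, f_low=50, f_high=100, cterms=6, wterms=5)
    # Inspect the quality of the solution for each calibration source.
    print(calobs.get_rms())
    # Optionally search for better polynomial orders.
    calobs = perform_term_sweep(
        calobs, delta_rms_thresh=1e-4, max_cterms=10, max_wterms=10
    )
    # Persist everything needed to calibrate new spectra later on.
    calobs.write("calibration_solution.h5")
    return calobs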
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from abc import ABCMeta, abstractmethod
from threading import RLock, Thread
from fuse import fuse_get_context
def get_lock(threads, monitoring_delay):
return PathLock(monitoring_delay=monitoring_delay) if threads else DummyLock()
def monitor_locks(monitor_lock, locks, timeout):
while True:
try:
monitor_lock.acquire()
logging.debug('Updating path lock status')
free_paths = [path for path, lock in locks.iteritems() if lock.acquire(blocking=False)]
logging.debug('Releasing %d locks' % len(free_paths))
for path in free_paths:
del locks[path]
logging.debug('Finished path lock status update')
finally:
monitor_lock.release()
time.sleep(timeout)
class FileSystemLock:
__metaclass__ = ABCMeta
@abstractmethod
def lock(self, path):
pass
@abstractmethod
def unlock(self, path):
pass
class DummyLock(FileSystemLock):
def lock(self, path):
pass
def unlock(self, path):
pass
class PathLock(FileSystemLock):
def __init__(self, monitoring_delay=600):
self._mutex = RLock()
self._monitor_lock = RLock()
self._locks = {}
self._monitor = Thread(target=monitor_locks, args=(self._monitor_lock, self._locks, monitoring_delay,))
self._monitor.daemon = True
self._monitor.start()
def lock(self, path):
try:
self._monitor_lock.acquire()
logging.debug('Locking path %s for %s' % (path, str(fuse_get_context())))
path_lock = self._get_path_lock(path)
self._lock_path(path_lock)
logging.debug('Acquired lock for %s' % path)
finally:
self._monitor_lock.release()
def unlock(self, path):
logging.debug('Unlocking path %s for %s' % (path, str(fuse_get_context())))
self._release_path(path)
def _release_path(self, path):
try:
self._mutex.acquire()
if path not in self._locks:
logging.debug('Cannot release non-existing lock.')
else:
self._locks[path].release()
logging.debug('Released lock for %s' % path)
finally:
self._mutex.release()
logging.debug('Finished unlocking for %s' % path)
def _get_path_lock(self, path):
try:
self._mutex.acquire()
if path not in self._locks:
self._locks[path] = RLock()
logging.debug('Created new lock for %s' % path)
return self._locks[path]
finally:
self._mutex.release()
def _lock_path(self, path_lock):
try:
path_lock.acquire()
except:
path_lock.release()
raise
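# Minimal usage sketch (illustrative only): guard a filesystem operation on a
# path with the path-level lock. `threads` would normally reflect whether the
# FUSE mount runs with multiple worker threads.
def _example_locked_operation(path, threads=True):
    fs_lock = get_lock(threads, monitoring_delay=600)
    fs_lock.lock(path)
    try:
        pass  # perform the actual operation on `path` here
    finally:
        fs_lock.unlock(path)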
|
"""
# # Exercise 2: Service Health Check
# Create one or multiple filip clients and check if the corresponding services
# are up and running by accessing their version information.
# The input sections are marked with 'ToDo'
# #### Steps to complete:
# 1. Set up the missing parameters in the parameter section
# 2. Create filip ngsi_v2 clients for the individual services and check for
# their version
# 3. Create a config object for the ngsi_v2 multi client (HttpClient),
# create the multi client and again check for services' versions
"""
# ## Import packages
from filip.clients.ngsi_v2 import \
HttpClient, \
HttpClientConfig, \
ContextBrokerClient, \
IoTAClient, \
QuantumLeapClient
# ## Parameters
# ToDo: Enter your context broker url and port, e.g. http://localhost:1026
CB_URL = "http://localhost:1026"
# ToDo: Enter your IoT-Agent url and port, e.g. http://localhost:4041
IOTA_URL = "http://localhost:4041"
# ToDo: Enter your QuantumLeap url and port, e.g. http://localhost:8668
QL_URL = "http://localhost:8668"
# ## Main script
if __name__ == "__main__":
# ToDo: Create a single client for each service and check the service for
# its version
cbc = ContextBrokerClient(url=CB_URL)
print(cbc.get_version())
iotac = IoTAClient(url=IOTA_URL)
print(iotac.get_version())
qlc = QuantumLeapClient(url=QL_URL)
print(qlc.get_version())
# ToDo: Create a configuration object for a multi client
config = HttpClientConfig(cb_url=CB_URL, iota_url=IOTA_URL, ql_url=QL_URL)
    # ToDo: Create a multi client and check all services again for their version
multic = HttpClient(config=config)
print(multic.cb.get_version())
print(multic.iota.get_version())
print(multic.timeseries.get_version())
|
from .base import AsyncUrbanClient, UrbanClient, UrbanDefinition, UrbanDictionaryError
|
# Copyright (c) 2015 Walt Chen
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
class ParserException(Exception):
pass
class Stock(object):
# yesterday_close is yesterday close price
# close is today close price
# volume: unit of stock transacted
# turnover: total transaction money
def __init__(self, code, date, open, high, low, close, volume):
self.code = code
self.date = str(date)
self.open = open
self.high = high
self.low = low
self.close = close
self.volume = volume
def __str__(self):
return "%s\tvol: %s\topen: %s\tHI: %s\t LO: %s\tclose: %s" %\
(self.date, self.volume, self.open, self.high, self.low, self.close)
__all__ = ['ParserException', 'Stock']
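# Minimal usage sketch; the quote values below are made up for illustration only.
if __name__ == '__main__':
    bar = Stock('600000', 20150105, open=10.0, high=10.5, low=9.8,
                close=10.2, volume=123456)
    print(bar)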
|
from pathlib import Path
import copy
import time
import torch.optim as optim
import numpy as np
import torch
from torch.autograd import Variable
from model import *
from data_utils import *
import torch.nn as nn
from loguru import logger
feature_dim = 8
block_size = 16
pad=2
n_conv=3
thresh=0.5
debug = False
def test_bottom_io():
tsdf = [torch.from_numpy(np.random.rand(1, 1, block_size+2*pad+2*n_conv,
block_size+2*pad+2*n_conv,
block_size+2*pad+2*n_conv)).float().to(device)]
prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,
block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)
).float().to(device)}
mod = BottomLevel(feature_dim, block_size=block_size)
if device == 'cuda':
mod.cuda()
out = mod(tsdf, prev)
assert type(out) == list
assert len(out) == 1
out = out[0]
assert len(out) == 1
for X in out.keys():
assert out[X].shape == (1, 2, block_size, block_size, block_size), out[X].shape
def test_convtrans():
conv1 = nn.ConvTranspose3d(10, 10, kernel_size=4, stride=2, output_padding=0, padding=0, bias=False)
dat = torch.ones(1, 10, block_size, block_size, block_size)
y = conv1(dat)
assert y.shape[-1] == block_size*2+2 , (y.shape, dat.shape)
pad = nn.ReplicationPad3d(1)
conv1 = nn.ConvTranspose3d(1, 1, kernel_size=3, stride=2,
output_padding=1, padding=1, bias=False)
dat = Variable(torch.ones(1, 1, 4, 4, 4))
y = conv1(dat)
assert y.shape[-1] == 8, y.shape
def test_data():
data = TsdfGenerator(64)
vis = visdom.Visdom()
gt, tsdf_in = data.__getitem__(0)
assert np.abs(tsdf_in).max() < 33
def test_ellipsoid():
arr = ellipsoid(10, 10, 10, levelset=True)*10 # the output is ~normalized. multiple by 10
assert arr.shape == (23, 23, 23), arr.shape
dist = np.sqrt(11**2*3)-10
assert np.abs(arr[0, 0, 0]) > dist, (arr[0, 0, 0], dist)
print(arr[0, 0, 0], dist)
a, b, c = 10, 15, 25
arr = ellipsoid(a, b, c, levelset=True)
# if we move 1 voxel in space the sdf should also not change by more than 1
# compare to 1.01 for numeric reasons
assert np.all(np.abs(np.diff(arr, axis=0)) <= 1.01), np.abs(np.diff(arr, axis=0)).max()
assert np.all(np.abs(np.diff(arr, axis=1)) <= 1.01)
assert np.all(np.abs(np.diff(arr, axis=2)) <= 1.01)
def test_criteria_trivial():
data = TsdfGenerator(block_size, sigma=0.)
gt, tsdf_in = data.__getitem_split__()
gt = gt[None, :] # add dim for batch
assert np.abs(tsdf_in).max() < 33
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int))
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
assert len(criteria.gt_octree) == 1
mock_out = np.concatenate((tsdf_in[None,:]<0, tsdf_in[None,:]>=0),
axis=1).astype(float)
mock_out=1000*(mock_out-0.5)
mock_out = [{(0,0,0):torch.from_numpy(mock_out).float()}]
loss = criteria(mock_out)
assert loss.dim()==0
assert loss < 0.01, loss
def test_gt():
pass
#get gt,
#get gt_octree
    #render gt
#render gt_octree
def test_criteria(levels=2):
res=2**(levels-1)*block_size
data = TsdfGenerator(res, sigma=0.9)
gt, tsdf_in = data.__getitem_split__()
gt = gt[None, :] # add dim for batch
assert np.abs(tsdf_in).max() < res
    #labels should be symmetric
def count_label(gt, label, level=1):
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int))
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
gt=criteria.gt_octree[level]
return np.count_nonzero(np.array(list(gt.values()))==label)
n_outside = count_label(gt, OUTSIDE)
n_inside = count_label(gt, INSIDE)
n_mixed = count_label(gt, MIXED)
assert n_outside+n_inside+n_mixed==(2**(levels-2))**3
rev_inside = count_label(-gt, OUTSIDE)
assert n_inside==rev_inside, (n_inside, rev_inside)
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int))
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
assert len(criteria.gt_octree) == levels
assert len(criteria.gt_octree[0]) == (2**(levels-1))**3, len(criteria.gt_octree[0])
assert len(criteria.gt_octree[-1]) == 1, len(criteria.gt_octree[-1])
for l, level in enumerate(criteria.gt_octree):
for k, v in level.items():
assert v.dim() > 0, (l, k, v)
def test_basic_debug():
T = torch.zeros(1,1,36,36,36)
outplane = 16
mod = nn.Conv3d(1, outplane, kernel_size=3, stride=1,
padding=0, bias=False)
T = mod(T)
mod = nn.BatchNorm3d(outplane)
T = mod(T)
mod = nn.ReLU(inplace=True)
T = mod(T)
mod = nn.Conv3d(outplane, outplane, kernel_size=3, stride=1,
padding=0, bias=False)
T = mod(T)
mod = nn.BatchNorm3d(outplane)
T = mod(T)
assert T.shape == (1,16,32,32,32)
def test_simple_net_single_data():
data = TsdfGenerator(block_size, sigma=0.9)
vis = visdom.Visdom()
gt, tsdf_in = data.__getitem__(0)
gt = gt[None, :] # add dim for batch
assert np.abs(tsdf_in).max() < block_size
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int)).to(device)
rep_pad = nn.ReplicationPad3d(pad+n_conv)
tsdf = [rep_pad(torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device))]
#prev = {(0, 0, 0): torch.rand(1, feature_dim, block_size//2, block_size//2,
# block_size//2).float().to(device)}
prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,
block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)
).float().to(device)}
#assert tsdf[0].shape == (1, 1, block_size, block_size, block_size)
assert gt_label.shape == (1, block_size, block_size, block_size)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
mod = BottomLevel(feature_dim, block_size)
if device=='cuda':
mod.cuda()
criteria.cuda()
optimizer = optim.Adam(mod.parameters(), lr=0.001) # , momentum=0.9)
for it in range(1, 100):
out = mod(tsdf, prev)
assert len(out) == 1
assert out[0][(0,0,0)].shape[1] == 2, out.shape
loss = criteria(out)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (it+1) % 10 == 0:
sdf_ = octree_to_sdf(out, block_size)
print('level ', np.count_nonzero(sdf_ == 1))
err = plotVoxelVisdom(gt[0], sdf_, tsdf_in[0], vis)
assert np.abs(tsdf_in).max() < 33
print(err)
print(it, loss)
assert err < 2
def test_bottom_layer( block_size = 32):
dataset = TsdfGenerator(block_size, n_elips=1, sigma=0.9, epoch_size=1000)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
num_workers=4)
vis = visdom.Visdom()
mod = BottomLevel(feature_dim, block_size)
if device=='cuda':
mod.cuda()
optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.9)
m = nn.ReplicationPad3d(mod.pad+mod.n_conv)
prev = {(0, 0, 0): torch.rand(1, feature_dim,
block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad
).float().to(device)}
gt_label = None
for it, (gt, tsdf_in) in enumerate(train_loader):
assert np.abs(tsdf_in).max() < 33
assert gt.max() > 1 and gt.min() < -1
gt_label = torch.ones_like(gt)*INSIDE
gt_label[gt >= 0] = OUTSIDE
gt_label = gt_label.long().to(device)
tsdf = [m(tsdf_in).float().to(device)]
for T in prev.values():
assert torch.all(torch.isfinite(T))
for T in tsdf:
assert torch.all(torch.isfinite(T))
out = mod(tsdf, prev)
assert out[0][(0,0,0)].max()>out[0][(0,0,0)].min()
for oct in out:
if not np.all([torch.all(torch.isfinite(o)) for o in oct.values()]):
import ipdb; ipdb.set_trace()
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device=='cuda':
criteria.cuda()
loss = criteria(out)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(it, loss)
if it>1 and it%100 == 0:
sdf_ = octree_to_sdf(out, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
print(it, err)
assert err < 2, err
def test_2tier_net_single_data():
res = block_size*2
dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=100)
vis = visdom.Visdom()
mod = TopLevel(feature_dim, BottomLevel(feature_dim, block_size), block_size=block_size)
if device == 'cuda':
mod.cuda()
optimizer = optim.Adam(mod.parameters(), lr=0.01)#, momentum=0.9)
gt, tsdf_in = dataset.__getitem__(0)
assert np.abs(tsdf_in).max() < 33
assert gt.max() > 1 and gt.min() < -1
gt = torch.from_numpy(gt[None, :])
gt_label = torch.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = gt_label.long().to(device)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device == 'cuda':
criteria.cuda()
tsdf = torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device)
for it in range(1000):
out = mod(tsdf)
assert len(out) == 2
for l in out[1:]:
for v in l.values():
                # only level 0 can have a full block
assert v.shape[-1] < block_size, (v.shape)
loss = criteria(out)
assert len(out) == 2
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(it, loss)
if (it+1) % 10 == 0:
#mod.eval()
sdf_ = octree_to_sdf(out, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0], vis)
#mod.train()
print(it, err)
assert err < 2,err
def test_4tier_data(block_size=block_size):
res=block_size*(2**3)
dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000)
gt, tsdf = dataset.__getitem__(0)
mod = BottomLevel(feature_dim, block_size)
for i in range(2): #add 2 mid layers
print('adding mid layer')
mod = MidLevel(feature_dim, feature_dim, mod, block_size,
thresh=thresh, budget=4)
mod = TopLevel(feature_dim, mod, block_size=block_size)
out = mod(torch.from_numpy(tsdf[None,:]).float())
def test_2tier_net(res=64, block_size=block_size):
dataset = TsdfGenerator(res, n_elips=1, sigma=0.9, epoch_size=10000, debug=False)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
num_workers=2)
vis = visdom.Visdom()
Force = False
if not Force and Path('model_2tier.pth').exists():
mod = torch.load('model_2tier.pth')
else:
layers = []
layers.append(BottomLevel(feature_dim, block_size))
while block_size*2**len(layers) <= res/2:
print('adding mid layer', len(layers))
layers.append(MidLevel(feature_dim, feature_dim, layers[-1],
block_size, thresh=0.5, budget=4))
mod = TopLevel(feature_dim, layers[-1], block_size=block_size)
if device == 'cuda':
mod.cuda()
optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.95)
for it, (gt, tsdf_in) in enumerate(train_loader):
assert np.abs(tsdf_in).max() < res
assert gt.max() > 1 and gt.min() < -1
gt_label = torch.zeros_like(gt, device=device)
gt_label[gt >= 0] = 1
gt_label = gt_label.long().to(device)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device == 'cuda':
criteria.cuda()
#tsdf = tsdf_in.float().cuda()
t_start = time.time()
tsdf = tsdf_in.float().to(device)
pred = mod(tsdf)
forward_t = time.time()-t_start
t = time.time()
loss = criteria(pred)
loss_t = time.time()-t
t = time.time()
optimizer.zero_grad()
loss.backward()
back_t = time.time()-t
t = time.time()
optimizer.step()
step_t = time.time()-t
t = time.time()
print(it, loss.data)
print('valuated ', [len(o) for o in pred])
print('GT voxels ', np.count_nonzero([o.numel()>3 for o in criteria.gt_octree]))
print('timing:{total:.3f}. forward {forward_t:.3f}, loss {loss_t:.3f}, back {back_t:.3f}, step {step_t:.3f}'.format(
total=t-t_start, forward_t=forward_t, loss_t=loss_t, back_t=back_t, step_t=step_t))
if (it+1) % 100 == 0:
mod.eval()
out = mod(tsdf)
loss = criteria(out)
for i in range(len(out)):
resample = (2**i)
print('Eval: level %d, %d/%d evaluated' % (i, len(out[i]),
(res/block_size/resample)**3))
sdf_ = octree_to_sdf(out, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
if loss.data<1:
import ipdb; ipdb.set_trace()
mod.train()
print(it, err)
torch.save(mod, 'model_2tier.pth')
if err < 2 :
break
#assert err < 2
def create_model(block_size, feature_dim, res):
layers = []
layers.append(BottomLevel(feature_dim, block_size))
while block_size*2**len(layers) <= res/2:
print('adding mid layer', len(layers))
layers.append(MidLevel(feature_dim, feature_dim, layers[-1],
block_size, thresh=0.1))
mod = TopLevel(feature_dim, layers[-1], block_size=block_size)
return mod
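# Illustrative sketch of wiring up create_model, mirroring the pattern used in
# the tests above: a single random TSDF volume is pushed through the octree net.
def _example_create_model(res=64):
    mod = create_model(block_size, feature_dim, res)
    if device == 'cuda':
        mod.cuda()
    dataset = TsdfGenerator(res, n_elips=1, sigma=0.9, epoch_size=100)
    gt, tsdf_in = dataset.__getitem__(0)
    out = mod(torch.from_numpy(tsdf_in[None, :]).float().to(device))
    # `out` is a list of per-level dicts mapping octant coordinates to logits.
    return out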
def test_simple_split(res=64, block_size=block_size):
dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000, debug=True)
vis = visdom.Visdom()
mod = torch.load('model.pth')
if device == 'cuda':
mod.cuda()
mod.eval()
gt, tsdf_in = dataset.__getitem_split__()
gt = torch.from_numpy(gt[None, :])
tsdf_in = torch.from_numpy(tsdf_in[None, :])
gt_label = torch.zeros_like(gt, device=device)
gt_label[gt >= 0] = 1
gt_label = gt_label.long().to(device)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device == 'cuda':
criteria.cuda()
tsdf = tsdf_in.float().to(device)
pred = mod(tsdf)
loss = criteria(pred)
print(loss.data)
print('evaluated ', [len(o) for o in pred])
for X in pred[0]:
X_ = tuple(np.array(X)//2)
print (X, pred[1][X_])
assert pred[1][X_][0,2]>0.5
sdf_ = octree_to_sdf(pred, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
import ipdb; ipdb.set_trace()
for X,v in criteria.gt_octree[0].items():
if v.numel()>1:
assert X[2]==1 #that's how we built the space
def test_split_subtree(padding=0):
feat = torch.rand(1, feature_dim, block_size+2*padding,
block_size+2*padding,
block_size+2*padding
).float()
split = split_tree(feat,padding=padding)
assert len(split) == 8, len(split)
assert torch.all(split[(0, 0, 0)][0, :, padding, padding, padding] ==
feat[0, :, padding, padding, padding])
assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==
feat[0, :, block_size//2+padding, padding, padding])
split[(1, 0, 0)][0, 0, padding, padding, padding] = 12.13
#this is no longer true, I don't know how to do this inplace
#assert feat[0, 0, block_size//2, 0, 0] == 12.13
def test_split_subtree_with_padding():
padding=2
feat = torch.rand(1, feature_dim, block_size, block_size,
block_size).float()
split = split_tree(feat, padding=2)
assert len(split) == 8, len(split)
octant = split[(0,0,0)]
assert torch.all(octant[0, :padding, 0, 0, 0] == 0)
assert torch.all(octant[0, -padding:, 0, 0, 0] == 0)
assert octant.shape[-3:] == tuple(s//2 + 2*padding for s in feat.shape[-3:])
assert torch.all(octant[0, padding:-padding, 0, 0, 0] == feat[0, :, 0, 0, 0])
assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==
feat[0, :, block_size//2, 0, 0])
split[(1, 0, 0)][0, 0, 0, 0, 0] = 12.13
assert feat[0, 0, block_size//2+padding, 0, 0] == 12.13
if __name__ == '__main__':
import sys
logger.remove()
logger.add(sys.stderr , format="{time} {level} {message}", level="INFO")
#test_4tier_data()
#test_criteria_trivial()
#test_criteria()
#test_criteria(4)
#test_data()
#test_ellipsoid()
#test_convtrans()
#test_split_subtree()
#test_split_subtree(padding=2)
#test_basic_debug()
#test_bottom_io()
#test_simple_net_single_data()
#test_bottom_layer()
# TODO why does this not converge? interesting
#test_2tier_net_single_data()
#test_2tier_net(res=32, block_size=block_size)
test_2tier_net(res=64, block_size=block_size)
test_simple_split(res=64, block_size=block_size)
import ipdb; ipdb.set_trace()
test_2tier_net(res=128, block_size=block_size)
|
#
# * This file is distributed under the terms in the attached LICENSE file.
# * If you do not find this file, copies can be found by writing to:
# * Intel Research Berkeley, 2150 Shattuck Avenue, Suite 1300,
# * Berkeley, CA, 94704. Attention: Intel License Inquiry.
# * Or
# * UC Berkeley EECS Computer Science Division, 387 Soda Hall #1776,
# * Berkeley, CA, 94707. Attention: P2 Group.
#
########### Description ###########
#
#
# Given script runs aggregates.olg test and checks the test output
#
# Assumptions - Three nodes with ports 20202, 20203, 20204. Execute the following:
#
# * At 20202:
# * tests/runOverLog -DME=\"localhost:20202\" -DNEIGHBOR1=\"localhost:20203\" -DNEIGHBOR2=\"localhost:20204\" -o unitTests/olg/aggregates.olg -n localhost -p 20202
#
# * At 20203:
# * tests/runOverLog -DME=\"localhost:20203\" -DNEIGHBOR1=\"localhost:20202\" -DNEIGHBOR2=\"localhost:20204\" -o unitTests/olg/aggregates.olg -n localhost -p 20203
#
# * At 20204:
# * tests/runOverLog -DME=\"localhost:20204\" -DNEIGHBOR1=\"localhost:20202\" -DNEIGHBOR2=\"localhost:20203\" -o unitTests/olg/aggregates.olg -n localhost -p 20204
#
# Expected output - (multiple tuples may be present in the output and the order of the results can vary)
#
# * At 20202:
# ##Print[RecvEvent!smallestNeighborOf!d2_eca!localhost:20202]: [smallestNeighborOf(localhost:20202, localhost:20203)]
# ##Print[RecvEvent!smallestNeighborOf!d2_eca!localhost:20202]: [smallestNeighborOf(localhost:20202, localhost:20204)]
#
# * At 20203
#
# ##Print[RecvEvent!smallestNeighborOf!d2_eca!localhost:20203]: [smallestNeighborOf(localhost:20203, localhost:20202)]
# ##Print[RecvEvent!largestNeighborOf!d1_eca!localhost:20203]: [largestNeighborOf(localhost:20203, localhost:20204)]
#
# *At 20204
# ##Print[RecvEvent!largestNeighborOf!d1_eca!localhost:20204]: [largestNeighborOf(localhost:20204, localhost:20203)]
# ##Print[RecvEvent!largestNeighborOf!d1_eca!localhost:20204]: [largestNeighborOf(localhost:20204, localhost:20202)]
#
#
####################################
#!/usr/bin/python
import os
import time
import threading
import re
import sys
import getopt
import subprocess
# Usage function
def usage():
print """
aggregates.py -E <planner path> -B <olg path> -T <time in seconds>
-E planner path
-B olg path
-T time (secs) for test to run
-h prints usage message
"""
# Function to parse the output file and check whether the output matches the expected value
def script_output(stdout_20202, stdout_20203, stdout_20204):
output = ""
for line in stdout_20202.readlines():
output = output + line
p = re.compile(r"""
([#][#]Print\[RecvEvent \s* RULE \s* d2\]: \s* \[smallestNeighborOf\(localhost:20202, \s* localhost:20203\)\]
.*
[#][#]Print\[RecvEvent \s* RULE \s* d2\]: \s* \[smallestNeighborOf\(localhost:20202, \s* localhost:20204\)\]
|
[#][#]Print\[RecvEvent \s* RULE \s* d2\]: \s* \[smallestNeighborOf\(localhost:20202, \s* localhost:20204\)\]
.*
[#][#]Print\[RecvEvent \s* RULE \s* d2\]: \s* \[smallestNeighborOf\(localhost:20202, \s* localhost:20203\)\])
""", re.VERBOSE|re.DOTALL)
flag = p.search(output)
if flag is None:
print "Test failed"
return
output = ""
for line in stdout_20203.readlines():
output = output + line
p = re.compile(r"""
([#][#]Print\[RecvEvent \s* RULE \s* d2\]: \s* \[smallestNeighborOf\(localhost:20203, \s* localhost:20202\)\]
.*
[#][#]Print\[RecvEvent \s* RULE \s* d1\]: \s* \[largestNeighborOf\(localhost:20203, \s* localhost:20204\)\]
|
[#][#]Print\[RecvEvent \s* RULE \s* d1\]: \s* \[largestNeighborOf\(localhost:20203, \s* localhost:20204\)\]
.*
[#][#]Print\[RecvEvent \s* RULE \s* d2\]: \s* \[smallestNeighborOf\(localhost:20203, \s* localhost:20202\)\])
""", re.VERBOSE|re.DOTALL)
flag = p.search(output)
if flag is None:
print "Test failed"
return
output = ""
for line in stdout_20204.readlines():
output = output + line
p = re.compile(r"""
([#][#]Print\[RecvEvent \s* RULE \s* d1\]: \s* \[largestNeighborOf\(localhost:20204, \s* localhost:20203\)\]
.*
[#][#]Print\[RecvEvent \s* RULE \s* d1\]: \s* \[largestNeighborOf\(localhost:20204, \s* localhost:20202\)\]
|
[#][#]Print\[RecvEvent \s* RULE \s* d1\]: \s* \[largestNeighborOf\(localhost:20204, \s* localhost:20202\)\]
.*
[#][#]Print\[RecvEvent \s* RULE \s* d1\]: \s* \[largestNeighborOf\(localhost:20204, \s* localhost:20203\)\])
""", re.VERBOSE|re.DOTALL)
flag = p.search(output)
if flag is None:
print "Test failed"
return
else:
print "Test passed"
#Function to kill the child after a set time
def kill_pid(stdout_20202, stdout_20203, stdout_20204, pid_20202, pid_20203, pid_20204):
#print "killing child"
os.kill(pid_20202, 3)
os.kill(pid_20203, 3)
os.kill(pid_20204, 3)
#print "program killed"
script_output(stdout_20202, stdout_20203, stdout_20204)
opt, arg = getopt.getopt(sys.argv[1:], 'B:E:T:h')
for key,val in opt:
if key=='-B':
olg_path = val
elif key == '-E':
executable_path = val
elif key == '-T':
time_interval = val
elif key == '-h':
usage()
sys.exit(0)
try:
args=[executable_path , '-DME=\"localhost:20202\"', '-DNEIGHBOR1=\"localhost:20203\"', '-DNEIGHBOR2=\"localhost:20204\"', '-o', os.path.join(olg_path,'aggregates.olg'), '-n', 'localhost', '-p', '20202', '2>&1']
p_20202 = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
except OSError, e:
#print "Execution failed"
print e
sys.exit(0)
time.sleep(1)
pid_20202 = p_20202.pid
#print pid_20202
try:
args=[executable_path , '-DME=\"localhost:20203\"', '-DNEIGHBOR1=\"localhost:20202\"', '-DNEIGHBOR2=\"localhost:20204\"', '-o', os.path.join(olg_path,'aggregates.olg'), '-n', 'localhost', '-p', '20203', '2>&1']
p_20203 = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
except OSError, e:
#print "Execution failed"
print e
sys.exit(0)
time.sleep(1)
pid_20203 = p_20203.pid
#print pid_20203
try:
args=[executable_path , '-DME=\"localhost:20204\"', '-DNEIGHBOR1=\"localhost:20202\"', '-DNEIGHBOR2=\"localhost:20203\"', '-o', os.path.join(olg_path,'aggregates.olg'), '-n', 'localhost', '-p', '20204', '2>&1']
p_20204 = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
except OSError, e:
#print "Execution failed"
print e
sys.exit(0)
pid_20204 = p_20204.pid
#print pid_20204
if os.getpid() != pid_20202 and os.getpid() != pid_20203 and os.getpid() != pid_20204:
t = threading.Timer(int(time_interval), kill_pid, [p_20202.stdout, p_20203.stdout, p_20204.stdout, pid_20202, pid_20203, pid_20204])
t.start()
|
# coding=utf-8
import nltk
from classification_utils import get_lines_from_file, load_manually_labeled_tweets, aggregate_results
from sklearn.svm import LinearSVC, NuSVC, NuSVR, OneClassSVM, SVC, SVR
from nltk.classify.scikitlearn import SklearnClassifier
__author__ = 'kiro'
def get_list_of_possible_words_in_tweets():
"""
Create a frequency distribution of words from the sentiment dictionaries,
i.e. build the vocabulary of all words that affect the sentiment of a tweet.
The positive and negative word lists are loaded from files below; the function
takes no parameters.
@return: the words of the combined frequency distribution.
"""
# TODO fix the shortcomings of not having the option of dynamically changing the feature we use.
positive_dict, negative_dict = "kiro-monogram-positive.txt", "kiro-monogram-negative.txt"
#positive_dict, negative_dict = "bigram-compiled-positive.txt", "bigram-compiled-negative.txt"
dictionary_base = "/home/kiro/ntnu/master/code/dictionaries/"
positive_dict = get_lines_from_file(dictionary_base + positive_dict)
negative_dict = get_lines_from_file(dictionary_base + negative_dict)
word_list = positive_dict + negative_dict
# returns a list of all words that has an effect on the sentiment of a tweet
return nltk.FreqDist(word_list).keys()
def extract_features_from_text(text):
"""
Extract classification features from a tweet's words.
@param text: the tweet (list of words) to extract features from.
@return: a dict of boolean features to be used for classification.
"""
dictionary = get_list_of_possible_words_in_tweets()
document_words = set(text)
features = {}
# it is cheaper to iterate the words of one tweet than the whole dictionary.
# marks every word of the tweet with whether it appears in the sentiment dictionary
for word in document_words:
features['contains(%s)' % word] = (word in dictionary)
return features
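# Minimal sketch (made-up tokens and dictionary, not the word lists loaded above)
# of the feature dict that extract_features_from_text produces: one boolean per
# word of the tweet, true when that word occurs in the sentiment dictionary.
def _sketch_feature_dict():
    dictionary = {'great', 'horrible'}
    tweet_tokens = ['what', 'a', 'great', 'day']
    features = dict(('contains(%s)' % w, w in dictionary) for w in set(tweet_tokens))
    # e.g. {'contains(great)': True, 'contains(day)': False, ...}
    return features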
def initialize_classifier(classifier_class, tweets):
"""
Train and initialize classifier, then return it.
@param tweets: list of tweets
@param classifier_class: the nltk classifier class. Currently NaiveBayesClassifier and DecisionTreeClassifier tested
@return: a nltk classifier.
"""
# get the training set.
#print "INFO -- Compile training set for the classifier"
training_set = nltk.classify.apply_features(extract_features_from_text, tweets)
# create the classifier.
print "INFO -- Training the classifier, this might take some time."
classifier = classifier_class.train(training_set)
#print "INFO -- Training complete."
return classifier
def classify(classifier_class, tweets):
"""
Run the classification of tweets
@param classifier_class: the nltk classifier class that we will use for the classification
@param tweets: the list of tweets to classify
@return: list of classification result (positive/negative)
"""
# instantiate the classifier
classifier = initialize_classifier(classifier_class, tweets)
print "INFO -- Classifying tweets, this might take some time."
results = []
# for all tweets
for tweet in [tweet[0] for tweet in tweets]:
#print tweet
# classify the tweet and append the result to list.
# runs classify() on the given classifier class in the nltk library.
results.append(classifier.classify(extract_features_from_text(tweet)))
#print results[-1], type(results[-1])
return results
def run_classifier(tweet_file, classifier_class, text):
"""
Run the test of the classifier, so we can get some results.
@param tweet_file: the file to load tweets from.
@param classifier_class: the nltk classifier class to be used.
@param text: text to be printed with the results.
"""
# load tweets
classification_base = "/home/kiro/ntnu/master/code/classification/"
tweets = load_manually_labeled_tweets(classification_base + tweet_file)
# classifying all tweets as positive or negative.
sentiment_classification = classify(classifier_class, tweets)
# aggregate results
counts, accuracy = aggregate_results(tweets, sentiment_classification)
print "INFO -- ", text
print "{failed classifications, correct classifications}, accuracy of the classifier"
print counts, "%.4f" % accuracy, "\n"
def test_svm_classes():
"""
Testing all the possible classes of the sklearn.svm library.
"""
# SVM, Linear Support Vector Classification
classifiers = [
LinearSVC(),
NuSVC(),
NuSVR(),
OneClassSVM(),
SVC(),
SVR()
]
for c in classifiers:
print c
classifier = SklearnClassifier(c)
run_classifier("tweets_classified_manually", classifier, "Kiro tweets, SVM")
# easy running
if __name__ == "__main__":
# list of [filename, description]
tweet_sets = [
["tweets_classified_manually", "Kiro compiled dataset"],
["obama_tweets_classified_manually", "Obama tweet set"]
]
classification_classes = [
# Decision Tree classification.
# Using this takes so long that I have always canceled the execution.
#nltk.DecisionTreeClassifier,
# Naive Bayes classification
nltk.NaiveBayesClassifier,
# SVM, Linear Support Vector Classification
SklearnClassifier(LinearSVC())
]
for classification_class in classification_classes:
for data_file, description in tweet_sets:
print "--", description, "--"
run_classifier(data_file, classification_class, str(classification_class))
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import cast, List, TYPE_CHECKING
import time
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.exceptions import ServiceResponseTimeoutError
from ._timer import Timer
from .._utils import is_retryable_status_code
from .._search_indexing_buffered_sender_base import SearchIndexingBufferedSenderBase
from ...indexes.aio import SearchIndexClient as SearchServiceClient
from .._generated.aio import SearchIndexClient
from .._generated.models import IndexBatch, IndexingResult
from .._search_documents_error import RequestEntityTooLargeError
from ._index_documents_batch_async import IndexDocumentsBatch
from ..._headers_mixin import HeadersMixin
from ..._version import SDK_MONIKER
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import AzureKeyCredential
class SearchIndexingBufferedSender(SearchIndexingBufferedSenderBase, HeadersMixin):
"""A buffered sender for document indexing actions.
:param endpoint: The URL endpoint of an Azure search service
:type endpoint: str
:param index_name: The name of the index to connect to
:type index_name: str
:param credential: A credential to authorize search client requests
:type credential: ~azure.core.credentials.AzureKeyCredential
:keyword bool auto_flush: whether auto flush mode is on. Defaults to True.
:keyword int auto_flush_interval: the maximum number of seconds between two flushes. This only takes
effect when auto_flush is on. Defaults to 60 seconds. If a non-positive number is set, it falls back
to 86400s (1 day).
:keyword int initial_batch_action_count: The initial number of actions to group into a batch when
tuning the behavior of the sender. The default value is 512.
:keyword int max_retries: The number of times to retry a failed document. The default value is 3.
:keyword callable on_new: If set, the client calls this callback when a new IndexAction is added.
:keyword callable on_progress: If set, the client calls this callback when an IndexAction succeeds.
:keyword callable on_error: If set, the client calls this callback when an IndexAction fails.
:keyword callable on_remove: If set, the client calls this callback when an IndexAction is removed
from the queue (whether it succeeded or failed).
:keyword str api_version: The Search API version to use for requests.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, endpoint, index_name, credential, **kwargs):
# type: (str, str, AzureKeyCredential, **Any) -> None
super(SearchIndexingBufferedSender, self).__init__(
endpoint=endpoint,
index_name=index_name,
credential=credential,
**kwargs)
self._index_documents_batch = IndexDocumentsBatch()
self._client = SearchIndexClient(
endpoint=endpoint, index_name=index_name, sdk_moniker=SDK_MONIKER, **kwargs
) # type: SearchIndexClient
self._reset_timer()
async def _cleanup(self, flush=True):
# type: (bool) -> None
"""Clean up the client.
:param bool flush: flush the actions queue before shutting down the client.
Defaults to True.
"""
if flush:
await self.flush()
if self._auto_flush:
self._timer.cancel()
def __repr__(self):
# type: () -> str
return "<SearchIndexingBufferedSender [endpoint={}, index={}]>".format(
repr(self._endpoint), repr(self._index_name)
)[:1024]
@property
def actions(self):
# type: () -> List[IndexAction]
"""The list of currently index actions in queue to index.
:rtype: List[IndexAction]
"""
return self._index_documents_batch.actions
@distributed_trace_async
async def close(self, **kwargs): # pylint: disable=unused-argument
# type: () -> None
"""Close the :class:`~azure.search.documents.aio.SearchClient` session."""
await self._cleanup(flush=True)
return await self._client.close()
@distributed_trace_async
async def flush(self, timeout=86400, **kwargs): # pylint:disable=unused-argument
# type: (int) -> bool
"""Flush the batch.
:param int timeout: timeout setting, in seconds. Default is 86400s (one day).
:return: True if there were errors, else False.
:rtype: bool
"""
has_error = False
begin_time = int(time.time())
while len(self.actions) > 0:
now = int(time.time())
remaining = timeout - (now - begin_time)
if remaining < 0:
raise ServiceResponseTimeoutError("Service response time out")
result = await self._process(timeout=remaining, raise_error=False)
if result:
has_error = True
return has_error
async def _process(self, timeout=86400, **kwargs):
# type: (int) -> bool
raise_error = kwargs.pop("raise_error", True)
actions = await self._index_documents_batch.dequeue_actions()
has_error = False
if not self._index_key:
try:
client = SearchServiceClient(self._endpoint, self._credential)
result = await client.get_index(self._index_name)
if result:
for field in result.fields:
if field.key:
self._index_key = field.name
break
except Exception: # pylint: disable=broad-except
pass
self._reset_timer()
try:
results = await self._index_documents_actions(actions=actions, timeout=timeout)
for result in results:
try:
action = next(x for x in actions if x.additional_properties.get(self._index_key) == result.key)
if result.succeeded:
await self._callback_succeed(action)
elif is_retryable_status_code(result.status_code):
await self._retry_action(action)
has_error = True
else:
await self._callback_fail(action)
has_error = True
except StopIteration:
pass
return has_error
except Exception: # pylint: disable=broad-except
for action in actions:
await self._retry_action(action)
if raise_error:
raise
return True
async def _process_if_needed(self):
# type: () -> None
"""Triggered every time a new action is queued. It checks the queued actions
and flushes them if:
1. auto_flush is on
2. there are at least self._batch_action_count actions queued
"""
if not self._auto_flush:
return
if len(self._index_documents_batch.actions) < self._batch_action_count:
return
await self._process(raise_error=False)
def _reset_timer(self):
# pylint: disable=access-member-before-definition
try:
self._timer.cancel()
except AttributeError:
pass
if self._auto_flush:
self._timer = Timer(self._auto_flush_interval, self._process)
@distributed_trace_async
async def upload_documents(self, documents, **kwargs): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue upload documents actions.
:param documents: A list of documents to upload.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_upload_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def delete_documents(self, documents, **kwargs): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue delete documents actions
:param documents: A list of documents to delete.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_delete_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def merge_documents(self, documents, **kwargs): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue merge documents actions
:param documents: A list of documents to merge.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_merge_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def merge_or_upload_documents(self, documents, **kwargs): # pylint: disable=unused-argument
# type: (List[dict]) -> None
"""Queue merge documents or upload documents actions
:param documents: A list of documents to merge or upload.
:type documents: List[dict]
"""
actions = await self._index_documents_batch.add_merge_or_upload_actions(documents)
await self._callback_new(actions)
await self._process_if_needed()
@distributed_trace_async
async def index_documents(self, batch, **kwargs):
# type: (IndexDocumentsBatch, **Any) -> List[IndexingResult]
"""Specify a document operations to perform as a batch.
:param batch: A batch of document operations to perform.
:type batch: IndexDocumentsBatch
:rtype: List[IndexingResult]
:raises :class:`~azure.search.documents.RequestEntityTooLargeError`
"""
return await self._index_documents_actions(actions=batch.actions, **kwargs)
async def _index_documents_actions(self, actions, **kwargs):
# type: (List[IndexAction], **Any) -> List[IndexingResult]
error_map = {413: RequestEntityTooLargeError}
timeout = kwargs.pop('timeout', 86400)
begin_time = int(time.time())
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
try:
index_documents = IndexBatch(actions=actions)
batch_response = await self._client.documents.index(batch=index_documents, error_map=error_map, **kwargs)
return cast(List[IndexingResult], batch_response.results)
except RequestEntityTooLargeError:
if len(actions) == 1:
raise
pos = round(len(actions) / 2)
now = int(time.time())
remaining = timeout - (now - begin_time)
if remaining < 0:
raise ServiceResponseTimeoutError("Service response time out")
batch_response_first_half = await self._index_documents_actions(
actions=actions[:pos],
error_map=error_map,
**kwargs
)
# the recursive call already returns a List[IndexingResult]
result_first_half = batch_response_first_half or []
now = int(time.time())
remaining = timeout - (now - begin_time)
if remaining < 0:
raise ServiceResponseTimeoutError("Service response time out")
batch_response_second_half = await self._index_documents_actions(
actions=actions[pos:],
error_map=error_map,
**kwargs
)
result_second_half = batch_response_second_half or []
# list.extend() returns None, so concatenate to return the combined results
return result_first_half + result_second_half
async def __aenter__(self):
# type: () -> SearchIndexingBufferedSender
await self._client.__aenter__() # pylint: disable=no-member
return self
async def __aexit__(self, *args):
# type: (*Any) -> None
await self.close()
await self._client.__aexit__(*args) # pylint: disable=no-member
async def _retry_action(self, action):
# type: (IndexAction) -> None
if not self._index_key:
await self._callback_fail(action)
return
key = action.additional_properties.get(self._index_key)
counter = self._retry_counter.get(key)
if not counter:
# first time that fails
self._retry_counter[key] = 1
await self._index_documents_batch.enqueue_action(action)
elif counter < self._max_retries - 1:
# not reach retry limit yet
self._retry_counter[key] = counter + 1
await self._index_documents_batch.enqueue_action(action)
else:
await self._callback_fail(action)
async def _callback_succeed(self, action):
# type: (IndexAction) -> None
if self._on_remove:
await self._on_remove(action)
if self._on_progress:
await self._on_progress(action)
async def _callback_fail(self, action):
# type: (IndexAction) -> None
if self._on_remove:
await self._on_remove(action)
if self._on_error:
await self._on_error(action)
async def _callback_new(self, actions):
# type: (List[IndexAction]) -> None
if self._on_new:
for action in actions:
await self._on_new(action)
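# Minimal usage sketch, not part of the SDK source above. The endpoint, the
# index name ("hotels"), its key field ("hotelId") and the admin key are all
# placeholders for illustration only.
async def _example_usage():  # pragma: no cover - illustrative only
    from azure.core.credentials import AzureKeyCredential
    async with SearchIndexingBufferedSender(
        "https://<service>.search.windows.net",
        "hotels",
        AzureKeyCredential("<admin-key>"),
        auto_flush_interval=30,
    ) as sender:
        await sender.upload_documents([{"hotelId": "1", "name": "Stay Inn"}])
        await sender.flush()  # push any still-queued actions before exiting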
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
item.pop('Region')
for media_type, weight in item.iteritems():
if media_type != 'Country' and media_type != 'Region':
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('reports', '0001_initial'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
]
COUNTRY_WEIGHTS= [{'Country': 'AF',
'Internet': '0.37',
'Print': '0.33',
'Radio': '0.93',
'Region': 'Asia',
'Television': '0.93',
'Twitter': 1},
{'Country': 'AL',
'Internet': '0.36',
'Print': '1.02',
'Radio': '0.30',
'Region': 'Europe',
'Television': '0.30',
'Twitter': 1},
{'Country': 'AG',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'AR',
'Internet': '1.34',
'Print': '0.74',
'Radio': '1.07',
'Region': 'Latin America',
'Television': '1.07',
'Twitter': 1},
{'Country': 'AM',
'Internet': '0.31',
'Print': '1.02',
'Radio': '0.29',
'Region': 'Europe',
'Television': '0.29',
'Twitter': 1},
{'Country': 'AU',
'Internet': '1.23',
'Print': '0.98',
'Radio': '0.81',
'Region': 'Pacific Islands',
'Television': '0.81',
'Twitter': 1},
{'Country': 'AT',
'Internet': '0.72',
'Print': '0.58',
'Radio': '0.48',
'Region': 'Europe',
'Television': '0.48',
'Twitter': 1},
{'Country': 'BS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BD',
'Internet': '0.88',
'Print': '3.63',
'Radio': '2.09',
'Region': 'Asia',
'Television': '2.09',
'Twitter': 1},
{'Country': 'BB',
'Internet': '0.13',
'Print': '0.13',
'Radio': '0.09',
'Region': 'Caribbean',
'Television': '0.09',
'Twitter': 1},
{'Country': 'BY',
'Internet': '0.59',
'Print': '0.47',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'BE',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BZ',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BJ',
'Internet': '0.18',
'Print': '0.03',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'BT',
'Internet': '0.12',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Asia',
'Television': '0.14',
'Twitter': 1},
{'Country': 'BO',
'Internet': '0.53',
'Print': '0.42',
'Radio': '0.55',
'Region': 'Latin America',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BA',
'Internet': '0.43',
'Print': '0.68',
'Radio': '0.32',
'Region': 'Europe',
'Television': '0.32',
'Twitter': 1},
{'Country': 'BW',
'Internet': '0.14',
'Print': '0.18',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'BR',
'Internet': '2.78',
'Print': '1.64',
'Radio': '2.35',
'Region': 'Latin America',
'Television': '2.35',
'Twitter': 1},
{'Country': 'BG',
'Internet': '0.54',
'Print': '0.41',
'Radio': '0.44',
'Region': 'Europe',
'Television': '0.44',
'Twitter': 1},
{'Country': 'BF',
'Internet': '0.23',
'Print': '0.10',
'Radio': '0.69',
'Region': 'Africa',
'Television': '0.69',
'Twitter': 1},
{'Country': 'BI',
'Internet': '0.10',
'Print': '0.10',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'CM',
'Internet': '0.33',
'Print': '0.17',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'CA',
'Internet': '1.54',
'Print': '1.31',
'Radio': '0.99',
'Region': 'North America',
'Television': '0.99',
'Twitter': 1},
{'Country': 'CV',
'Internet': '0.12',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Africa',
'Television': '0.12',
'Twitter': 1},
{'Country': 'CF',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'TD',
'Internet': '0.15',
'Print': '0.00',
'Radio': '0.60',
'Region': 'Africa',
'Television': '0.60',
'Twitter': 1},
{'Country': 'CL',
'Internet': '0.92',
'Print': '0.37',
'Radio': '0.70',
'Region': 'Latin America',
'Television': '0.70',
'Twitter': 1},
{'Country': 'CN',
'Internet': '6.79',
'Print': '6.23',
'Radio': '6.18',
'Region': 'Asia',
'Television': '6.18',
'Twitter': 1},
{'Country': 'CO',
'Internet': '1.36',
'Print': '0.66',
'Radio': '1.16',
'Region': 'Latin America',
'Television': '1.16',
'Twitter': 1},
{'Country': 'KM',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Africa',
'Television': '0.14',
'Twitter': 1},
{'Country': 'CD',
'Internet': '0.08',
'Print': '0.28',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'CG',
'Internet': '0.33',
'Print': '0.11',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'CR',
'Internet': '0.42',
'Print': '0.34',
'Radio': '0.37',
'Region': 'Latin America',
'Television': '0.37',
'Twitter': 1},
{'Country': 'HR',
'Internet': '0.45',
'Print': '0.41',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'CU',
'Internet': '0.47',
'Print': '0.12',
'Radio': '0.56',
'Region': 'Caribbean',
'Television': '0.56',
'Twitter': 1},
{'Country': 'CY',
'Internet': '0.23',
'Print': '0.13',
'Radio': '0.18',
'Region': 'Middle East',
'Television': '0.18',
'Twitter': 1},
{'Country': 'DK',
'Internet': '0.50',
'Print': '0.74',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'DO',
'Internet': '0.60',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'EC',
'Internet': '0.66',
'Print': '0.72',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'EG',
'Internet': '1.70',
'Print': '1.43',
'Radio': '1.51',
'Region': 'Middle East',
'Television': '1.51',
'Twitter': 1},
{'Country': 'SV',
'Internet': '0.35',
'Print': '0.32',
'Radio': '0.42',
'Region': 'Latin America',
'Television': '0.42',
'Twitter': 1},
{'Country': 'GQ',
'Internet': '0.09',
'Print': '0.68',
'Radio': '0.15',
'Region': 'Africa',
'Television': '0.15',
'Twitter': 1},
{'Country': 'EE',
'Internet': '0.27',
'Print': '0.27',
'Radio': '0.19',
'Region': 'Europe',
'Television': '0.19',
'Twitter': 1},
{'Country': 'ET',
'Internet': '0.34',
'Print': '0.39',
'Radio': '1.63',
'Region': 'Africa',
'Television': '1.63',
'Twitter': 1},
{'Country': 'FJ',
'Internet': '0.15',
'Print': '0.12',
'Radio': '0.16',
'Region': 'Pacific Islands',
'Television': '0.16',
'Twitter': 1},
{'Country': 'FI',
'Internet': '0.61',
'Print': '0.03',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'FR',
'Internet': '1.99',
'Print': '1.69',
'Radio': '1.33',
'Region': 'Europe',
'Television': '1.33',
'Twitter': 1},
{'Country': 'GA',
'Internet': '0.11',
'Print': '0.58',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GM',
'Internet': '0.14',
'Print': '0.04',
'Radio': '0.23',
'Region': 'Africa',
'Television': '0.23',
'Twitter': 1},
{'Country': 'GE',
'Internet': '0.40',
'Print': '1.02',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'DE',
'Internet': '2.27',
'Print': '2.50',
'Radio': '1.51',
'Region': 'Europe',
'Television': '1.51',
'Twitter': 1},
{'Country': 'GH',
'Internet': '0.61',
'Print': '0.39',
'Radio': '0.85',
'Region': 'Africa',
'Television': '0.85',
'Twitter': 1},
{'Country': 'GR',
'Internet': '0.68',
'Print': '0.44',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'GD',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'GT',
'Internet': '0.44',
'Print': '0.38',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'GW',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GN',
'Internet': '0.68',
'Print': '1.67',
'Radio': '0.56',
'Region': 'Africa',
'Television': '0.56',
'Twitter': 1},
{'Country': 'GY',
'Internet': '0.15',
'Print': '0.15',
'Radio': '0.15',
'Region': 'Caribbean',
'Television': '0.15',
'Twitter': 1},
{'Country': 'HT',
'Internet': '0.30',
'Print': '0.17',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'HU',
'Internet': '0.73',
'Print': '0.68',
'Radio': '0.52',
'Region': 'Europe',
'Television': '0.52',
'Twitter': 1},
{'Country': 'IS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Europe',
'Television': '0.10',
'Twitter': 1},
{'Country': 'IN',
'Internet': '4.18',
'Print': '5.72',
'Radio': '5.90',
'Region': 'Asia',
'Television': '5.90',
'Twitter': 1},
{'Country': 'IE',
'Internet': '0.52',
'Print': '0.18',
'Radio': '0.36',
'Region': 'Europe',
'Television': '0.36',
'Twitter': 1},
{'Country': 'IL',
'Internet': '0.65',
'Print': '0.89',
'Radio': '0.46',
'Region': 'Middle East',
'Television': '0.46',
'Twitter': 1},
{'Country': 'IT',
'Internet': '1.62',
'Print': '1.51',
'Radio': '1.29',
'Region': 'Europe',
'Television': '1.29',
'Twitter': 1},
{'Country': 'CI',
'Internet': '0.73',
'Print': '1.02',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'JM',
'Internet': '0.32',
'Print': '0.27',
'Radio': '0.28',
'Region': 'Caribbean',
'Television': '0.28',
'Twitter': 1},
{'Country': 'JP',
'Internet': '2.80',
'Print': '5.27',
'Radio': '1.87',
'Region': 'Asia',
'Television': '1.87',
'Twitter': 1},
{'Country': 'KZ',
'Internet': '0.84',
'Print': '0.58',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'KE',
'Internet': '1.10',
'Print': '0.44',
'Radio': '1.12',
'Region': 'Africa',
'Television': '1.12',
'Twitter': 1},
{'Country': 'KG',
'Internet': '0.31',
'Print': '0.05',
'Radio': '0.39',
'Region': 'Asia',
'Television': '0.39',
'Twitter': 1},
{'Country': 'LB',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.37',
'Region': 'Middle East',
'Television': '0.37',
'Twitter': 1},
{'Country': 'LS',
'Internet': '0.09',
'Print': '0.08',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'LR',
'Internet': '0.12',
'Print': '0.13',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'LU',
'Internet': '0.19',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Europe',
'Television': '0.12',
'Twitter': 1},
{'Country': 'MK',
'Internet': '0.22',
'Print': '0.58',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'MG',
'Internet': '1.11',
'Print': '0.19',
'Radio': '0.80',
'Region': 'Africa',
'Television': '0.80',
'Twitter': 1},
{'Country': 'MW',
'Internet': '0.93',
'Print': '0.11',
'Radio': '0.68',
'Region': 'Africa',
'Television': '0.68',
'Twitter': 1},
{'Country': 'MY',
'Internet': '0.22',
'Print': '1.07',
'Radio': '0.91',
'Region': 'Asia',
'Television': '0.91',
'Twitter': 1},
{'Country': 'ML',
'Internet': '0.92',
'Print': '0.68',
'Radio': '0.66',
'Region': 'Africa',
'Television': '0.66',
'Twitter': 1},
{'Country': 'MT',
'Internet': '0.11',
'Print': '0.13',
'Radio': '0.11',
'Region': 'Europe',
'Television': '0.11',
'Twitter': 1},
{'Country': 'MR',
'Internet': '0.18',
'Print': '0.68',
'Radio': '0.33',
'Region': 'Africa',
'Television': '0.33',
'Twitter': 1},
{'Country': 'MU',
'Internet': '0.07',
'Print': '0.62',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'MX',
'Internet': '1.91',
'Print': '0.06',
'Radio': '1.84',
'Region': 'Latin America',
'Television': '1.84',
'Twitter': 1},
{'Country': 'MD',
'Internet': '0.33',
'Print': '0.16',
'Radio': '0.31',
'Region': 'Europe',
'Television': '0.31',
'Twitter': 1},
{'Country': 'MN',
'Internet': '0.19',
'Print': '0.14',
'Radio': '0.28',
'Region': 'Asia',
'Television': '0.28',
'Twitter': 1},
{'Country': 'ME',
'Internet': '0.16',
'Print': '0.00',
'Radio': '0.13',
'Region': 'Europe',
'Television': '0.13',
'Twitter': 1},
{'Country': 'MA',
'Internet': '1.20',
'Print': '0.38',
'Radio': '0.96',
'Region': 'Middle East',
'Television': '0.96',
'Twitter': 1},
{'Country': 'NA',
'Internet': '0.16',
'Print': '0.15',
'Radio': '0.25',
'Region': 'Africa',
'Television': '0.25',
'Twitter': 1},
{'Country': 'NP',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.88',
'Region': 'Asia',
'Television': '0.88',
'Twitter': 1},
{'Country': 'NL',
'Internet': '1.08',
'Print': '1.19',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'NZ',
'Internet': '0.55',
'Print': '0.68',
'Radio': '0.35',
'Region': 'Pacific Islands',
'Television': '0.35',
'Twitter': 1},
{'Country': 'NI',
'Internet': '0.25',
'Print': '0.26',
'Radio': '0.41',
'Region': 'Latin America',
'Television': '0.41',
'Twitter': 1},
{'Country': 'NE',
'Internet': '0.15',
'Print': '0.08',
'Radio': '0.71',
'Region': 'Africa',
'Television': '0.71',
'Twitter': 1},
{'Country': 'NG',
'Internet': '2.19',
'Print': '1.19',
'Radio': '2.21',
'Region': 'Africa',
'Television': '2.21',
'Twitter': 1},
{'Country': 'NO',
'Internet': '0.59',
'Print': '0.83',
'Radio': '0.37',
'Region': 'Europe',
'Television': '0.37',
'Twitter': 1},
{'Country': 'PK',
'Internet': '1.20',
'Print': '0.06',
'Radio': '2.25',
'Region': 'Asia',
'Television': '2.25',
'Twitter': 1},
{'Country': 'PS',
'Internet': '0.54',
'Print': '0.00',
'Radio': '0.59',
'Region': 'Middle East',
'Television': '0.59',
'Twitter': 1},
{'Country': 'PY',
'Internet': '0.38',
'Print': '0.31',
'Radio': '0.44',
'Region': 'Latin America',
'Television': '0.44',
'Twitter': 1},
{'Country': 'PE',
'Internet': '0.95',
'Print': '1.92',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'PH',
'Internet': '1.68',
'Print': '1.65',
'Radio': '1.66',
'Region': 'Asia',
'Television': '1.66',
'Twitter': 1},
{'Country': 'PL',
'Internet': '1.36',
'Print': '1.11',
'Radio': '1.02',
'Region': 'Europe',
'Television': '1.02',
'Twitter': 1},
{'Country': 'PT',
'Internet': '0.71',
'Print': '0.63',
'Radio': '0.54',
'Region': 'Europe',
'Television': '0.54',
'Twitter': 1},
{'Country': 'PR',
'Internet': '0.38',
'Print': '0.53',
'Radio': '0.32',
'Region': 'Latin America',
'Television': '0.32',
'Twitter': 1},
{'Country': 'RO',
'Internet': '0.90',
'Print': '0.65',
'Radio': '0.77',
'Region': 'Europe',
'Television': '0.77',
'Twitter': 1},
{'Country': 'WS',
'Internet': '0.04',
'Print': '0.68',
'Radio': '0.07',
'Region': 'Pacific Islands',
'Television': '0.07',
'Twitter': 1},
{'Country': 'SN',
'Internet': '0.48',
'Print': '0.21',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'RS',
'Internet': '0.58',
'Print': '0.58',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'SL',
'Internet': '0.08',
'Print': '0.07',
'Radio': '0.41',
'Region': 'Africa',
'Television': '0.41',
'Twitter': 1},
{'Country': 'SK',
'Internet': '0.57',
'Print': '0.68',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'SI',
'Internet': '0.33',
'Print': '0.31',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'SB',
'Internet': '0.06',
'Print': '0.04',
'Radio': '0.13',
'Region': 'Pacific Islands',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SO',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'ZA',
'Internet': '1.34',
'Print': '0.76',
'Radio': '1.21',
'Region': 'Africa',
'Television': '1.21',
'Twitter': 1},
{'Country': 'KR',
'Internet': '1.80',
'Print': '1.67',
'Radio': '1.17',
'Region': 'Asia',
'Television': '1.17',
'Twitter': 1},
{'Country': 'ES',
'Internet': '1.59',
'Print': '1.35',
'Radio': '1.14',
'Region': 'Europe',
'Television': '1.14',
'Twitter': 1},
{'Country': 'LC',
'Internet': '0.06',
'Print': '0.18',
'Radio': '0.07',
'Region': 'Caribbean',
'Television': '0.07',
'Twitter': 1},
{'Country': 'VC',
'Internet': '0.05',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'SD',
'Internet': '0.82',
'Print': '0.60',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'SS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.48',
'Region': 'Africa',
'Television': '0.48',
'Twitter': 1},
{'Country': 'SR',
'Internet': '0.12',
'Print': '0.12',
'Radio': '0.13',
'Region': 'Caribbean',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SZ',
'Internet': '0.15',
'Print': '0.10',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'SE',
'Internet': '0.78',
'Print': '1.11',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'CH',
'Internet': '0.72',
'Print': '0.94',
'Radio': '0.47',
'Region': 'Europe',
'Television': '0.47',
'Twitter': 1},
{'Country': 'TW',
'Internet': '1.00',
'Print': '0.68',
'Radio': '0.80',
'Region': 'Asia',
'Television': '0.80',
'Twitter': 1},
{'Country': 'TZ',
'Internet': '0.74',
'Print': '0.35',
'Radio': '1.18',
'Region': 'Africa',
'Television': '1.18',
'Twitter': 1},
{'Country': 'TG',
'Internet': '0.15',
'Print': '0.07',
'Radio': '0.44',
'Region': 'Africa',
'Television': '0.44',
'Twitter': 1},
{'Country': 'TO',
'Internet': '0.05',
'Print': '0.05',
'Radio': '0.05',
'Region': 'Pacific Islands',
'Television': '0.05',
'Twitter': 1},
{'Country': 'TT',
'Internet': '0.25',
'Print': '0.18',
'Radio': '0.19',
'Region': 'Caribbean',
'Television': '0.19',
'Twitter': 1},
{'Country': 'TN',
'Internet': '0.60',
'Print': '0.31',
'Radio': '0.55',
'Region': 'Middle East',
'Television': '0.55',
'Twitter': 1},
{'Country': 'TR',
'Internet': '1.59',
'Print': '0.94',
'Radio': '1.44',
'Region': 'Europe',
'Television': '1.44',
'Twitter': 1},
{'Country': 'UG',
'Internet': '0.68',
'Print': '0.16',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'GB',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'US',
'Internet': '4.48',
'Print': '4.43',
'Radio': '2.98',
'Region': 'North America',
'Television': '2.98',
'Twitter': 1},
{'Country': 'UY',
'Internet': '0.38',
'Print': '0.56',
'Radio': '0.31',
'Region': 'Latin America',
'Television': '0.31',
'Twitter': 1},
{'Country': 'VU',
'Internet': '0.05',
'Print': '0.58',
'Radio': '0.08',
'Region': 'Asia',
'Television': '0.08',
'Twitter': 1},
{'Country': 'VE',
'Internet': '1.02',
'Print': '1.01',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'VN',
'Internet': '1.69',
'Print': '0.52',
'Radio': '1.59',
'Region': 'Asia',
'Television': '1.59',
'Twitter': 1},
{'Country': 'ZM',
'Internet': '0.41',
'Print': '0.15',
'Radio': '0.64',
'Region': 'Africa',
'Television': '0.64',
'Twitter': 1},
{'Country': 'ZW',
'Internet': '0.45',
'Print': '0.30',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'WL',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'SQ',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'B1',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'B2',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1}]
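# Illustrative sketch only (never called by the migration): how one
# COUNTRY_WEIGHTS entry expands into per-media-type rows, mirroring the
# create() calls in populate_weights after 'Country' and 'Region' are popped.
def _preview_weight_rows(item):
    item = dict(item)
    country = item.pop('Country')
    item.pop('Region')
    return [(country, media_type, weight) for media_type, weight in item.items()]
# _preview_weight_rows({'Country': 'AF', 'Region': 'Asia', 'Internet': '0.37', 'Twitter': 1})
# -> [('AF', 'Internet', '0.37'), ('AF', 'Twitter', 1)]  (ordering may vary)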
|
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, ResetPassword, ResetPasswordRequest, \
ChangeEmailForm
from ..email import send_email
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint \
and request.blueprint != 'auth' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data) # the second argument is the "remember me" boolean that keeps the user logged in
next = request.args.get('next')
if next is None or not next.startswith('/'):
next = url_for('main.index')
return redirect(next)
flash('Invalid email or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token(3600)
send_email(user.email, 'Confirm your account', 'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
db.session.commit()
flash('Account has been confirmed. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token(3600)
send_email(current_user.email, 'Confirm your account', 'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.new_password.data
db.session.add(current_user)
db.session.commit()
flash('New password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password')
return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def reset_password_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = ResetPasswordRequest()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset your password', 'auth/email/reset_password', user=user, token=token)
flash('An email with instructions to reset your password has been sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index')) # prevent already logged-in users from hitting the reset link by mistake
form = ResetPassword()
if form.validate_on_submit():
if User.reset_password(token, form.reset_password.data):
db.session.commit()
flash('Password has been updated')
return redirect(url_for('auth.login'))
else:
flash('Error. Please reset your password again.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change_email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.newemail.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Update your Email Address', 'auth/email/change_email', user=current_user,
token=token)
flash('A confirmation link has been sent to your new Email address')
return redirect(url_for('auth.login'))
else:
flash('Invalid email or password')
return render_template('auth/change_email.html', form=form)
@auth.route('/change_email/<token>')
@login_required
def email_change(token):
if current_user.change_email(token):
db.session.commit()
flash('New Email address has been updated')
else:
flash('Invalid request')
return redirect(url_for('main.index'))
|
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An example script that demonstrates converting a proprietary format to a
# Google Transit Feed Specification file.
#
# You can load table.txt, the example input, in Excel. It contains three
# sections:
# 1) A list of global options, starting with a line containing the word
# 'options'. Each option has an name in the first column and most options
# have a value in the second column.
# 2) A table of stops, starting with a line containing the word 'stops'. Each
# row of the table has 3 columns: name, latitude, longitude
# 3) A list of routes. There is an empty row between each route. The first row
# for a route lists the short_name and long_name. After the first row the
# left-most column lists the stop names visited by the route. Each column
# contains the times a single trip visits the stops.
#
# This is very simple example which you could use as a base for your own
# transit feed builder.
import transitfeed
from optparse import OptionParser
import re
stops = {}
# table is a list of lists in this form
# [ ['Short Name', 'Long Name'],
# ['Stop 1', 'Stop 2', ...]
# [time_at_1, time_at_2, ...] # times for trip 1
# [time_at_1, time_at_2, ...] # times for trip 2
# ... ]
def add_route_to_schedule(schedule, table):
if len(table) >= 2:
r = schedule.add_route(short_name=table[0][0], long_name=table[0][1], route_type='Bus')
for trip in table[2:]:
if len(trip) > len(table[1]):
print("ignoring %s" % trip[len(table[1]):])
trip = trip[0:len(table[1])]
t = r.add_trip(schedule, headsign='My headsign')
trip_stops = [] # Build a list of (time, stopname) tuples
for i in range(0, len(trip)):
if re.search(r'\S', trip[i]):
trip_stops.append( (transitfeed.time_to_seconds_since_midnight(trip[i]), table[1][i]) )
trip_stops.sort() # Sort by time
for (time, stopname) in trip_stops:
t.add_stop_time(stop=stops[stopname.lower()], arrival_secs=time, departure_secs=time)
def transpose_table(table):
"""Transpose a list of lists, using None to extend all input lists to the
same length.
For example:
>>> transpose_table(
[ [11, 12, 13],
[21, 22],
[31, 32, 33, 34]])
[ [11, 21, 31],
[12, 22, 32],
[13, None, 33],
[None, None, 34]]
"""
transposed = []
rows = len(table)
cols = max(len(row) for row in table)
for x in range(cols):
transposed.append([])
for y in range(rows):
if x < len(table[y]):
transposed[x].append(table[y][x])
else:
transposed[x].append(None)
return transposed
def process_options(schedule, table):
service_period = schedule.get_default_service_period()
agency_name, agency_url, agency_timezone = (None, None, None)
for row in table[1:]:
command = row[0].lower()
if command == 'weekday':
service_period.set_weekday_service()
elif command == 'start_date':
service_period.set_start_date(row[1])
elif command == 'end_date':
service_period.set_end_date(row[1])
elif command == 'add_date':
service_period.set_date_has_service(date=row[1])
elif command == 'remove_date':
service_period.set_date_has_service(date=row[1], has_service=False)
elif command == 'agency_name':
agency_name = row[1]
elif command == 'agency_url':
agency_url = row[1]
elif command == 'agency_timezone':
agency_timezone = row[1]
if not (agency_name and agency_url and agency_timezone):
print("You must provide agency information")
schedule.new_default_agency(agency_name=agency_name, agency_url=agency_url, agency_timezone=agency_timezone)
def add_stops(schedule, table):
for name, lat_str, lng_str in table[1:]:
stop = schedule.add_stop(lat=float(lat_str), lng=float(lng_str), name=name)
stops[name.lower()] = stop
def process_table(schedule, table):
if table[0][0].lower() == 'options':
process_options(schedule, table)
elif table[0][0].lower() == 'stops':
add_stops(schedule, table)
else:
transposed = [table[0]] # Keep route_short_name and route_long_name on first row
# Transpose rest of table. Input contains the stop names in table[x][0], x
# >= 1 with trips found in columns, so we need to transpose table[1:].
# As a diagram Transpose from
# [['stop 1', '10:00', '11:00', '12:00'],
# ['stop 2', '10:10', '11:10', '12:10'],
# ['stop 3', '10:20', '11:20', '12:20']]
# to
# [['stop 1', 'stop 2', 'stop 3'],
# ['10:00', '10:10', '10:20'],
# ['11:00', '11:10', '11:20'],
# ['12:00', '12:10', '12:20']]
transposed.extend(transpose_table(table[1:]))
add_route_to_schedule(schedule, transposed)
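# Illustrative sketch (made-up values) of the three table shapes that
# process_table dispatches on, mirroring the table.txt layout described at the
# top of this file. The route table keeps stop names in the first column with
# one trip per following column; process_table transposes it before adding it.
EXAMPLE_OPTIONS_TABLE = [
    ['options'],
    ['weekday'],
    ['start_date', '20070101'],
    ['end_date', '20071231'],
    ['agency_name', 'Demo Transit'],
    ['agency_url', 'http://example.com'],
    ['agency_timezone', 'America/Los_Angeles'],
]
EXAMPLE_STOPS_TABLE = [
    ['stops'],
    ['Stop A', '36.90', '-116.76'],
    ['Stop B', '36.91', '-116.77'],
]
EXAMPLE_ROUTE_TABLE = [
    ['R1', 'Demo Route'],
    ['Stop A', '10:00:00', '11:00:00'],
    ['Stop B', '10:10:00', '11:10:00'],
]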
def main():
parser = OptionParser()
parser.add_option('--input', dest='input',
help='Path of input file')
parser.add_option('--output', dest='output',
help='Path of output file, should end in .zip')
parser.set_defaults(output='feed.zip')
(options, args) = parser.parse_args()
schedule = transitfeed.Schedule()
table = []
for line in open(options.input):
line = line.rstrip()
if not line:
process_table(schedule, table)
table = []
else:
table.append(line.split('\t'))
process_table(schedule, table)
schedule.write_google_transit_feed(options.output)
if __name__ == '__main__':
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch.utils.data
class FairseqDataset(torch.utils.data.Dataset):
"""A dataset that provides helpers for batching."""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
raise NotImplementedError
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
raise NotImplementedError
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return False
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
raise NotImplementedError
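# A minimal sketch (not part of fairseq) of a concrete FairseqDataset: it wraps
# a list of pre-tokenized 1-D LongTensors and pads them to a batch in collater().
# The class name and the pad_idx default below are illustrative assumptions;
# torch is available here via the `import torch.utils.data` at the top.
class PaddedListDataset(FairseqDataset):
    def __init__(self, tensors, pad_idx=0):
        self.tensors = tensors
        self.pad_idx = pad_idx
    def __getitem__(self, index):
        return self.tensors[index]
    def __len__(self):
        return len(self.tensors)
    def collater(self, samples):
        # pad every sample to the longest one in the mini-batch
        max_len = max(s.numel() for s in samples)
        batch = torch.full((len(samples), max_len), self.pad_idx, dtype=torch.long)
        for i, s in enumerate(samples):
            batch[i, :s.numel()] = s
        return {'net_input': {'src_tokens': batch}}
    def num_tokens(self, index):
        return self.tensors[index].numel()
    def size(self, index):
        return self.tensors[index].numel()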
|
import frappe
from frappe.utils import flt
def merge_bundled_items(self, method):
    bundles = {}
    item_meta = frappe.get_meta(self.doctype + " Item")
    count = 0

    copy_fields = ['qty', 'stock_qty']
    sum_fields = ['total_weight', 'amount', 'net_amount']
    rate_fields = [('rate', 'amount'), ('net_rate', 'net_amount'), ('weight_per_unit', 'total_weight')]
    base_fields = [('base_' + f, f) for f in sum_fields if item_meta.has_field('base_' + f)]
    base_fields += [('base_' + f, f) for f in copy_fields if item_meta.has_field('base_' + f)]
    base_fields += [('base_' + t, t) for t, s in rate_fields if item_meta.has_field('base_' + t)]

    # Sum amounts
    in_bundle = 0
    for item in self.items:
        if item.bsbt == 'Bundle Start':
            in_bundle = item.idx

        if not in_bundle or item.bsbt == 'Bundle Start':
            new_bundle = frappe._dict()
            for f in copy_fields:
                new_bundle[f] = item.get(f)
            bundles[item.idx] = new_bundle

        group_item = bundles[in_bundle or item.idx]

        if item.bsbt == 'Bundle Terminate':
            in_bundle = 0

        for f in sum_fields:
            group_item[f] = group_item.get(f, 0) + flt(item.get(f))

        group_item_serial_nos = group_item.setdefault('serial_no', [])
        if item.get('serial_no'):
            group_item_serial_nos += filter(lambda s: s, item.serial_no.split('\n'))

    # Calculate average rates and get serial nos string
    for group_item in bundles.values():
        if group_item.qty:
            for target, source in rate_fields:
                group_item[target] = flt(group_item[source]) / flt(group_item.qty)
        else:
            for target, source in rate_fields:
                group_item[target] = 0

        group_item.serial_no = '\n'.join(group_item.serial_no)

    # Calculate company currency values
    for group_item in bundles.values():
        for target, source in base_fields:
            group_item[target] = group_item.get(source, 0) * self.conversion_rate

    # Remove duplicates and set aggregated values
    to_remove = []
    for item in self.items:
        if item.idx in bundles.keys():
            count += 1
            item.update(bundles[item.idx])
            del bundles[item.idx]
            item.idx = count
        else:
            to_remove.append(item)

    for item in to_remove:
        self.remove(item)

    self.total_qty = sum([d.qty for d in self.items])
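
# Worked example of the averaging step above (toy numbers, not real data):
#   a bundle with summed qty = 2, amount = 300.0 and net_amount = 280.0 ends
#   up with rate = 300.0 / 2 = 150.0 and net_rate = 280.0 / 2 = 140.0, while
#   the serial numbers collected from all bundled rows are newline-joined
#   onto the bundle's first row.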
|
from __future__ import absolute_import
import six
import string
from django.utils.encoding import force_text
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
from sentry.utils.safe import get_path, trim
__all__ = ("Contexts",)
context_types = {}
class _IndexFormatter(string.Formatter):
    def format_field(self, value, format_spec):
        if not format_spec and isinstance(value, bool):
            return value and "yes" or "no"
        return string.Formatter.format_field(self, value, format_spec)


def format_index_expr(format_string, data):
    return six.text_type(_IndexFormatter().vformat(six.text_type(format_string), (), data).strip())
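

# Example of the index formatter above (values are illustrative):
#   format_index_expr(u"{name} {version}", {"name": "iOS", "version": "13.2"})
#   -> u"iOS 13.2"
# Booleans are rendered as "yes"/"no" by _IndexFormatter.format_field:
#   format_index_expr(u"{rooted}", {"rooted": True}) -> u"yes"
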
def contexttype(cls):
    context_types[cls.type] = cls
    return cls


class ContextType(object):
    indexed_fields = None
    type = None

    def __init__(self, alias, data):
        self.alias = alias
        ctx_data = {}
        for key, value in six.iteritems(trim(data)):
            # we use simple checks here, rather than 'in set()', to avoid
            # issues with maps/lists
            if value is not None and value != "":
                ctx_data[force_text(key)] = value
        self.data = ctx_data

    def to_json(self):
        rv = dict(self.data)
        rv["type"] = self.type
        return prune_empty_keys(rv)

    @classmethod
    def values_for_data(cls, data):
        rv = []
        for context in six.itervalues(data.get("contexts") or {}):
            if context and context.get("type") == cls.type:
                rv.append(context)
        return rv

    @classmethod
    def primary_value_for_data(cls, data):
        val = get_path(data, "contexts", cls.type)
        if val and val.get("type") == cls.type:
            return val

        rv = cls.values_for_data(data)
        if len(rv) == 1:
            return rv[0]

    def iter_tags(self):
        if self.indexed_fields:
            for field, f_string in six.iteritems(self.indexed_fields):
                try:
                    value = format_index_expr(f_string, self.data)
                except KeyError:
                    continue
                if value:
                    if not field:
                        yield (self.alias, value)
                    else:
                        yield ("%s.%s" % (self.alias, field), value)


# TODO(dcramer): contexts need to document/describe expected (optional) fields
@contexttype
class DefaultContextType(ContextType):
    type = "default"


@contexttype
class AppContextType(ContextType):
    type = "app"
    indexed_fields = {"device": u"{device_app_hash}"}


@contexttype
class DeviceContextType(ContextType):
    type = "device"
    indexed_fields = {"": u"{model}", "family": u"{family}"}
    # model_id, arch


@contexttype
class RuntimeContextType(ContextType):
    type = "runtime"
    indexed_fields = {"": u"{name} {version}", "name": u"{name}"}


@contexttype
class BrowserContextType(ContextType):
    type = "browser"
    indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
    # viewport


@contexttype
class OsContextType(ContextType):
    type = "os"
    indexed_fields = {"": u"{name} {version}", "name": u"{name}", "rooted": u"{rooted}"}
    # build, rooted


@contexttype
class GpuContextType(ContextType):
    type = "gpu"
    indexed_fields = {"name": u"{name}", "vendor": u"{vendor_name}"}


@contexttype
class MonitorContextType(ContextType):
    type = "monitor"
    indexed_fields = {"id": u"{id}"}


@contexttype
class TraceContextType(ContextType):
    type = "trace"
    indexed_fields = {"": u"{trace_id}", "span": u"{span_id}", "ctx": u"{trace_id}-{span_id}"}


class Contexts(Interface):
"""
This interface stores context specific information.
"""
display_score = 1100
score = 800
@classmethod
def to_python(cls, data):
rv = {}
for alias, value in six.iteritems(data):
# XXX(markus): The `None`-case should be handled in the UI and
# other consumers of this interface
if value is not None:
rv[alias] = cls.normalize_context(alias, value)
return cls(**rv)
@classmethod
def normalize_context(cls, alias, data):
ctx_type = data.get("type", alias)
ctx_cls = context_types.get(ctx_type, DefaultContextType)
return ctx_cls(alias, data)
def iter_contexts(self):
return six.itervalues(self._data)
def to_json(self):
rv = {}
for alias, inst in six.iteritems(self._data):
rv[alias] = inst.to_json()
return rv
def iter_tags(self):
for inst in self.iter_contexts():
for tag in inst.iter_tags():
yield tag
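

# Rough usage sketch (assumed event payload; relies on Interface storing its
# keyword arguments in self._data, which is defined outside this excerpt):
def _example_context_tags():
    contexts = Contexts.to_python(
        {"device": {"type": "device", "model": "iPhone7,2", "family": "iPhone"}}
    )
    # Expected to yield roughly ("device", "iPhone7,2") and
    # ("device.family", "iPhone"), rendered from DeviceContextType's
    # indexed_fields format strings via format_index_expr().
    return list(contexts.iter_tags())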
|