# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-08 00:16
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import localflavor.us.models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
('stats', '0002_auto_20151109_0319'),
]
operations = [
migrations.CreateModel(
name='Commissioner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=254, verbose_name='Name of personnel')),
('role', models.CharField(max_length=100, verbose_name='Role of personnel')),
('email', models.EmailField(max_length=254, verbose_name='Email of personnel')),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=20, verbose_name='Phone number of personnel')),
('phone_number_extension', models.CharField(blank=True, default='', max_length=4, verbose_name='Phone number extension')),
('fax_number', phonenumber_field.modelfields.PhoneNumberField(max_length=20, verbose_name='Fax number of personnel')),
('fax_number_extension', models.CharField(blank=True, default='', max_length=4, verbose_name='Fax number extension')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', localflavor.us.models.USStateField(choices=[('AL', 'Alabama'), ('AK', 'Alaska'), ('AS', 'American Samoa'), ('AZ', 'Arizona'), ('AR', 'Arkansas'), ('AA', 'Armed Forces Americas'), ('AE', 'Armed Forces Europe'), ('AP', 'Armed Forces Pacific'), ('CA', 'California'), ('CO', 'Colorado'), ('CT', 'Connecticut'), ('DE', 'Delaware'), ('DC', 'District of Columbia'), ('FL', 'Florida'), ('GA', 'Georgia'), ('GU', 'Guam'), ('HI', 'Hawaii'), ('ID', 'Idaho'), ('IL', 'Illinois'), ('IN', 'Indiana'), ('IA', 'Iowa'), ('KS', 'Kansas'), ('KY', 'Kentucky'), ('LA', 'Louisiana'), ('ME', 'Maine'), ('MD', 'Maryland'), ('MA', 'Massachusetts'), ('MI', 'Michigan'), ('MN', 'Minnesota'), ('MS', 'Mississippi'), ('MO', 'Missouri'), ('MT', 'Montana'), ('NE', 'Nebraska'), ('NV', 'Nevada'), ('NH', 'New Hampshire'), ('NJ', 'New Jersey'), ('NM', 'New Mexico'), ('NY', 'New York'), ('NC', 'North Carolina'), ('ND', 'North Dakota'), ('MP', 'Northern Mariana Islands'), ('OH', 'Ohio'), ('OK', 'Oklahoma'), ('OR', 'Oregon'), ('PA', 'Pennsylvania'), ('PR', 'Puerto Rico'), ('RI', 'Rhode Island'), ('SC', 'South Carolina'), ('SD', 'South Dakota'), ('TN', 'Tennessee'), ('TX', 'Texas'), ('UT', 'Utah'), ('VT', 'Vermont'), ('VI', 'Virgin Islands'), ('VA', 'Virginia'), ('WA', 'Washington'), ('WV', 'West Virginia'), ('WI', 'Wisconsin'), ('WY', 'Wyoming')], max_length=2, verbose_name='State name')),
('slug', models.SlugField()),
('shape', django.contrib.gis.db.models.fields.MultiPolygonField(null=True, srid=4326, verbose_name='State shape')),
],
),
migrations.CreateModel(
name='StateStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('all_students_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students')),
('african_american_count', models.IntegerField(blank=True, null=True, verbose_name='Number of African American students')),
('american_indian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of American Indian students')),
('asian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Asian students')),
('hispanic_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Hispanic students')),
('pacific_islander_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Pacific Islander students')),
('two_or_more_races_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Two or More Races students')),
('white_count', models.IntegerField(blank=True, null=True, verbose_name='Number of White students')),
('early_childhood_education_count', models.IntegerField(blank=True, null=True, verbose_name='Number of early childhood education students')),
('prek_count', models.IntegerField(blank=True, null=True, verbose_name='Number of pre-K students')),
('kindergarten_count', models.IntegerField(blank=True, null=True, verbose_name='Number of kindergarten students')),
('first_count', models.IntegerField(blank=True, null=True, verbose_name='Number of first grade students')),
('second_count', models.IntegerField(blank=True, null=True, verbose_name='Number of second grade students')),
('third_count', models.IntegerField(blank=True, null=True, verbose_name='Number of third grade students')),
('fourth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of fourth grade students')),
('fifth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of fifth grade students')),
('sixth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of sixth grade students')),
('seventh_count', models.IntegerField(blank=True, null=True, verbose_name='Number of seventh grade students')),
                ('eighth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of eighth grade students')),
('ninth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of ninth grade students')),
('tenth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of tenth grade students')),
('eleventh_count', models.IntegerField(blank=True, null=True, verbose_name='Number of eleventh grade students')),
('twelfth_count', models.IntegerField(blank=True, null=True, verbose_name='Number of twelfth grade students')),
('at_risk_count', models.IntegerField(blank=True, null=True, verbose_name='Number of at risk students')),
('economically_disadvantaged_count', models.IntegerField(blank=True, null=True, verbose_name='Number of economically disadvantaged students')),
('limited_english_proficient_count', models.IntegerField(blank=True, null=True, verbose_name='Number of limited English proficient students')),
('african_american_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of African American students')),
('american_indian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of American Indian students')),
('asian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Asian students')),
('hispanic_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Hispanic students')),
('pacific_islander_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Pacific Islander students')),
('two_or_more_races_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Two or More Races students')),
('white_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of White students')),
('early_childhood_education_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of early childhood education students')),
('prek_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of pre-K students')),
('kindergarten_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of kindergarten students')),
('first_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of first grade students')),
('second_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of second grade students')),
('third_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of third grade students')),
('fourth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of fourth grade students')),
('fifth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of fifth grade students')),
('sixth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of sixth grade students')),
('seventh_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of seventh grade students')),
('eighth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of eighth grade students')),
('ninth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of ninth grade students')),
('tenth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of tenth grade students')),
                ('eleventh_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of eleventh grade students')),
('twelfth_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of twelfth grade students')),
('at_risk_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of at risk students')),
('economically_disadvantaged_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of economically disadvantaged students')),
('limited_english_proficient_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of limited English proficient students')),
('bilingual_esl_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students enrolled in bilingual/ESL program')),
('career_technical_education_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students enrolled in career and technical education program')),
('gifted_and_talented_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students enrolled in gifted and talented program')),
('special_education_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students enrolled in special education program')),
('bilingual_esl_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of students enrolled in bilingual/ESL program')),
('career_technical_education_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of students enrolled in career and technical education program')),
('gifted_and_talented_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of students enrolled in gifted and talented program')),
('special_education_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of students enrolled in special education program')),
('class_size_avg_kindergarten', models.FloatField(blank=True, null=True, verbose_name='Average kindergarten grade class size')),
('class_size_avg_first', models.FloatField(blank=True, null=True, verbose_name='Average first grade class size')),
('class_size_avg_second', models.FloatField(blank=True, null=True, verbose_name='Average second grade class size')),
('class_size_avg_third', models.FloatField(blank=True, null=True, verbose_name='Average third grade class size')),
('class_size_avg_fourth', models.FloatField(blank=True, null=True, verbose_name='Average fourth grade class size')),
('class_size_avg_fifth', models.FloatField(blank=True, null=True, verbose_name='Average fifth grade class size')),
('class_size_avg_sixth', models.FloatField(blank=True, null=True, verbose_name='Average sixth grade class size')),
('class_size_avg_mixed_elementary', models.FloatField(blank=True, null=True, verbose_name='Average mixed elementary class size')),
('class_size_avg_secondary_english', models.FloatField(blank=True, null=True, verbose_name='Average secondary English class size')),
('class_size_avg_secondary_foreign_language', models.FloatField(blank=True, null=True, verbose_name='Average secondary foreign language class size')),
('class_size_avg_secondary_math', models.FloatField(blank=True, null=True, verbose_name='Average secondary math class size')),
('class_size_avg_secondary_science', models.FloatField(blank=True, null=True, verbose_name='Average secondary science class size')),
('class_size_avg_secondary_social_studies', models.FloatField(blank=True, null=True, verbose_name='Average secondary social studies class size')),
('students_per_teacher', models.FloatField(blank=True, null=True, verbose_name='Number of students per teacher')),
('teacher_avg_tenure', models.FloatField(blank=True, null=True, verbose_name='Average tenure of teachers at entity')),
('teacher_avg_experience', models.FloatField(blank=True, null=True, verbose_name='Average years of experience at entity')),
('teacher_avg_base_salary', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Average teacher salary at entity')),
('teacher_avg_beginning_salary', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Average teacher beginning salary at entity')),
('teacher_avg_1_to_5_year_salary', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Average salary for teachers with 1-5 years experience')),
('teacher_avg_6_to_10_year_salary', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Average salary for teachers with 6-10 years experience')),
('teacher_avg_11_to_20_year_salary', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Average salary for teachers with 11-20 years experience')),
('teacher_avg_20_plus_year_salary', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Average salary for teachers with over 20 years experience')),
('college_ready_graduates_english_all_students_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready students in English')),
('college_ready_graduates_english_african_american_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready African American graduates in English')),
('college_ready_graduates_english_asian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Asian graduates in English')),
('college_ready_graduates_english_hispanic_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Hispanic graduates in English')),
('college_ready_graduates_english_american_indian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready American Indian graduates in English')),
('college_ready_graduates_english_pacific_islander_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Pacific Islander graduates in English')),
('college_ready_graduates_english_two_or_more_races_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready graduates of two or more races in English')),
('college_ready_graduates_english_white_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready white graduates in English')),
('college_ready_graduates_english_economically_disadvantaged_count', models.IntegerField(blank=True, db_column='college_ready_graduates_english_econ_disadv_count', null=True, verbose_name='Number of college ready economically disadvantaged graduates in English')),
                ('college_ready_graduates_english_limited_english_proficient_count', models.IntegerField(blank=True, db_column='college_ready_graduates_english_lep_count', null=True, verbose_name='Number of college ready limited English proficient graduates in English')),
('college_ready_graduates_english_at_risk_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready at risk graduates in English')),
('college_ready_graduates_math_all_students_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready students in math')),
('college_ready_graduates_math_african_american_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready African American graduates in math')),
('college_ready_graduates_math_asian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Asian graduates in math')),
('college_ready_graduates_math_hispanic_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Hispanic graduates in math')),
('college_ready_graduates_math_american_indian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready American Indian graduates in math')),
('college_ready_graduates_math_pacific_islander_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Pacific Islander graduates in math')),
                ('college_ready_graduates_math_two_or_more_races_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready graduates of two or more races in math')),
('college_ready_graduates_math_white_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready white graduates in math')),
('college_ready_graduates_math_economically_disadvantaged_count', models.IntegerField(blank=True, db_column='college_ready_graduates_math_econ_disadv_count', null=True, verbose_name='Number of college ready economically disadvantaged graduates in math')),
                ('college_ready_graduates_math_limited_english_proficient_count', models.IntegerField(blank=True, db_column='college_ready_graduates_math_lep_count', null=True, verbose_name='Number of college ready limited English proficient graduates in math')),
('college_ready_graduates_math_at_risk_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready at risk graduates in math')),
('college_ready_graduates_both_all_students_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready graduates in both subjects')),
                ('college_ready_graduates_both_african_american_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready African American graduates in both subjects')),
('college_ready_graduates_both_asian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Asian graduates in both subjects')),
('college_ready_graduates_both_hispanic_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Hispanic graduates in both subjects')),
('college_ready_graduates_both_american_indian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready American Indian graduates in both subjects')),
('college_ready_graduates_both_pacific_islander_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready Pacific Islander graduates in both subjects')),
('college_ready_graduates_both_two_or_more_races_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready graduates of two or more races in both subjects')),
('college_ready_graduates_both_white_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready white graduates in both subjects')),
('college_ready_graduates_both_economically_disadvantaged_count', models.IntegerField(blank=True, db_column='college_ready_graduates_both_econ_disadv_count', null=True, verbose_name='Number of college ready economically disadvantaged graduates in both subjects')),
                ('college_ready_graduates_both_limited_english_proficient_count', models.IntegerField(blank=True, db_column='college_ready_graduates_both_lep_count', null=True, verbose_name='Number of college ready limited English proficient graduates in both subjects')),
                ('college_ready_graduates_both_at_risk_count', models.IntegerField(blank=True, null=True, verbose_name='Number of college ready at risk graduates in both subjects')),
('college_ready_graduates_english_all_students_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready graduates in English')),
('college_ready_graduates_english_african_american_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready African American graduates in English')),
('college_ready_graduates_english_asian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Asian graduates in English')),
('college_ready_graduates_english_hispanic_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Hispanic graduates in English')),
                ('college_ready_graduates_english_american_indian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready American Indian graduates in English')),
('college_ready_graduates_english_pacific_islander_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Pacific Islander graduates in English')),
('college_ready_graduates_english_two_or_more_races_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready graduates of two or more races in English')),
('college_ready_graduates_english_white_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready white graduates in English')),
('college_ready_graduates_english_economically_disadvantaged_percent', models.FloatField(blank=True, db_column='college_ready_graduates_english_econ_disadv_percent', null=True, verbose_name='Percent of college ready economically disadvantaged graduates in English')),
                ('college_ready_graduates_english_limited_english_proficient_percent', models.FloatField(blank=True, db_column='college_ready_graduates_english_lep_percent', null=True, verbose_name='Percent of college ready limited English proficient graduates in English')),
('college_ready_graduates_english_at_risk_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready at risk graduates in English')),
('college_ready_graduates_math_all_students_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready students in math')),
('college_ready_graduates_math_african_american_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready African American graduates in math')),
('college_ready_graduates_math_asian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Asian graduates in math')),
('college_ready_graduates_math_hispanic_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Hispanic graduates in math')),
('college_ready_graduates_math_american_indian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready American Indian graduates in math')),
('college_ready_graduates_math_pacific_islander_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Pacific Islander graduates in math')),
                ('college_ready_graduates_math_two_or_more_races_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready graduates of two or more races in math')),
('college_ready_graduates_math_white_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready white graduates in math')),
('college_ready_graduates_math_economically_disadvantaged_percent', models.FloatField(blank=True, db_column='college_ready_graduates_math_econ_disadv_percent', null=True, verbose_name='Percent of college ready economically disadvantaged graduates in math')),
                ('college_ready_graduates_math_limited_english_proficient_percent', models.FloatField(blank=True, db_column='college_ready_graduates_math_lep_percent', null=True, verbose_name='Percent of college ready limited English proficient graduates in math')),
('college_ready_graduates_math_at_risk_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready at risk graduates in math')),
('college_ready_graduates_both_all_students_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready students in both subjects')),
                ('college_ready_graduates_both_african_american_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready African American graduates in both subjects')),
('college_ready_graduates_both_asian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Asian graduates in both subjects')),
('college_ready_graduates_both_hispanic_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Hispanic graduates in both subjects')),
('college_ready_graduates_both_american_indian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready American Indian graduates in both subjects')),
('college_ready_graduates_both_pacific_islander_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready Pacific Islander graduates in both subjects')),
('college_ready_graduates_both_two_or_more_races_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready graduates of two or more races in both subjects')),
('college_ready_graduates_both_white_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready white graduates in both subjects')),
('college_ready_graduates_both_economically_disadvantaged_percent', models.FloatField(blank=True, db_column='college_ready_graduates_both_econ_disadv_percent', null=True, verbose_name='Percent of college ready economically disadvantaged graduates in both subjects')),
                ('college_ready_graduates_both_limited_english_proficient_percent', models.FloatField(blank=True, db_column='college_ready_graduates_both_lep_percent', null=True, verbose_name='Percent of college ready limited English proficient graduates in both subjects')),
('college_ready_graduates_both_at_risk_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of college ready at risk graduates in both subjects')),
('avg_sat_score_all_students', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for all students')),
('avg_sat_score_african_american', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for African American students')),
('avg_sat_score_asian', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for Asian students')),
('avg_sat_score_hispanic', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for Hispanic students')),
('avg_sat_score_american_indian', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for American Indian students')),
('avg_sat_score_pacific_islander', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for Pacific Islander students')),
('avg_sat_score_two_or_more_races', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for students of two or more races')),
('avg_sat_score_white', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for white students')),
('avg_sat_score_economically_disadvantaged', models.IntegerField(blank=True, null=True, verbose_name='Average SAT score for economically disadvantaged students')),
('avg_act_score_all_students', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for all students')),
('avg_act_score_african_american', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for African American students')),
('avg_act_score_asian', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for Asian students')),
('avg_act_score_hispanic', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for Hispanic students')),
('avg_act_score_american_indian', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for American Indian students')),
('avg_act_score_pacific_islander', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for Pacific Islander students')),
('avg_act_score_two_or_more_races', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for students of two or more races')),
('avg_act_score_white', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for white students')),
('avg_act_score_economically_disadvantaged', models.FloatField(blank=True, null=True, verbose_name='Average ACT score for economically disadvantaged students')),
                ('ap_ib_all_students_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_african_american_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of African American students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_asian_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of Asian students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_hispanic_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of Hispanic students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_american_indian_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of American Indian students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_pacific_islander_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of Pacific Islander students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_two_or_more_races_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of students of two or more races who scored above criterion on at least one AP or IB test')),
                ('ap_ib_white_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of white students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_economically_disadvantaged_count_above_criterion', models.IntegerField(blank=True, null=True, verbose_name='Number of economically disadvantaged students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_all_students_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_african_american_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of African American students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_asian_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of Asian students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_hispanic_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of Hispanic students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_american_indian_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of American Indian students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_pacific_islander_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of Pacific Islander students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_two_or_more_races_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of students of two or more races who scored above criterion on at least one AP or IB test')),
                ('ap_ib_white_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of white students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_economically_disadvantaged_percent_above_criterion', models.FloatField(blank=True, null=True, verbose_name='Percent of economically disadvantaged students who scored above criterion on at least one AP or IB test')),
                ('ap_ib_all_students_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of students taking at least one AP or IB test')),
                ('ap_ib_african_american_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of African American students taking at least one AP or IB test')),
                ('ap_ib_asian_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of Asian students taking at least one AP or IB test')),
                ('ap_ib_hispanic_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of Hispanic students taking at least one AP or IB test')),
                ('ap_ib_american_indian_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of American Indian students taking at least one AP or IB test')),
                ('ap_ib_pacific_islander_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of Pacific Islander students taking at least one AP or IB test')),
                ('ap_ib_two_or_more_races_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of students of two or more races taking at least one AP or IB test')),
                ('ap_ib_white_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of white students taking at least one AP or IB test')),
                ('ap_ib_economically_disadvantaged_percent_taking', models.FloatField(blank=True, null=True, verbose_name='Percent of economically disadvantaged students taking at least one AP or IB test')),
('dropout_all_students_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 students who dropped out')),
('dropout_african_american_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 African American students who dropped out')),
('dropout_american_indian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 American Indian students who dropped out')),
('dropout_asian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 Asian students who dropped out')),
('dropout_hispanic_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 Hispanic students who dropped out')),
('dropout_pacific_islander_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 Pacific Islander students who dropped out')),
('dropout_two_or_more_races_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 students of two or more races who dropped out')),
('dropout_white_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 white students who dropped out')),
                ('dropout_at_risk_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 at risk students who dropped out')),
('dropout_economically_disadvantaged_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 economically disadvantaged students who dropped out')),
('dropout_limited_english_proficient_count', models.IntegerField(blank=True, null=True, verbose_name='Number of 9-12 limited English proficient students who dropped out')),
('dropout_all_students_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 students who dropped out')),
('dropout_african_american_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 African American students who dropped out')),
('dropout_american_indian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 American Indian students who dropped out')),
('dropout_asian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 Asian students who dropped out')),
('dropout_hispanic_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 Hispanic students who dropped out')),
('dropout_pacific_islander_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 Pacific Islander students who dropped out')),
('dropout_two_or_more_races_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 students of two or more races who dropped out')),
('dropout_white_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 white students who dropped out')),
('dropout_at_risk_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 at risk students who dropped out')),
('dropout_economically_disadvantaged_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 economically disadvantaged students who dropped out')),
('dropout_limited_english_proficient_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of 9-12 limited English proficient students who dropped out')),
('four_year_graduate_all_students_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students who graduated in 4 years')),
('four_year_graduate_african_american_count', models.IntegerField(blank=True, null=True, verbose_name='Number of African American students who graduated in 4 years')),
('four_year_graduate_american_indian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of American Indian students who graduated in 4 years')),
('four_year_graduate_asian_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Asian students who graduated in 4 years')),
('four_year_graduate_hispanic_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Hispanic students who graduated in 4 years')),
('four_year_graduate_pacific_islander_count', models.IntegerField(blank=True, null=True, verbose_name='Number of Pacific Islander students who graduated in 4 years')),
('four_year_graduate_two_or_more_races_count', models.IntegerField(blank=True, null=True, verbose_name='Number of students of two or more races who graduated in 4 years')),
('four_year_graduate_white_count', models.IntegerField(blank=True, null=True, verbose_name='Number of white students who graduated in 4 years')),
('four_year_graduate_at_risk_count', models.IntegerField(blank=True, null=True, verbose_name='Number of at risk students who graduated in 4 years')),
('four_year_graduate_economically_disadvantaged_count', models.IntegerField(blank=True, null=True, verbose_name='Number of economically disadvantaged students who graduated in 4 years')),
('four_year_graduate_limited_english_proficient_count', models.IntegerField(blank=True, null=True, verbose_name='Number of limited English proficient students who graduated in 4 years')),
('four_year_graduate_all_students_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of students who graduated in 4 years')),
('four_year_graduate_african_american_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of African American students who graduated in 4 years')),
('four_year_graduate_american_indian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of American Indian students who graduated in 4 years')),
('four_year_graduate_asian_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Asian students who graduated in 4 years')),
('four_year_graduate_hispanic_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Hispanic students who graduated in 4 years')),
('four_year_graduate_pacific_islander_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of Pacific Islander students who graduated in 4 years')),
('four_year_graduate_two_or_more_races_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of students of two or more races who graduated in 4 years')),
('four_year_graduate_white_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of white students who graduated in 4 years')),
('four_year_graduate_at_risk_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of at risk students who graduated in 4 years')),
('four_year_graduate_economically_disadvantaged_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of economically disadvantaged students who graduated in 4 years')),
('four_year_graduate_limited_english_proficient_percent', models.FloatField(blank=True, null=True, verbose_name='Percent of limited English proficient students who graduated in 4 years')),
('attendance_rate', models.FloatField(blank=True, null=True, verbose_name='Attendance rate as calculated by students present over students in membership')),
('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stats', to='states.State')),
('year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='state_stats', to='stats.SchoolYear')),
],
options={
'verbose_name_plural': 'State stats',
},
),
migrations.AddField(
model_name='commissioner',
name='state',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='commissioner_of', to='states.State'),
),
migrations.AlterUniqueTogether(
name='statestats',
unique_together=set([('state', 'year')]),
),
]
|
from . import units
from . import tools
import math
from .basedata import BaseData
class Swell(BaseData):
    def __init__(self, unit, wave_height=float('nan'), period=float('nan'), direction=float('nan'), compass_direction=None, max_energy=0, frequency_index=0):
super(Swell, self).__init__(unit)
self.wave_height = wave_height
self.period = period
if not math.isnan(direction):
self.direction = direction
self.compass_direction = units.degree_to_direction(direction)
elif compass_direction is not None:
self.compass_direction = compass_direction
self.direction = units.direction_to_degree(compass_direction)
else:
self.direction = float('nan')
self.compass_direction = ''
self.max_energy = max_energy
self.frequency_index = frequency_index
@property
def summary(self):
return '{0:.1f} {1} @ {2:.1f} s {3:.0f}\xb0 {4}'.format(self.wave_height, units.unit_name(self.unit,
units.Measurement.length), self.period, self.direction, self.compass_direction)
def is_valid(self):
return not math.isnan(self.wave_height) and not math.isnan(self.period) and len(self.compass_direction) > 0 and not math.isnan(self.direction)
def change_units(self, new_units):
old_units = self.unit
super(Swell, self).change_units(new_units)
self.wave_height = units.convert(self.wave_height, units.Measurement.length, old_units, self.unit)
def breaking_wave_estimate(self, beach_angle, depth, beach_slope):
        # Interpolates the approximate breaking wave heights from the contained swell data.
        # The swell is converted to metric units internally; the depth argument must be in meters.
if self.is_valid() is not True:
return
self.change_units(units.Units.metric)
wave_breaking_height = 0.0
if self.wave_height < 1000:
incident_angle = abs(self.direction - beach_angle) % 360
if incident_angle < 90:
wave_breaking_height, _ = tools.breaking_characteristics(self.period, incident_angle, self.wave_height, beach_slope, depth)
        # Scale the breaking height by a factor of 0.8 to account for refraction
        # or anything else we are not checking for.
        breaking_height = 0.8 * wave_breaking_height
# For now assume this is significant wave height as the max and the rms as the min
maximum_break_height = breaking_height
minimum_break_height = breaking_height / 1.4
return minimum_break_height, maximum_break_height
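# Minimal usage sketch (not part of the original module); the wave height, period,
# direction, beach angle, depth and slope values below are hypothetical examples.
if __name__ == '__main__':
    example_swell = Swell(units.Units.metric, wave_height=1.5, period=12.0, direction=280.0)
    print(example_swell.summary)
    estimate = example_swell.breaking_wave_estimate(beach_angle=270.0, depth=5.0, beach_slope=0.02)
    if estimate is not None:
        low, high = estimate
        print('Estimated breaking height: {0:.1f} - {1:.1f} m'.format(low, high))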
|
"""
Jordi explained that a recursive search may not work as you might
first follow an extremely long path.
Thus, the process should be done by levels
"""
import os
from collections import defaultdict
class Computer:
def __init__(self):
self.operations = {'cpy': self.copy, 'inc': self.add, 'dec': self.subs, 'jnz': self.jump}
self.registers = defaultdict(int)
self.instruction = 0
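    # Instruction semantics (descriptive note, inferred from the handlers below):
    # 'cpy x y' copies x (an int literal or a register name) into register y,
    # 'inc x' and 'dec x' adjust register x by one, and 'jnz x y' moves the
    # instruction pointer by the literal offset y when x is non-zero.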
def run(self, program):
while self.instruction < len(program):
values = program[self.instruction].split(' ')
self.operations[values[0]](*(values[1:]))
self.instruction += 1
def get_val(self, v):
try:
return int(v)
except ValueError:
return self.registers[v]
def copy(self, value, register):
self.registers[register] = self.get_val(value)
def add(self, register):
self.registers[register] += 1
def subs(self, register):
self.registers[register] -= 1
def jump(self, register, amount):
if self.get_val(register) != 0:
self.instruction += (int(amount) - 1)
def get(self, register=None):
if register is None:
return self.registers
else:
return self.registers[register]
if __name__ == '__main__':
    input_dir = os.path.dirname(__file__)
    input_file = os.path.join(input_dir, 'input.txt')
    program = []
    with open(input_file) as fd:
for line in fd:
program.append(line.strip())
computer = Computer()
computer.run(program)
print('Part 1:', computer.get('a'))
program = ['cpy 1 c'] + program
computer = Computer()
computer.run(program)
print('Part 2:', computer.get('a'))
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
from tempfile import mkdtemp
from shutil import rmtree
from ducktape.template import TemplateRenderer
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.listener_security_config import ListenerSecurityConfig
import itertools
class SslStores(object):
def __init__(self, local_scratch_dir, logger=None):
self.logger = logger
self.ca_crt_path = os.path.join(local_scratch_dir, "test.ca.crt")
self.ca_jks_path = os.path.join(local_scratch_dir, "test.ca.jks")
self.ca_passwd = "test-ca-passwd"
self.truststore_path = os.path.join(local_scratch_dir, "test.truststore.jks")
self.truststore_passwd = "test-ts-passwd"
self.keystore_passwd = "test-ks-passwd"
# Zookeeper TLS (as of v3.5.6) does not support a key password different than the keystore password
self.key_passwd = self.keystore_passwd
        # Allow up to one hour of clock skew between host and VMs
self.startdate = "-1H"
for file in [self.ca_crt_path, self.ca_jks_path, self.truststore_path]:
if os.path.exists(file):
os.remove(file)
def generate_ca(self):
"""
Generate CA private key and certificate.
"""
self.runcmd("keytool -genkeypair -alias ca -keyalg RSA -keysize 2048 -keystore %s -storetype JKS -storepass %s -keypass %s -dname CN=SystemTestCA -startdate %s --ext bc=ca:true" % (self.ca_jks_path, self.ca_passwd, self.ca_passwd, self.startdate))
self.runcmd("keytool -export -alias ca -keystore %s -storepass %s -storetype JKS -rfc -file %s" % (self.ca_jks_path, self.ca_passwd, self.ca_crt_path))
def generate_truststore(self):
"""
Generate JKS truststore containing CA certificate.
"""
self.runcmd("keytool -importcert -alias ca -file %s -keystore %s -storepass %s -storetype JKS -noprompt" % (self.ca_crt_path, self.truststore_path, self.truststore_passwd))
def generate_and_copy_keystore(self, node):
"""
Generate JKS keystore with certificate signed by the test CA.
The generated certificate has the node's hostname as a DNS SubjectAlternativeName.
"""
ks_dir = mkdtemp(dir="/tmp")
ks_path = os.path.join(ks_dir, "test.keystore.jks")
csr_path = os.path.join(ks_dir, "test.kafka.csr")
crt_path = os.path.join(ks_dir, "test.kafka.crt")
self.runcmd("keytool -genkeypair -alias kafka -keyalg RSA -keysize 2048 -keystore %s -storepass %s -storetype JKS -keypass %s -dname CN=systemtest -ext SAN=DNS:%s -startdate %s" % (ks_path, self.keystore_passwd, self.key_passwd, self.hostname(node), self.startdate))
self.runcmd("keytool -certreq -keystore %s -storepass %s -storetype JKS -keypass %s -alias kafka -file %s" % (ks_path, self.keystore_passwd, self.key_passwd, csr_path))
self.runcmd("keytool -gencert -keystore %s -storepass %s -storetype JKS -alias ca -infile %s -outfile %s -dname CN=systemtest -ext SAN=DNS:%s -startdate %s" % (self.ca_jks_path, self.ca_passwd, csr_path, crt_path, self.hostname(node), self.startdate))
self.runcmd("keytool -importcert -keystore %s -storepass %s -storetype JKS -alias ca -file %s -noprompt" % (ks_path, self.keystore_passwd, self.ca_crt_path))
self.runcmd("keytool -importcert -keystore %s -storepass %s -storetype JKS -keypass %s -alias kafka -file %s -noprompt" % (ks_path, self.keystore_passwd, self.key_passwd, crt_path))
node.account.copy_to(ks_path, SecurityConfig.KEYSTORE_PATH)
# generate ZooKeeper client TLS config file for encryption-only (no client cert) use case
        zk_tls_encrypt_only_props = """zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=true
zookeeper.ssl.truststore.location=%s
zookeeper.ssl.truststore.password=%s
""" % (SecurityConfig.TRUSTSTORE_PATH, self.truststore_passwd)
        node.account.create_file(SecurityConfig.ZK_CLIENT_TLS_ENCRYPT_ONLY_CONFIG_PATH, zk_tls_encrypt_only_props)
# also generate ZooKeeper client TLS config file for mutual authentication use case
        zk_tls_mutual_auth_props = """zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=true
zookeeper.ssl.truststore.location=%s
zookeeper.ssl.truststore.password=%s
zookeeper.ssl.keystore.location=%s
zookeeper.ssl.keystore.password=%s
""" % (SecurityConfig.TRUSTSTORE_PATH, self.truststore_passwd, SecurityConfig.KEYSTORE_PATH, self.keystore_passwd)
        node.account.create_file(SecurityConfig.ZK_CLIENT_MUTUAL_AUTH_CONFIG_PATH, zk_tls_mutual_auth_props)
rmtree(ks_dir)
def hostname(self, node):
""" Hostname which may be overridden for testing validation failures
"""
return node.account.hostname
def runcmd(self, cmd):
if self.logger:
self.logger.log(logging.DEBUG, cmd)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Command '%s' returned non-zero exit status %d: %s" % (cmd, proc.returncode, stdout))
class SecurityConfig(TemplateRenderer):
PLAINTEXT = 'PLAINTEXT'
SSL = 'SSL'
SASL_PLAINTEXT = 'SASL_PLAINTEXT'
SASL_SSL = 'SASL_SSL'
SASL_MECHANISM_GSSAPI = 'GSSAPI'
SASL_MECHANISM_PLAIN = 'PLAIN'
SASL_MECHANISM_SCRAM_SHA_256 = 'SCRAM-SHA-256'
SASL_MECHANISM_SCRAM_SHA_512 = 'SCRAM-SHA-512'
SCRAM_CLIENT_USER = "kafka-client"
SCRAM_CLIENT_PASSWORD = "client-secret"
SCRAM_BROKER_USER = "kafka-broker"
SCRAM_BROKER_PASSWORD = "broker-secret"
CONFIG_DIR = "/mnt/security"
KEYSTORE_PATH = "/mnt/security/test.keystore.jks"
TRUSTSTORE_PATH = "/mnt/security/test.truststore.jks"
ZK_CLIENT_TLS_ENCRYPT_ONLY_CONFIG_PATH = "/mnt/security/zk_client_tls_encrypt_only_config.properties"
ZK_CLIENT_MUTUAL_AUTH_CONFIG_PATH = "/mnt/security/zk_client_mutual_auth_config.properties"
JAAS_CONF_PATH = "/mnt/security/jaas.conf"
KRB5CONF_PATH = "/mnt/security/krb5.conf"
KEYTAB_PATH = "/mnt/security/keytab"
# This is initialized only when the first instance of SecurityConfig is created
ssl_stores = None
def __init__(self, context, security_protocol=None, interbroker_security_protocol=None,
client_sasl_mechanism=SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SASL_MECHANISM_GSSAPI,
zk_sasl=False, zk_tls=False, template_props="", static_jaas_conf=True, jaas_override_variables=None,
listener_security_config=ListenerSecurityConfig()):
"""
Initialize the security properties for the node and copy
keystore and truststore to the remote node if the transport protocol
is SSL. If security_protocol is None, the protocol specified in the
template properties file is used. If no protocol is specified in the
template properties either, PLAINTEXT is used as default.
"""
self.context = context
if not SecurityConfig.ssl_stores:
            # This generates keystore/truststore files in a local scratch directory which gets
# automatically destroyed after the test is run
# Creating within the scratch directory allows us to run tests in parallel without fear of collision
SecurityConfig.ssl_stores = SslStores(context.local_scratch_dir, context.logger)
SecurityConfig.ssl_stores.generate_ca()
SecurityConfig.ssl_stores.generate_truststore()
if security_protocol is None:
security_protocol = self.get_property('security.protocol', template_props)
if security_protocol is None:
security_protocol = SecurityConfig.PLAINTEXT
elif security_protocol not in [SecurityConfig.PLAINTEXT, SecurityConfig.SSL, SecurityConfig.SASL_PLAINTEXT, SecurityConfig.SASL_SSL]:
raise Exception("Invalid security.protocol in template properties: " + security_protocol)
if interbroker_security_protocol is None:
interbroker_security_protocol = security_protocol
self.interbroker_security_protocol = interbroker_security_protocol
self.has_sasl = self.is_sasl(security_protocol) or self.is_sasl(interbroker_security_protocol) or zk_sasl
self.has_ssl = self.is_ssl(security_protocol) or self.is_ssl(interbroker_security_protocol) or zk_tls
self.zk_sasl = zk_sasl
self.zk_tls = zk_tls
self.static_jaas_conf = static_jaas_conf
self.listener_security_config = listener_security_config
self.properties = {
'security.protocol' : security_protocol,
'ssl.keystore.location' : SecurityConfig.KEYSTORE_PATH,
'ssl.keystore.password' : SecurityConfig.ssl_stores.keystore_passwd,
'ssl.key.password' : SecurityConfig.ssl_stores.key_passwd,
'ssl.truststore.location' : SecurityConfig.TRUSTSTORE_PATH,
'ssl.truststore.password' : SecurityConfig.ssl_stores.truststore_passwd,
'ssl.endpoint.identification.algorithm' : 'HTTPS',
'sasl.mechanism' : client_sasl_mechanism,
'sasl.mechanism.inter.broker.protocol' : interbroker_sasl_mechanism,
'sasl.kerberos.service.name' : 'kafka'
}
self.properties.update(self.listener_security_config.client_listener_overrides)
self.jaas_override_variables = jaas_override_variables or {}
def client_config(self, template_props="", node=None, jaas_override_variables=None):
# If node is not specified, use static jaas config which will be created later.
# Otherwise use static JAAS configuration files with SASL_SSL and sasl.jaas.config
# property with SASL_PLAINTEXT so that both code paths are tested by existing tests.
        # Note that this is an arbitrary choice and it is possible to run all tests with
# either static or dynamic jaas config files if required.
static_jaas_conf = node is None or (self.has_sasl and self.has_ssl)
return SecurityConfig(self.context, self.security_protocol,
client_sasl_mechanism=self.client_sasl_mechanism,
template_props=template_props,
static_jaas_conf=static_jaas_conf,
jaas_override_variables=jaas_override_variables,
listener_security_config=self.listener_security_config)
def enable_security_protocol(self, security_protocol):
self.has_sasl = self.has_sasl or self.is_sasl(security_protocol)
self.has_ssl = self.has_ssl or self.is_ssl(security_protocol)
def setup_ssl(self, node):
node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
node.account.copy_to(SecurityConfig.ssl_stores.truststore_path, SecurityConfig.TRUSTSTORE_PATH)
SecurityConfig.ssl_stores.generate_and_copy_keystore(node)
def setup_sasl(self, node):
node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
jaas_conf_file = "jaas.conf"
java_version = node.account.ssh_capture("java -version")
jaas_conf = None
if 'sasl.jaas.config' not in self.properties:
jaas_conf = self.render_jaas_config(
jaas_conf_file,
{
'node': node,
'is_ibm_jdk': any('IBM' in line for line in java_version),
'SecurityConfig': SecurityConfig,
'client_sasl_mechanism': self.client_sasl_mechanism,
'enabled_sasl_mechanisms': self.enabled_sasl_mechanisms
}
)
else:
jaas_conf = self.properties['sasl.jaas.config']
if self.static_jaas_conf:
node.account.create_file(SecurityConfig.JAAS_CONF_PATH, jaas_conf)
elif 'sasl.jaas.config' not in self.properties:
self.properties['sasl.jaas.config'] = jaas_conf.replace("\n", " \\\n")
if self.has_sasl_kerberos:
node.account.copy_to(MiniKdc.LOCAL_KEYTAB_FILE, SecurityConfig.KEYTAB_PATH)
node.account.copy_to(MiniKdc.LOCAL_KRB5CONF_FILE, SecurityConfig.KRB5CONF_PATH)
def render_jaas_config(self, jaas_conf_file, config_variables):
"""
Renders the JAAS config file contents
:param jaas_conf_file: name of the JAAS config template file
:param config_variables: dict of variables used in the template
:return: the rendered template string
"""
variables = config_variables.copy()
variables.update(self.jaas_override_variables) # override variables
return self.render(jaas_conf_file, **variables)
def setup_node(self, node):
if self.has_ssl:
self.setup_ssl(node)
if self.has_sasl:
self.setup_sasl(node)
def setup_credentials(self, node, path, zk_connect, broker):
if broker:
self.maybe_create_scram_credentials(node, zk_connect, path, self.interbroker_sasl_mechanism,
SecurityConfig.SCRAM_BROKER_USER, SecurityConfig.SCRAM_BROKER_PASSWORD)
else:
self.maybe_create_scram_credentials(node, zk_connect, path, self.client_sasl_mechanism,
SecurityConfig.SCRAM_CLIENT_USER, SecurityConfig.SCRAM_CLIENT_PASSWORD)
def maybe_create_scram_credentials(self, node, zk_connect, path, mechanism, user_name, password):
if self.has_sasl and self.is_sasl_scram(mechanism):
cmd = "%s --zookeeper %s --entity-name %s --entity-type users --alter --add-config %s=[password=%s]" % \
(path.script("kafka-configs.sh", node), zk_connect,
user_name, mechanism, password)
node.account.ssh(cmd)
def clean_node(self, node):
if self.security_protocol != SecurityConfig.PLAINTEXT:
node.account.ssh("rm -rf %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
def get_property(self, prop_name, template_props=""):
"""
Get property value from the string representation of
a properties file.
"""
value = None
for line in template_props.split("\n"):
items = line.split("=")
if len(items) == 2 and items[0].strip() == prop_name:
value = str(items[1].strip())
return value
def is_ssl(self, security_protocol):
return security_protocol == SecurityConfig.SSL or security_protocol == SecurityConfig.SASL_SSL
def is_sasl(self, security_protocol):
return security_protocol == SecurityConfig.SASL_PLAINTEXT or security_protocol == SecurityConfig.SASL_SSL
def is_sasl_scram(self, sasl_mechanism):
return sasl_mechanism == SecurityConfig.SASL_MECHANISM_SCRAM_SHA_256 or sasl_mechanism == SecurityConfig.SASL_MECHANISM_SCRAM_SHA_512
@property
def security_protocol(self):
return self.properties['security.protocol']
@property
def client_sasl_mechanism(self):
return self.properties['sasl.mechanism']
@property
def interbroker_sasl_mechanism(self):
return self.properties['sasl.mechanism.inter.broker.protocol']
@property
def enabled_sasl_mechanisms(self):
return set([self.client_sasl_mechanism, self.interbroker_sasl_mechanism])
@property
def has_sasl_kerberos(self):
return self.has_sasl and (SecurityConfig.SASL_MECHANISM_GSSAPI in self.enabled_sasl_mechanisms)
@property
def kafka_opts(self):
if self.has_sasl:
if self.static_jaas_conf:
return "\"-Djava.security.auth.login.config=%s -Djava.security.krb5.conf=%s\"" % (SecurityConfig.JAAS_CONF_PATH, SecurityConfig.KRB5CONF_PATH)
else:
return "\"-Djava.security.krb5.conf=%s\"" % SecurityConfig.KRB5CONF_PATH
else:
return ""
def props(self, prefix=''):
"""
Return properties as string with line separators, optionally with a prefix.
This is used to append security config properties to
a properties file.
:param prefix: prefix to add to each property
:return: a string containing line-separated properties
"""
if self.security_protocol == SecurityConfig.PLAINTEXT:
return ""
if self.has_sasl and not self.static_jaas_conf and 'sasl.jaas.config' not in self.properties:
raise Exception("JAAS configuration property has not yet been initialized")
        config_lines = (prefix + key + "=" + value for key, value in self.properties.items())
# Extra blank lines ensure this can be appended/prepended safely
return "\n".join(itertools.chain([""], config_lines, [""]))
def __str__(self):
"""
Return properties as a string with line separators.
"""
return self.props()
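# Illustrative sketch (not part of the original module): get_property() and props() above
# operate on plain "key=value" property lines. The standalone helpers below mirror that
# format with arbitrary sample keys, so the expected input and output shapes are easy to
# see without having to construct a SecurityConfig instance.
def _demo_render_props(props, prefix=''):
    """Serialize a dict into the same shape props() produces (blank line on each side)."""
    lines = [prefix + key + "=" + value for key, value in sorted(props.items())]  # sorted only for stable demo output
    return "\n".join([""] + lines + [""])

def _demo_get_property(prop_name, template_props=""):
    """Parse 'key=value' lines the same way get_property() does."""
    value = None
    for line in template_props.split("\n"):
        items = line.split("=")
        if len(items) == 2 and items[0].strip() == prop_name:
            value = items[1].strip()
    return value

if __name__ == "__main__":
    rendered = _demo_render_props({'security.protocol': 'SASL_SSL', 'sasl.mechanism': 'GSSAPI'})
    print(_demo_get_property('sasl.mechanism', rendered))  # -> GSSAPI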
|
import os
import pickle
import PVGeo
import pyvista as pv
import pandas as pd
from ela.classification import GridInterpolation
from ela.spatial import create_meshgrid_cartesian
from ela.visual import *
'''
@author: Guanjie Huang
@date: Aug 16th, 2019
This class is used to process data before generating the 3D images
'''
class VisualizeDataProcess:
def __init__(self):
# self.height_Adjustment_factor=height_Adjustment_factor
self.scaled_from_height_colname = 'scaled_from_height'
self.scaled_to_height_colname = 'scaled_to_height'
self.dem_x_min = 0
self.dem_x_max = 0
self.dem_y_min = 0
self.dem_y_max = 0
self.ahd_max = 0
self.ahd_min = 0
self.grid_res = ''
self.scalar_prop_name = "litho_num"
def drill_file_read(self, file_path):
"""Read drill data file
Args:
file_path (str): drill data file path
Returns:
df(pandas.core.frame.DataFrame)
"""
df = pd.read_pickle(file_path)
return df
def dem_file_read(self, file_path):
"""Read dem data file
Args:
            file_path (str): dem data file path
        Returns:
            dem_array_data(pandas.core.frame.DataFrame)
"""
with open(file_path, 'rb') as handle:
dem_array_data = pickle.load(handle)
handle.close()
return dem_array_data
def drill_data_initial(self, drill_data, depth_from_ahd=DEPTH_FROM_AHD_COL, depth_to_ahd=DEPTH_TO_AHD_COL):
"""initial class variables and clean drilling data
Args:
drill_data (pandas.core.frame.DataFrame): original drilling data
depth_from_ahd(str):set the column name of depth from AHD, default DEPTH_FROM_AHD_COL
depth_to_ahd(str):set the column name of depth to AHD, default DEPTH_TO_AHD_COL
Returns:
drill_data(pandas.core.frame.DataFrame)
"""
self.ahd_max = drill_data[depth_from_ahd].max()
self.ahd_min = drill_data[depth_to_ahd].min()
# clean the invalid data
return drill_data.dropna(subset=[depth_to_ahd, depth_from_ahd])
def dem_data_initial(self, dem_array_data, dem_bounds='bounds', dem_grid_res='grid_res'):
"""initial class variables and clean dem data
Args:
dem_array_data (pandas.core.frame.DataFrame): original dem data
dem_bounds(str): set bounds column name according to dem files
dem_grid_res(str): set grid_res column name according to dem files
Returns:
dem_array_data(pandas.core.frame.DataFrame)
"""
self.dem_x_min, self.dem_x_max, self.dem_y_min, self.dem_y_max = dem_array_data[dem_bounds]
self.grid_res = dem_array_data[dem_grid_res]
return dem_array_data
def drill_data_process(self, drill_data, height_adjustment_factor=20, depth_from_ahd=DEPTH_FROM_AHD_COL,
depth_to_ahd=DEPTH_TO_AHD_COL, drill_east='Easting', drill_north='Northing',
boreID='BoreID', prime_lithology='Lithology_1_num', min_tube_radius=10):
"""The whole data process from drill data to PolyData dictionary
Args:
drill_data(pandas.core.frame.DataFrame): original drilling data
height_adjustment_factor (int): Height scaling factor, default 20.
depth_from_ahd(str):set the column name of depth from AHD, default DEPTH_FROM_AHD_COL
depth_to_ahd(str):set the column name of depth to AHD, default DEPTH_TO_AHD_COL
drill_east(str):set the column name of point's x location in drilling data, default "Easting"
            drill_north(str):set the column name of point's y location in drilling data, default "Northing"
boreID(str):set the column name of bore hole ID,default "BoreID"
prime_lithology(str):set the prime lithology column name
min_tube_radius(int):set the min radius of borehole tube
Returns:
lines_dict(dict): PolyData dictionary.
"""
# data = self.drill_file_read(file_path, depth_from_ahd, depth_to_ahd)
fixed_data = self.drill_data_initial(drill_data, depth_from_ahd, depth_to_ahd)
data = self.add_scaled_height_column(fixed_data, height_adjustment_factor, depth_from_ahd, depth_to_ahd)
well_dict = self.build_well_dict(data, boreID)
# = self.add_missing_height_data(well_dict)
point_dict = self.build_points_dict(well_dict, drill_east, drill_north)
lines_dict = self.point_to_lines_dict(point_dict)
lines_dict = self.add_lithology_based_scalar(well_dict, lines_dict, prime_lithology, min_tube_radius)
return lines_dict
def dem_data_process(self, dem_array_data, height_adjustment_factor, dem_mesh_xy='mesh_xy', dem_arrays='dem_array',
dem_bounds='bounds', dem_grid_res='grid_res'):
"""The whole data process from dem data to pv.StructuredGrid
Args:
dem_array_data (pandas.core.frame.DataFrame): original dem data
            height_adjustment_factor (int): Height scaling factor, default 20.
dem_mesh_xy(str): set mesh_xy column name according to dem files
dem_arrays(str): set dem array column name according to dem files
dem_bounds(str): set bounds column name according to dem files
dem_grid_res(str): set grid_res column name according to dem files
Returns:
Grid(pyvista.core.pointset.StructuredGrid)
"""
dem_array_data = self.dem_data_initial(dem_array_data, dem_bounds, dem_grid_res)
xx, yy = dem_array_data[dem_mesh_xy]
dem_array = dem_array_data[dem_arrays]
grid = pv.StructuredGrid(xx, yy, dem_array * height_adjustment_factor)
return grid
def lithology_layer_process(self, drill_data, dem_array_data, storage_file_name, height_adjustment_factor=20,
layer_from=0, layer_to=0, dem_bounds='bounds', dem_grid_res='grid_res',
dem_mesh_xy='mesh_xy', drill_east='Easting', drill_north='Northing',
dem_arrays='dem_array', depth_from_ahd=DEPTH_FROM_AHD_COL,
depth_to_ahd=DEPTH_TO_AHD_COL):
"""add points lithology type, expands lines to tube based on lithology number
Args:
drill_data(pandas.core.frame.DataFrame): original drilling data
dem_array_data (pandas.core.frame.DataFrame): original dem data
            storage_file_name(str): directory in which the computed lithology
                classification array is cached as "lithology_3d_array.pkl"
            height_adjustment_factor(int): height scale factor
            layer_from (float): lower bound of the lithology class values to keep
            layer_to (float): upper bound of the lithology class values to keep
dem_bounds(str): set bounds column name according to dem files
dem_grid_res(str): set grid_res column name according to dem files
dem_mesh_xy(str): set mesh_xy column name according to dem files
drill_east(str):set the column name of point's x location in drilling data, default "Easting"
            drill_north(str):set the column name of point's y location in drilling data, default "Northing"
dem_arrays(str): set dem array column name according to dem files
depth_from_ahd(str):set the column name of depth from AHD, default DEPTH_FROM_AHD_COL
depth_to_ahd(str):set the column name of depth to AHD, default DEPTH_TO_AHD_COL
Returns:
layer_mesh(pyvista.core.pointset.UnstructuredGrid): layer mesh for display use
"""
# drill_data = self.drill_file_read(drill_file_path, depth_from_ahd, depth_to_ahd)
# dem_array_data = self.dem_file_read(dem_file_path, dem_bounds, dem_grid_res)
path = os.path.join(storage_file_name, "lithology_3d_array.pkl")
try:
with open(path, 'rb') as handle:
lithology_3d_array = pickle.load(handle)
handle.close()
        except Exception:  # cached array missing or unreadable; rebuild it below
drill_data = self.drill_data_initial(drill_data, depth_from_ahd, depth_to_ahd)
dem_array_data = self.dem_data_initial(dem_array_data, dem_bounds, dem_grid_res)
lithology_3d_array = self.build_layer_data(drill_data, dem_array_data, dem_mesh_xy, drill_east, drill_north)
lithology_3d_array = self.clean_over_bound_data(lithology_3d_array, dem_array_data, dem_arrays)
# lithology_3d_array = self.vag_clean(lithology_3d_array, dem_array_data)
            if not os.path.exists(storage_file_name):
                os.makedirs(storage_file_name)
with open(path, "wb") as cf:
pickle.dump(lithology_3d_array, cf)
cf.close()
layer_mesh = self.build_layer_mesh(lithology_3d_array, height_adjustment_factor, layer_from, layer_to)
return layer_mesh
def add_scaled_height_column(self, data, height_adjustment_factor, depth_from_ahd=DEPTH_FROM_AHD_COL,
depth_to_ahd=DEPTH_TO_AHD_COL):
"""Add scaled height columns to data frame
Args:
data (pandas.core.frame.DataFrame):original data
height_adjustment_factor (int): Height scaling factor.
depth_from_ahd(str):set the column name of depth from AHD, default DEPTH_FROM_AHD_COL
depth_to_ahd(str):set the column name of depth to AHD, default DEPTH_TO_AHD_COL
Returns:
data(pandas.core.frame.DataFrame): modified data
"""
# scaled_from_height_colname = 'scaled_from_height'
data.loc[:, self.scaled_from_height_colname] = data[depth_from_ahd].values * height_adjustment_factor
# scaled_to_height_colname = 'scaled_to_height'
data.loc[:, self.scaled_to_height_colname] = data[depth_to_ahd].values * height_adjustment_factor
return data
def build_well_dict(self, data, boreID='BoreID'):
"""build dictionary according to BoreID
Args:
data (pandas.core.frame.DataFrame):original data
boreID(str):set the column name of bore hole ID,default "BoreID"
Returns:
well_dict(dict()): wells dictionary
"""
data.loc[:, 'name'] = data.loc[:, boreID].values.astype(str)
wells = data.name.unique()
well_dict = {}
for well in wells:
well_dict["{0}".format(well)] = data[data.name == well]
return well_dict
# def add_missing_height_data(self, well_dict):
# """Add the smallest height_to data to height_from data (len(well_dict[i])+1)
# Args:
# well_dict(dict()): original dictionary
# Returns:
# well_dict(dict()): modified dictionary
# """
# bad_well = []
# for well in well_dict.keys():
# origin_well_df = well_dict.get(well)
# after_well_df = origin_well_df.copy()
# add_index = origin_well_df[self.scaled_to_height_colname].idxmin()
# if np.isnan(add_index):
# bad_well.append(well)
# continue
# line = origin_well_df.loc[add_index].copy()
# line.scaled_from_height = line.scaled_to_height
# line = line.to_frame()
# temp = []
# for value in line.values:
# if value[0]:
# temp.append(value[0])
# else:
# temp.append(0)
# after_well_df.loc["new"] = temp
# well_dict[well] = after_well_df
# for i in range(len(bad_well)):
# well_dict.pop(bad_well[i])
# return well_dict
def build_points_dict(self, well_dict, drill_east="Easting", drill_north="Northing"):
"""build points dictionary from wells dictionary
Args:
well_dict(dict()): wells dictionary
drill_east(str):set the column name of point's x location in drilling data, default "Easting"
            drill_north(str):set the column name of point's y location in drilling data, default "Northing"
Returns:
points_dict(dict()): zip points axis for points
"""
c = np.concatenate
points_dict = {}
for points in well_dict:
e = well_dict[points][drill_east].values
n = well_dict[points][drill_north].values
points_dict["{0}".format(points)] = np.array(
list(
zip(
c((e, e)),
c((n, n)),
c((
well_dict[points][self.scaled_from_height_colname].values,
well_dict[points][self.scaled_to_height_colname].values + 1.0))
)
)
)
return points_dict
def point_to_lines_dict(self, points_dict):
"""build lines dictionary from points dictionary
Args:
points_dict(dict()): points dictionary
Returns:
lines_dict(dict()): build lines between same well points
"""
lines_dict = {}
for bore_id in points_dict:
poly = PVGeo.points_to_poly_data(points_dict[bore_id])
lines_dict["{0}".format(bore_id)] = PVGeo.filters.AddCellConnToPoints(nearest_nbr=True).apply(poly)
            # note that building the lines needs to follow the nearest-neighbour search
return lines_dict
def add_lithology_based_scalar(self, well_dict, lines_dict, prime_lithology='Lithology_1_num', min_tube_radius=10):
"""add points lithology type, expands lines to tube based on lithology number
Args:
well_dict(dict()): wells dictionary
lines_dict(dict()):lines dictionary
prime_lithology(str):set the prime lithology column name
min_tube_radius(int): set the min radius of borehole tube
Returns:
lines_dict(dict()): with new attribute "GR" which represent lithology number, and expanded to tube.
"""
lines_dict_tmp = {}
for bore_id in lines_dict:
try:
vals = well_dict[bore_id][prime_lithology].values
bore_vis = lines_dict[bore_id]
bore_vis[self.scalar_prop_name] = np.concatenate((vals, vals)) # tops then bottoms of cylinders.
bore_vis.tube(radius=min_tube_radius, scalars=None, inplace=True)
# lines_dict[bore_id].tube(radius=10, scalars=dp.scalar_prop_name, inplace=True)
except Exception as e:
raise Exception("Lithology attribute processed for visualisation failed for bore ID %s" % (bore_id))
if len(vals) > 0:
lines_dict_tmp[bore_id] = lines_dict[bore_id]
lines_dict = lines_dict_tmp
return lines_dict
def build_layer_data(self, drill_data, dem_array_data, dem_mesh_xy='mesh_xy', drill_east='Easting',
drill_north='Northing'):
"""get the layer data from the function contains in ela
Args:
drill_data (pandas.core.frame.DataFrame): drill data
dem_array_data (pandas.core.frame.DataFrame): dem data
dem_mesh_xy(str): set mesh_xy column name according to dem files
drill_east(str):set the column name of point's x location in drilling data, default "Easting"
            drill_north(str):set the column name of point's y location in drilling data, default "Northing"
"""
n_neighbours = 10
xg, yg = dem_array_data[dem_mesh_xy]
m = create_meshgrid_cartesian(self.dem_x_min, self.dem_x_max, self.dem_y_min, self.dem_y_max, self.grid_res)
z_coords = np.arange(self.ahd_min, self.ahd_max, 1)
dim_x, dim_y = xg.shape
dim_z = len(z_coords)
dims = (dim_x, dim_y, dim_z)
lithology_3d_array = np.empty(dims)
gi = GridInterpolation(easting_col=drill_east, northing_col=drill_north)
gi.interpolate_volume(lithology_3d_array, drill_data, PRIMARY_LITHO_NUM_COL, z_coords, n_neighbours, m)
return lithology_3d_array
def clean_over_bound_data(self, lithology_3d_array, dem_array_data, dem_arrays='dem_array'):
"""accurate process data that exceeds limits
(we suppose that the lithology would not higher than the ground surface),
accurate but slower
Args:
lithology_3d_array (np.array of dim 3): lithology numeric (lithology class) identifiers
dem_array_data (pandas.core.frame.DataFrame): dem data
dem_arrays(str): set dem array column name according to dem files
"""
dem_z = dem_array_data[dem_arrays]
for i in range(lithology_3d_array.shape[0]):
for j in range(lithology_3d_array.shape[1]):
if np.isnan(dem_z[i][j]):
lithology_3d_array[i][j] = None
continue
for k in range(lithology_3d_array.shape[2]):
height = k * (self.ahd_max - self.ahd_min) / lithology_3d_array.shape[2] + self.ahd_min
if height >= dem_z[i][j]:
for tmp in range(k, lithology_3d_array.shape[2]):
lithology_3d_array[i][j][tmp] = None
break
return lithology_3d_array
def vag_clean(self, lithology_3d_array, dem_array_data, dem_arrays='dem_array'):
"""Simply process data that exceeds limits(we suppose that the lithology would not higher than the ground surface),
not accurate but faster
Args:
lithology_3d_array (np.array of dim 3): lithology numeric (lithology class) identifiers
            dem_array_data (pandas.core.frame.DataFrame): dem data
dem_arrays(str): set dem array column name according to dem files
"""
dem_z = dem_array_data[dem_arrays]
for i in range(1, lithology_3d_array.shape[0]):
for j in range(1, lithology_3d_array.shape[1]):
if np.isnan(dem_z[i][j]):
k = 0
else:
k = int(dem_z[i][j] - self.ahd_min)
for tep in range(k, lithology_3d_array.shape[2]):
lithology_3d_array[i][j][tep] = None
return lithology_3d_array
def build_layer_mesh(self, lithology_3d_array, height_adjustment_factor, layer_from, layer_to):
"""Build a 3D mesh of selected lithology class codes by binary bining cells. Use filter to select aim layers.
Args:
lithology_3d_array (np.array of dim 3): lithology numeric (lithology class) identifiers
height_adjustment_factor (int): height scale factor
            layer_from (float): lower bound of the lithology class values to keep
            layer_to (float): upper bound of the lithology class values to keep
"""
volume = pv.UniformGrid()
volume.dimensions = np.array(lithology_3d_array.shape)
volume.origin = (self.dem_x_min, self.dem_y_min, self.ahd_min * height_adjustment_factor)
x_label = (self.dem_x_max - self.dem_x_min) / lithology_3d_array.shape[0]
y_label = (self.dem_y_max - self.dem_y_min) / lithology_3d_array.shape[1]
z_label = (self.ahd_max - self.ahd_min) * height_adjustment_factor / lithology_3d_array.shape[2]
volume.spacing = (x_label, y_label, z_label)
volume.point_arrays["Lithology"] = lithology_3d_array.flatten('F')
volume.set_active_scalar("Lithology")
threshed = volume.threshold([layer_from, layer_to])
return threshed
# def exist_3d_lithology(self):
def extract_single_lithology_class_3d(self, lithology_3d_classes, class_value):
"""Transform a 3D volume of lithology class codes by binary bining cells as being either of a class value or
other. Preprocessing primarily for 3D visualisation for pyvista(not use in the sample ).
Args:
lithology_3d_classes (np.array of dim 3): lithology numeric (lithology class) identifiers
class_value (float): class code of interest
"""
single_litho = np.copy(lithology_3d_classes)
other_value = None
single_litho[(single_litho != class_value)] = other_value
# We burn the edges of the volume, as I suspect this is necessary to have a more intuitive viz (otherwise non
# closed volumes)
single_litho[0, :, :] = other_value
single_litho[-1, :, :] = other_value
single_litho[:, 0, :] = other_value
single_litho[:, -1, :] = other_value
single_litho[:, :, 0] = other_value
single_litho[:, :, -1] = other_value
return single_litho
# burn_volume
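# Hedged usage sketch (not part of the original class): the intended workflow is to read the
# pickled drill and DEM data, convert them to PyVista-friendly objects and display them
# together. The file paths below are placeholders; the plotting calls use a standard
# pyvista.Plotter and assume the drill data contains the default column names used above.
if __name__ == "__main__":
    dp = VisualizeDataProcess()
    drill_data = dp.drill_file_read("drill_data.pkl")            # placeholder path
    dem_array_data = dp.dem_file_read("dem_array_data.pkl")      # placeholder path
    borehole_tubes = dp.drill_data_process(drill_data, height_adjustment_factor=20)
    dem_surface = dp.dem_data_process(dem_array_data, height_adjustment_factor=20)
    plotter = pv.Plotter()
    for bore_id, tube in borehole_tubes.items():
        plotter.add_mesh(tube)
    plotter.add_mesh(dem_surface, opacity=0.8)
    plotter.show()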
|
import django_filters
from django.conf import settings
from django.db import models
from django.test import TestCase
from mptt.fields import TreeForeignKey
from taggit.managers import TaggableManager
from dcim.choices import *
from dcim.fields import MACAddressField
from dcim.filters import DeviceFilterSet, SiteFilterSet
from dcim.models import (
Device, DeviceRole, DeviceType, Interface, Manufacturer, Platform, Rack, Region, Site
)
from extras.models import TaggedItem
from utilities.filters import (
BaseFilterSet, MACAddressFilter, MultiValueCharFilter, MultiValueDateFilter, MultiValueDateTimeFilter,
MultiValueNumberFilter, MultiValueTimeFilter, TagFilter, TreeNodeMultipleChoiceFilter,
)
class TreeNodeMultipleChoiceFilterTest(TestCase):
class SiteFilterSet(django_filters.FilterSet):
region = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name='region__in',
to_field_name='slug',
)
def setUp(self):
super().setUp()
self.region1 = Region.objects.create(name='Test Region 1', slug='test-region-1')
self.region2 = Region.objects.create(name='Test Region 2', slug='test-region-2')
self.site1 = Site.objects.create(region=self.region1, name='Test Site 1', slug='test-site1')
self.site2 = Site.objects.create(region=self.region2, name='Test Site 2', slug='test-site2')
self.site3 = Site.objects.create(region=None, name='Test Site 3', slug='test-site3')
self.queryset = Site.objects.all()
def test_filter_single(self):
kwargs = {'region': ['test-region-1']}
qs = self.SiteFilterSet(kwargs, self.queryset).qs
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0], self.site1)
def test_filter_multiple(self):
kwargs = {'region': ['test-region-1', 'test-region-2']}
qs = self.SiteFilterSet(kwargs, self.queryset).qs
self.assertEqual(qs.count(), 2)
self.assertEqual(qs[0], self.site1)
self.assertEqual(qs[1], self.site2)
def test_filter_null(self):
kwargs = {'region': [settings.FILTERS_NULL_CHOICE_VALUE]}
qs = self.SiteFilterSet(kwargs, self.queryset).qs
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0], self.site3)
def test_filter_combined(self):
kwargs = {'region': ['test-region-1', settings.FILTERS_NULL_CHOICE_VALUE]}
qs = self.SiteFilterSet(kwargs, self.queryset).qs
self.assertEqual(qs.count(), 2)
self.assertEqual(qs[0], self.site1)
self.assertEqual(qs[1], self.site3)
class DummyModel(models.Model):
"""
Dummy model used by BaseFilterSetTest for filter validation. Should never appear in a schema migration.
"""
charfield = models.CharField(
max_length=10
)
choicefield = models.IntegerField(
choices=(('A', 1), ('B', 2), ('C', 3))
)
datefield = models.DateField()
datetimefield = models.DateTimeField()
integerfield = models.IntegerField()
macaddressfield = MACAddressField()
timefield = models.TimeField()
treeforeignkeyfield = TreeForeignKey(
to='self',
on_delete=models.CASCADE
)
tags = TaggableManager(through=TaggedItem)
class BaseFilterSetTest(TestCase):
"""
Ensure that a BaseFilterSet automatically creates the expected set of filters for each filter type.
"""
class DummyFilterSet(BaseFilterSet):
charfield = django_filters.CharFilter()
macaddressfield = MACAddressFilter()
modelchoicefield = django_filters.ModelChoiceFilter(
field_name='integerfield', # We're pretending this is a ForeignKey field
queryset=Site.objects.all()
)
modelmultiplechoicefield = django_filters.ModelMultipleChoiceFilter(
field_name='integerfield', # We're pretending this is a ForeignKey field
queryset=Site.objects.all()
)
multiplechoicefield = django_filters.MultipleChoiceFilter(
field_name='choicefield'
)
multivaluecharfield = MultiValueCharFilter(
field_name='charfield'
)
tagfield = TagFilter()
treeforeignkeyfield = TreeNodeMultipleChoiceFilter(
queryset=DummyModel.objects.all()
)
class Meta:
model = DummyModel
fields = (
'charfield',
'choicefield',
'datefield',
'datetimefield',
'integerfield',
'macaddressfield',
'modelchoicefield',
'modelmultiplechoicefield',
'multiplechoicefield',
'tagfield',
'timefield',
'treeforeignkeyfield',
)
@classmethod
def setUpTestData(cls):
cls.filters = cls.DummyFilterSet().filters
def test_char_filter(self):
self.assertIsInstance(self.filters['charfield'], django_filters.CharFilter)
self.assertEqual(self.filters['charfield'].lookup_expr, 'exact')
self.assertEqual(self.filters['charfield'].exclude, False)
self.assertEqual(self.filters['charfield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['charfield__n'].exclude, True)
self.assertEqual(self.filters['charfield__ie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['charfield__ie'].exclude, False)
self.assertEqual(self.filters['charfield__nie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['charfield__nie'].exclude, True)
self.assertEqual(self.filters['charfield__ic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['charfield__ic'].exclude, False)
self.assertEqual(self.filters['charfield__nic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['charfield__nic'].exclude, True)
self.assertEqual(self.filters['charfield__isw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['charfield__isw'].exclude, False)
self.assertEqual(self.filters['charfield__nisw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['charfield__nisw'].exclude, True)
self.assertEqual(self.filters['charfield__iew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['charfield__iew'].exclude, False)
self.assertEqual(self.filters['charfield__niew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['charfield__niew'].exclude, True)
def test_mac_address_filter(self):
self.assertIsInstance(self.filters['macaddressfield'], MACAddressFilter)
self.assertEqual(self.filters['macaddressfield'].lookup_expr, 'exact')
self.assertEqual(self.filters['macaddressfield'].exclude, False)
self.assertEqual(self.filters['macaddressfield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['macaddressfield__n'].exclude, True)
self.assertEqual(self.filters['macaddressfield__ie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['macaddressfield__ie'].exclude, False)
self.assertEqual(self.filters['macaddressfield__nie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['macaddressfield__nie'].exclude, True)
self.assertEqual(self.filters['macaddressfield__ic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['macaddressfield__ic'].exclude, False)
self.assertEqual(self.filters['macaddressfield__nic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['macaddressfield__nic'].exclude, True)
self.assertEqual(self.filters['macaddressfield__isw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['macaddressfield__isw'].exclude, False)
self.assertEqual(self.filters['macaddressfield__nisw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['macaddressfield__nisw'].exclude, True)
self.assertEqual(self.filters['macaddressfield__iew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['macaddressfield__iew'].exclude, False)
self.assertEqual(self.filters['macaddressfield__niew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['macaddressfield__niew'].exclude, True)
def test_model_choice_filter(self):
self.assertIsInstance(self.filters['modelchoicefield'], django_filters.ModelChoiceFilter)
self.assertEqual(self.filters['modelchoicefield'].lookup_expr, 'exact')
self.assertEqual(self.filters['modelchoicefield'].exclude, False)
self.assertEqual(self.filters['modelchoicefield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['modelchoicefield__n'].exclude, True)
def test_model_multiple_choice_filter(self):
self.assertIsInstance(self.filters['modelmultiplechoicefield'], django_filters.ModelMultipleChoiceFilter)
self.assertEqual(self.filters['modelmultiplechoicefield'].lookup_expr, 'exact')
self.assertEqual(self.filters['modelmultiplechoicefield'].exclude, False)
self.assertEqual(self.filters['modelmultiplechoicefield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['modelmultiplechoicefield__n'].exclude, True)
def test_multi_value_char_filter(self):
self.assertIsInstance(self.filters['multivaluecharfield'], MultiValueCharFilter)
self.assertEqual(self.filters['multivaluecharfield'].lookup_expr, 'exact')
self.assertEqual(self.filters['multivaluecharfield'].exclude, False)
self.assertEqual(self.filters['multivaluecharfield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['multivaluecharfield__n'].exclude, True)
self.assertEqual(self.filters['multivaluecharfield__ie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['multivaluecharfield__ie'].exclude, False)
self.assertEqual(self.filters['multivaluecharfield__nie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['multivaluecharfield__nie'].exclude, True)
self.assertEqual(self.filters['multivaluecharfield__ic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['multivaluecharfield__ic'].exclude, False)
self.assertEqual(self.filters['multivaluecharfield__nic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['multivaluecharfield__nic'].exclude, True)
self.assertEqual(self.filters['multivaluecharfield__isw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['multivaluecharfield__isw'].exclude, False)
self.assertEqual(self.filters['multivaluecharfield__nisw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['multivaluecharfield__nisw'].exclude, True)
self.assertEqual(self.filters['multivaluecharfield__iew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['multivaluecharfield__iew'].exclude, False)
self.assertEqual(self.filters['multivaluecharfield__niew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['multivaluecharfield__niew'].exclude, True)
def test_multi_value_date_filter(self):
self.assertIsInstance(self.filters['datefield'], MultiValueDateFilter)
self.assertEqual(self.filters['datefield'].lookup_expr, 'exact')
self.assertEqual(self.filters['datefield'].exclude, False)
self.assertEqual(self.filters['datefield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['datefield__n'].exclude, True)
self.assertEqual(self.filters['datefield__lt'].lookup_expr, 'lt')
self.assertEqual(self.filters['datefield__lt'].exclude, False)
self.assertEqual(self.filters['datefield__lte'].lookup_expr, 'lte')
self.assertEqual(self.filters['datefield__lte'].exclude, False)
self.assertEqual(self.filters['datefield__gt'].lookup_expr, 'gt')
self.assertEqual(self.filters['datefield__gt'].exclude, False)
self.assertEqual(self.filters['datefield__gte'].lookup_expr, 'gte')
self.assertEqual(self.filters['datefield__gte'].exclude, False)
def test_multi_value_datetime_filter(self):
self.assertIsInstance(self.filters['datetimefield'], MultiValueDateTimeFilter)
self.assertEqual(self.filters['datetimefield'].lookup_expr, 'exact')
self.assertEqual(self.filters['datetimefield'].exclude, False)
self.assertEqual(self.filters['datetimefield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['datetimefield__n'].exclude, True)
self.assertEqual(self.filters['datetimefield__lt'].lookup_expr, 'lt')
self.assertEqual(self.filters['datetimefield__lt'].exclude, False)
self.assertEqual(self.filters['datetimefield__lte'].lookup_expr, 'lte')
self.assertEqual(self.filters['datetimefield__lte'].exclude, False)
self.assertEqual(self.filters['datetimefield__gt'].lookup_expr, 'gt')
self.assertEqual(self.filters['datetimefield__gt'].exclude, False)
self.assertEqual(self.filters['datetimefield__gte'].lookup_expr, 'gte')
self.assertEqual(self.filters['datetimefield__gte'].exclude, False)
def test_multi_value_number_filter(self):
self.assertIsInstance(self.filters['integerfield'], MultiValueNumberFilter)
self.assertEqual(self.filters['integerfield'].lookup_expr, 'exact')
self.assertEqual(self.filters['integerfield'].exclude, False)
self.assertEqual(self.filters['integerfield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['integerfield__n'].exclude, True)
self.assertEqual(self.filters['integerfield__lt'].lookup_expr, 'lt')
self.assertEqual(self.filters['integerfield__lt'].exclude, False)
self.assertEqual(self.filters['integerfield__lte'].lookup_expr, 'lte')
self.assertEqual(self.filters['integerfield__lte'].exclude, False)
self.assertEqual(self.filters['integerfield__gt'].lookup_expr, 'gt')
self.assertEqual(self.filters['integerfield__gt'].exclude, False)
self.assertEqual(self.filters['integerfield__gte'].lookup_expr, 'gte')
self.assertEqual(self.filters['integerfield__gte'].exclude, False)
def test_multi_value_time_filter(self):
self.assertIsInstance(self.filters['timefield'], MultiValueTimeFilter)
self.assertEqual(self.filters['timefield'].lookup_expr, 'exact')
self.assertEqual(self.filters['timefield'].exclude, False)
self.assertEqual(self.filters['timefield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['timefield__n'].exclude, True)
self.assertEqual(self.filters['timefield__lt'].lookup_expr, 'lt')
self.assertEqual(self.filters['timefield__lt'].exclude, False)
self.assertEqual(self.filters['timefield__lte'].lookup_expr, 'lte')
self.assertEqual(self.filters['timefield__lte'].exclude, False)
self.assertEqual(self.filters['timefield__gt'].lookup_expr, 'gt')
self.assertEqual(self.filters['timefield__gt'].exclude, False)
self.assertEqual(self.filters['timefield__gte'].lookup_expr, 'gte')
self.assertEqual(self.filters['timefield__gte'].exclude, False)
def test_multiple_choice_filter(self):
self.assertIsInstance(self.filters['multiplechoicefield'], django_filters.MultipleChoiceFilter)
self.assertEqual(self.filters['multiplechoicefield'].lookup_expr, 'exact')
self.assertEqual(self.filters['multiplechoicefield'].exclude, False)
self.assertEqual(self.filters['multiplechoicefield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['multiplechoicefield__n'].exclude, True)
self.assertEqual(self.filters['multiplechoicefield__ie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['multiplechoicefield__ie'].exclude, False)
self.assertEqual(self.filters['multiplechoicefield__nie'].lookup_expr, 'iexact')
self.assertEqual(self.filters['multiplechoicefield__nie'].exclude, True)
self.assertEqual(self.filters['multiplechoicefield__ic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['multiplechoicefield__ic'].exclude, False)
self.assertEqual(self.filters['multiplechoicefield__nic'].lookup_expr, 'icontains')
self.assertEqual(self.filters['multiplechoicefield__nic'].exclude, True)
self.assertEqual(self.filters['multiplechoicefield__isw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['multiplechoicefield__isw'].exclude, False)
self.assertEqual(self.filters['multiplechoicefield__nisw'].lookup_expr, 'istartswith')
self.assertEqual(self.filters['multiplechoicefield__nisw'].exclude, True)
self.assertEqual(self.filters['multiplechoicefield__iew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['multiplechoicefield__iew'].exclude, False)
self.assertEqual(self.filters['multiplechoicefield__niew'].lookup_expr, 'iendswith')
self.assertEqual(self.filters['multiplechoicefield__niew'].exclude, True)
def test_tag_filter(self):
self.assertIsInstance(self.filters['tagfield'], TagFilter)
self.assertEqual(self.filters['tagfield'].lookup_expr, 'exact')
self.assertEqual(self.filters['tagfield'].exclude, False)
self.assertEqual(self.filters['tagfield__n'].lookup_expr, 'exact')
self.assertEqual(self.filters['tagfield__n'].exclude, True)
def test_tree_node_multiple_choice_filter(self):
self.assertIsInstance(self.filters['treeforeignkeyfield'], TreeNodeMultipleChoiceFilter)
# TODO: lookup_expr different for negation?
self.assertEqual(self.filters['treeforeignkeyfield'].lookup_expr, 'exact')
self.assertEqual(self.filters['treeforeignkeyfield'].exclude, False)
self.assertEqual(self.filters['treeforeignkeyfield__n'].lookup_expr, 'in')
self.assertEqual(self.filters['treeforeignkeyfield__n'].exclude, True)
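# Illustrative reference (not part of the original tests): the assertions above exercise the
# lookup suffixes that BaseFilterSet generates for character-type filters. The mapping below
# restates that convention in one place as (lookup_expr, exclude) pairs; it is documentation
# only and is not used by the tests.
CHAR_LOOKUP_SUFFIXES = {
    '': ('exact', False),         # base filter
    '__n': ('exact', True),       # negated exact
    '__ie': ('iexact', False),
    '__nie': ('iexact', True),
    '__ic': ('icontains', False),
    '__nic': ('icontains', True),
    '__isw': ('istartswith', False),
    '__nisw': ('istartswith', True),
    '__iew': ('iendswith', False),
    '__niew': ('iendswith', True),
}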
class DynamicFilterLookupExpressionTest(TestCase):
"""
Validate function of automatically generated filters using the Device model as an example.
"""
device_queryset = Device.objects.all()
device_filterset = DeviceFilterSet
site_queryset = Site.objects.all()
site_filterset = SiteFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
device_types = (
DeviceType(manufacturer=manufacturers[0], model='Model 1', slug='model-1', is_full_depth=True),
DeviceType(manufacturer=manufacturers[1], model='Model 2', slug='model-2', is_full_depth=True),
DeviceType(manufacturer=manufacturers[2], model='Model 3', slug='model-3', is_full_depth=False),
)
DeviceType.objects.bulk_create(device_types)
device_roles = (
DeviceRole(name='Device Role 1', slug='device-role-1'),
DeviceRole(name='Device Role 2', slug='device-role-2'),
DeviceRole(name='Device Role 3', slug='device-role-3'),
)
DeviceRole.objects.bulk_create(device_roles)
platforms = (
Platform(name='Platform 1', slug='platform-1'),
Platform(name='Platform 2', slug='platform-2'),
Platform(name='Platform 3', slug='platform-3'),
)
Platform.objects.bulk_create(platforms)
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='abc-site-1', region=regions[0], asn=65001),
Site(name='Site 2', slug='def-site-2', region=regions[1], asn=65101),
Site(name='Site 3', slug='ghi-site-3', region=regions[2], asn=65201),
)
Site.objects.bulk_create(sites)
racks = (
Rack(name='Rack 1', site=sites[0]),
Rack(name='Rack 2', site=sites[1]),
Rack(name='Rack 3', site=sites[2]),
)
Rack.objects.bulk_create(racks)
devices = (
Device(name='Device 1', device_type=device_types[0], device_role=device_roles[0], platform=platforms[0], serial='ABC', asset_tag='1001', site=sites[0], rack=racks[0], position=1, face=DeviceFaceChoices.FACE_FRONT, status=DeviceStatusChoices.STATUS_ACTIVE, local_context_data={"foo": 123}),
Device(name='Device 2', device_type=device_types[1], device_role=device_roles[1], platform=platforms[1], serial='DEF', asset_tag='1002', site=sites[1], rack=racks[1], position=2, face=DeviceFaceChoices.FACE_FRONT, status=DeviceStatusChoices.STATUS_STAGED),
Device(name='Device 3', device_type=device_types[2], device_role=device_roles[2], platform=platforms[2], serial='GHI', asset_tag='1003', site=sites[2], rack=racks[2], position=3, face=DeviceFaceChoices.FACE_REAR, status=DeviceStatusChoices.STATUS_FAILED),
)
Device.objects.bulk_create(devices)
interfaces = (
Interface(device=devices[0], name='Interface 1', mac_address='00-00-00-00-00-01'),
Interface(device=devices[0], name='Interface 2', mac_address='aa-00-00-00-00-01'),
Interface(device=devices[1], name='Interface 3', mac_address='00-00-00-00-00-02'),
Interface(device=devices[1], name='Interface 4', mac_address='bb-00-00-00-00-02'),
Interface(device=devices[2], name='Interface 5', mac_address='00-00-00-00-00-03'),
Interface(device=devices[2], name='Interface 6', mac_address='cc-00-00-00-00-03'),
)
Interface.objects.bulk_create(interfaces)
def test_site_name_negation(self):
params = {'name__n': ['Site 1']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_slug_icontains(self):
params = {'slug__ic': ['-1']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 1)
def test_site_slug_icontains_negation(self):
params = {'slug__nic': ['-1']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_slug_startswith(self):
params = {'slug__isw': ['abc']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 1)
def test_site_slug_startswith_negation(self):
params = {'slug__nisw': ['abc']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_slug_endswith(self):
params = {'slug__iew': ['-1']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 1)
def test_site_slug_endswith_negation(self):
params = {'slug__niew': ['-1']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_asn_lt(self):
params = {'asn__lt': [65101]}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 1)
def test_site_asn_lte(self):
params = {'asn__lte': [65101]}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_asn_gt(self):
        params = {'asn__gt': [65101]}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 1)
def test_site_asn_gte(self):
params = {'asn__gte': [65101]}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_region_negation(self):
params = {'region__n': ['region-1']}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_site_region_id_negation(self):
params = {'region_id__n': [Region.objects.first().pk]}
self.assertEqual(SiteFilterSet(params, self.site_queryset).qs.count(), 2)
def test_device_name_eq(self):
params = {'name': ['Device 1']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 1)
def test_device_name_negation(self):
params = {'name__n': ['Device 1']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_name_startswith(self):
params = {'name__isw': ['Device']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 3)
def test_device_name_startswith_negation(self):
params = {'name__nisw': ['Device 1']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_name_endswith(self):
params = {'name__iew': [' 1']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 1)
def test_device_name_endswith_negation(self):
params = {'name__niew': [' 1']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_name_icontains(self):
params = {'name__ic': [' 2']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 1)
def test_device_name_icontains_negation(self):
params = {'name__nic': [' ']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 0)
def test_device_mac_address_negation(self):
params = {'mac_address__n': ['00-00-00-00-00-01', 'aa-00-00-00-00-01']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_mac_address_startswith(self):
params = {'mac_address__isw': ['aa:']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 1)
def test_device_mac_address_startswith_negation(self):
params = {'mac_address__nisw': ['aa:']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_mac_address_endswith(self):
params = {'mac_address__iew': [':02']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 1)
def test_device_mac_address_endswith_negation(self):
params = {'mac_address__niew': [':02']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_mac_address_icontains(self):
params = {'mac_address__ic': ['aa:', 'bb']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 2)
def test_device_mac_address_icontains_negation(self):
params = {'mac_address__nic': ['aa:', 'bb']}
self.assertEqual(DeviceFilterSet(params, self.device_queryset).qs.count(), 1)
|
'''
Copyright (c) 2013, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
'''
'''
This material was prepared as an account of work sponsored by an
agency of the United States Government. Neither the United States
Government nor the United States Department of Energy, nor Battelle,
nor any of their employees, nor any jurisdiction or organization
that has cooperated in the development of these materials, makes
any warranty, express or implied, or assumes any legal liability
or responsibility for the accuracy, completeness, or usefulness of
any information, apparatus, product, software, or process disclosed,
or represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or
service by trade name, trademark, manufacturer, or otherwise does
not necessarily constitute or imply its endorsement, recommendation,
or favoring by the United States Government or any agency thereof,
or Battelle Memorial Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the
United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
'''
import os
import abc
from base import BaseSmapVolttron, BaseInterface, BaseRegister
from csv import DictReader
import struct
path = os.path.dirname(os.path.abspath(__file__))
default_config = os.path.join(path, "example.csv")
default_directory = os.path.dirname(os.path.abspath(__file__))
class FileRegister(BaseRegister):
__metaclass__ = abc.ABCMeta
def __init__(self, type_string, read_only, pointName, units, description = '', directory='.'):
self.file_path = os.path.join(directory, pointName)
#We only use struct to sort out the type of the register.
self.bit_register = type_string.lower() == 'bool'
register_type = 'bit' if self.bit_register else 'byte'
        super(FileRegister, self).__init__(register_type, read_only, pointName, units, description=description)
if self.bit_register:
self.python_type = int
else:
try:
self.parse_struct = struct.Struct(type_string)
except struct.error:
raise ValueError("Invalid Register '" + type_string + "' for point " + pointName)
struct_types = [type(x) for x in self.parse_struct.unpack('\x00'*self.parse_struct.size)]
if len(struct_types) != 1:
raise ValueError("Invalid length Register '" + type_string + "' for point " + pointName)
self.python_type = struct_types[0]
def parse_value(self, value_string):
return self.python_type(value_string)
def get_value(self):
try:
with open(self.file_path) as f:
return self.parse_value(f.read())
except (ValueError, IOError):
#Build up default files.
value = self.parse_value('0')
print "Creating default file for point: ", self.point_name
with open(self.file_path, 'w') as f:
f.write(str(value))
return value
def set_value(self, value):
self.value = value
with open(self.file_path, 'w') as f:
f.write(str(value))
return value
class FileInterface(BaseInterface):
def __init__(self, directory=default_directory, config_file=default_config, **kwargs):
super(FileInterface, self).__init__(**kwargs)
self.parse_config(directory, config_file)
def parse_config(self, directory, config_file):
if config_file is None:
return
with open(config_file, 'rb') as f:
configDict = DictReader(f)
for regDef in configDict:
#Skip lines that have no address yet.
if not regDef['Point Name']:
continue
io_type = regDef['Modbus Register']
read_only = regDef['Writable'].lower() != 'true'
point_path = regDef['PNNL Point Name']
description = regDef['Notes']
units = regDef['Units']
register = FileRegister(io_type, read_only, point_path, units, description = description, directory=directory)
self.insert_register(register)
    #Getting data in an async manner
def get_point_async(self, point_name):
return self.get_point_sync(point_name)
    #setting data in an async manner
def set_point_async(self, point_name, value):
return self.set_point_sync(point_name, value)
#Getting data in a sync manner
def get_point_sync(self, point_name):
register = self.point_map[point_name]
return register.get_value()
#setting data in a sync manner
def set_point_sync(self, point_name, value):
register = self.point_map[point_name]
return register.set_value(value)
def scrape_all(self):
result_dict={}
try:
for point in self.point_map:
result_dict[point]=self.get_point_sync(point)
        except IOError:
            print ("ERROR: Failed to read one or more register files while scraping the device")
            return None
return result_dict
class File(BaseSmapVolttron):
"""
Fake device backed by a file for each register.
Designed to use the modbus configuration file for setup.
"""
def setup(self, opts):
super(File, self).setup(opts)
        self.set_metadata('/', {'Instrument/Manufacturer' : 'Pacific Northwest National Laboratory',
'Extra/Driver' : 'volttron.drivers.file_driver.File'})
def get_interface(self, opts):
directory = opts.get('directory', default_directory)
config_file = opts.get('register_config', default_config)
return FileInterface(directory=directory, config_file=config_file)
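#Illustrative sketch (not part of the original driver): the register CSV read by parse_config
#is expected to provide at least the columns 'Point Name', 'PNNL Point Name', 'Modbus Register',
#'Writable', 'Units' and 'Notes'. The round trip below uses FileRegister directly with
#placeholder values and assumes the surrounding VOLTTRON/sMAP base classes are importable.
if __name__ == '__main__':
    demo_register = FileRegister('>f', False, 'DemoPoint', 'degF',
                                 description='demo point', directory='.')
    demo_register.set_value(72.5)
    print(demo_register.get_value())  # expected: 72.5, read back from the ./DemoPoint file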
|
#!C:\Users\WEMERSON\Documents\kivy\kivy_venv\Scripts\python.exe
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html4', description=description)
|
#!/usr/bin/env python
# encoding: utf-8
"""A Snippet instance is an instance of a Snippet Definition.
That is, when the user expands a snippet, a SnippetInstance is created
to keep track of the corresponding TextObjects. The Snippet itself is
also a TextObject.
"""
from UltiSnips import _vim
from UltiSnips.position import Position
from UltiSnips.text_objects._base import EditableTextObject, \
NoneditableTextObject
class SnippetInstance(EditableTextObject):
"""See module docstring."""
# pylint:disable=protected-access
def __init__(self, snippet, parent, initial_text,
start, end, visual_content, last_re, globals):
if start is None:
start = Position(0, 0)
if end is None:
end = Position(0, 0)
self.snippet = snippet
self._cts = 0
self.locals = {'match': last_re}
self.globals = globals
self.visual_content = visual_content
EditableTextObject.__init__(self, parent, start, end, initial_text)
def replace_initial_text(self):
"""Puts the initial text of all text elements into Vim."""
def _place_initial_text(obj):
"""recurses on the children to do the work."""
obj.overwrite()
if isinstance(obj, EditableTextObject):
for child in obj._children:
_place_initial_text(child)
_place_initial_text(self)
def replay_user_edits(self, cmds):
"""Replay the edits the user has done to keep endings of our Text
objects in sync with reality."""
for cmd in cmds:
self._do_edit(cmd)
def update_textobjects(self):
"""Update the text objects that should change automagically after the
users edits have been replayed.
This might also move the Cursor
"""
vc = _VimCursor(self)
done = set()
not_done = set()
def _find_recursive(obj):
"""Finds all text objects and puts them into 'not_done'."""
if isinstance(obj, EditableTextObject):
for child in obj._children:
_find_recursive(child)
not_done.add(obj)
_find_recursive(self)
counter = 10
while (done != not_done) and counter:
# Order matters for python locals!
for obj in sorted(not_done - done):
if obj._update(done):
done.add(obj)
counter -= 1
if not counter:
raise RuntimeError(
'The snippets content did not converge: Check for Cyclic '
'dependencies or random strings in your snippet. You can use '
"'if not snip.c' to make sure to only expand random output "
'once.')
vc.to_vim()
self._del_child(vc)
def select_next_tab(self, backwards=False):
"""Selects the next tabstop or the previous if 'backwards' is True."""
if self._cts is None:
return
if backwards:
cts_bf = self._cts
res = self._get_prev_tab(self._cts)
if res is None:
self._cts = cts_bf
return self._tabstops.get(self._cts, None)
self._cts, ts = res
return ts
else:
res = self._get_next_tab(self._cts)
if res is None:
self._cts = None
return self._tabstops.get(0, None)
else:
self._cts, ts = res
return ts
def _get_tabstop(self, requester, no):
# SnippetInstances are completely self contained, therefore, we do not
# need to ask our parent for Tabstops
cached_parent = self._parent
self._parent = None
rv = EditableTextObject._get_tabstop(self, requester, no)
self._parent = cached_parent
return rv
class _VimCursor(NoneditableTextObject):
"""Helper class to keep track of the Vim Cursor when text objects expand
and move."""
def __init__(self, parent):
NoneditableTextObject.__init__(
self, parent, _vim.buf.cursor, _vim.buf.cursor,
tiebreaker=Position(-1, -1))
def to_vim(self):
"""Moves the cursor in the Vim to our position."""
assert self._start == self._end
_vim.buf.cursor = self._start
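# Illustrative sketch (not part of UltiSnips): update_textobjects() above applies a bounded
# fixed-point loop -- keep updating objects until every one reports it is done, and give up
# after a fixed number of passes to avoid cycling forever. The generic helper below shows the
# same pattern with plain callables standing in for text objects; it is an illustration only.
def _converge(objects, max_passes=10):
    """Call each object until every call returns True or max_passes is exhausted."""
    done = set()
    remaining = set(objects)
    passes = max_passes
    while done != remaining and passes:
        for obj in remaining - done:
            if obj():
                done.add(obj)
        passes -= 1
    if not passes and done != remaining:
        raise RuntimeError('objects did not converge within %d passes' % max_passes)
    return done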
|
"""Docstring
"""
import cv2
#import numpy as np
def find_in_face(haarcascade, rec=False):
"""Press 'k' for quit
"""
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier(haarcascade)
cap = cv2.VideoCapture(0)
if rec:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 480))
while True:
_, original = cap.read()
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (fx, fy, fw, fh) in faces:
cv2.rectangle(original, pt1=(fx, fy), pt2=(
fx+fw, fy+fh), color=(0, 0, 255), thickness=2)
roi_gray = gray[fy:fy+fh, fx:fx+fw]
roi_color = original[fy:fy+fh, fx:fx+fw]
smiles = smile_cascade.detectMultiScale(roi_gray)
for (sx, sy, sw, sh) in smiles:
cv2.rectangle(roi_color, pt1=(sx, sy), pt2=(
sx+sw, sy+sh), color=(255, 0, 0), thickness=2)
if rec:
out.write(original)
cv2.imshow('Image', original)
if cv2.waitKey(1) & 0xFF == ord('k'):
break
cap.release()
if rec:
out.release()
cv2.destroyAllWindows()
find_in_face('haarcascade_eye.xml', rec=False)
|
import torch.nn as nn
import torch
class ProtoNetBig(nn.Module):
def __init__(self, x_dim=23433, hid_dim=[2000, 1000, 500, 250], z_dim=100):
super(ProtoNetBig, self).__init__()
self.linear0 = nn.Linear(x_dim, hid_dim[0])
self.bn1 = nn.BatchNorm1d(hid_dim[0])
self.linear1 = nn.Linear(hid_dim[0], hid_dim[1])
self.bn2 = nn.BatchNorm1d(hid_dim[1])
self.linear2 = nn.Linear(hid_dim[1] + hid_dim[0], hid_dim[2])
self.bn3 = nn.BatchNorm1d(hid_dim[2])
self.linear3 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2], hid_dim[3])
self.bn4 = nn.BatchNorm1d(hid_dim[3])
self.linear4 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2] + hid_dim[3], z_dim)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(inplace=True)
def forward(self, x):
out = self.dropout(self.bn1(self.relu(self.linear0(x))))
out1 = self.dropout(self.bn2(self.relu(self.linear1(out))))
out2 = torch.cat([out, out1], 1)
out3 = self.dropout(self.bn3(self.relu(self.linear2(out2))))
out4 = torch.cat([out, out1, out3], 1)
out5 = self.dropout(self.bn4(self.relu(self.linear3(out4))))
out6 = torch.cat([out, out1, out3, out5], 1)
out7 = self.linear4(out6)
return out7
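# Hedged usage sketch (not part of the original module): a forward pass with random input,
# just to confirm the expected tensor shapes. The batch size of 4 is arbitrary; the input and
# output widths follow the constructor defaults (x_dim=23433, z_dim=100).
if __name__ == '__main__':
    model = ProtoNetBig()
    model.eval()  # use running BatchNorm statistics so the demo is deterministic in shape
    with torch.no_grad():
        dummy_input = torch.randn(4, 23433)
        embeddings = model(dummy_input)
    print(embeddings.shape)  # expected: torch.Size([4, 100])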
|
from gym.envs.registration import register
register(
id='scaled-riverswim-v0',
entry_point='riverswim_variants.envs:ScaledRiverSwimEnv',
max_episode_steps=20,
)
register(
id='stochastic-riverswim-v0',
entry_point='riverswim_variants.envs:StochasticRiverSwimEnv',
max_episode_steps=20,
)
register(
id='skewed-stochastic-riverswim-v0',
entry_point='riverswim_variants.envs:SkewedStochasticRiverSwimEnv',
max_episode_steps=20,
)
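# Hedged usage sketch (not part of the original module): once the riverswim_variants package
# (and gym) is installed, the environments registered above are created through the usual Gym
# factory. The 20-step episode cap comes from max_episode_steps in the registrations above.
if __name__ == '__main__':
    import gym
    env = gym.make('scaled-riverswim-v0')
    print(env.action_space, env.observation_space)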
|
import torch
import torch.nn as nn
import torch.nn.init as init
from .nets.backbone import HourglassBackbone, SuperpointBackbone
from .nets.junction_decoder import SuperpointDecoder
from .nets.heatmap_decoder import PixelShuffleDecoder
from .nets.descriptor_decoder import SuperpointDescriptor
def get_model(model_cfg=None, loss_weights=None, mode="train"):
""" Get model based on the model configuration. """
# Check dataset config is given
if model_cfg is None:
raise ValueError("[Error] The model config is required!")
# List the supported options here
print("\n\n\t--------Initializing model----------")
supported_arch = ["simple"]
if not model_cfg["model_architecture"] in supported_arch:
raise ValueError(
"[Error] The model architecture is not in supported arch!")
if model_cfg["model_architecture"] == "simple":
model = SOLD2Net(model_cfg)
else:
raise ValueError(
"[Error] The model architecture is not in supported arch!")
# Optionally register loss weights to the model
if mode == "train":
if loss_weights is not None:
for param_name, param in loss_weights.items():
if isinstance(param, nn.Parameter):
print("\t [Debug] Adding %s with value %f to model"
% (param_name, param.item()))
model.register_parameter(param_name, param)
else:
raise ValueError(
"[Error] the loss weights can not be None in dynamic weighting mode during training.")
# Display some summary info.
print("\tModel architecture: %s" % model_cfg["model_architecture"])
print("\tBackbone: %s" % model_cfg["backbone"])
print("\tJunction decoder: %s" % model_cfg["junction_decoder"])
print("\tHeatmap decoder: %s" % model_cfg["heatmap_decoder"])
print("\t-------------------------------------")
return model
class SOLD2Net(nn.Module):
""" Full network for SOLD². """
def __init__(self, model_cfg):
super(SOLD2Net, self).__init__()
self.name = model_cfg["model_name"]
self.cfg = model_cfg
# List supported network options
self.supported_backbone = ["lcnn", "superpoint"]
self.backbone_net, self.feat_channel = self.get_backbone()
# List supported junction decoder options
self.supported_junction_decoder = ["superpoint_decoder"]
self.junction_decoder = self.get_junction_decoder()
# List supported heatmap decoder options
self.supported_heatmap_decoder = ["pixel_shuffle",
"pixel_shuffle_single"]
self.heatmap_decoder = self.get_heatmap_decoder()
# List supported descriptor decoder options
if "descriptor_decoder" in self.cfg:
self.supported_descriptor_decoder = ["superpoint_descriptor"]
self.descriptor_decoder = self.get_descriptor_decoder()
# Initialize the model weights
self.apply(weight_init)
def forward(self, input_images):
# The backbone
features = self.backbone_net(input_images)
# junction decoder
junctions = self.junction_decoder(features)
# heatmap decoder
heatmaps = self.heatmap_decoder(features)
outputs = {"junctions": junctions, "heatmap": heatmaps}
# Descriptor decoder
if "descriptor_decoder" in self.cfg:
outputs["descriptors"] = self.descriptor_decoder(features)
return outputs
def get_backbone(self):
""" Retrieve the backbone encoder network. """
if not self.cfg["backbone"] in self.supported_backbone:
raise ValueError(
"[Error] The backbone selection is not supported.")
# lcnn backbone (stacked hourglass)
if self.cfg["backbone"] == "lcnn":
backbone_cfg = self.cfg["backbone_cfg"]
backbone = HourglassBackbone(**backbone_cfg)
feat_channel = 256
elif self.cfg["backbone"] == "superpoint":
backbone_cfg = self.cfg["backbone_cfg"]
backbone = SuperpointBackbone()
feat_channel = 128
else:
raise ValueError(
"[Error] The backbone selection is not supported.")
return backbone, feat_channel
def get_junction_decoder(self):
""" Get the junction decoder. """
if (not self.cfg["junction_decoder"]
in self.supported_junction_decoder):
raise ValueError(
"[Error] The junction decoder selection is not supported.")
# superpoint decoder
if self.cfg["junction_decoder"] == "superpoint_decoder":
decoder = SuperpointDecoder(self.feat_channel,
self.cfg["backbone"])
else:
raise ValueError(
"[Error] The junction decoder selection is not supported.")
return decoder
def get_heatmap_decoder(self):
""" Get the heatmap decoder. """
if not self.cfg["heatmap_decoder"] in self.supported_heatmap_decoder:
raise ValueError(
"[Error] The heatmap decoder selection is not supported.")
# Pixel_shuffle decoder
if self.cfg["heatmap_decoder"] == "pixel_shuffle":
if self.cfg["backbone"] == "lcnn":
decoder = PixelShuffleDecoder(self.feat_channel,
num_upsample=2)
elif self.cfg["backbone"] == "superpoint":
decoder = PixelShuffleDecoder(self.feat_channel,
num_upsample=3)
else:
raise ValueError("[Error] Unknown backbone option.")
# Pixel_shuffle decoder with single channel output
elif self.cfg["heatmap_decoder"] == "pixel_shuffle_single":
if self.cfg["backbone"] == "lcnn":
decoder = PixelShuffleDecoder(
self.feat_channel, num_upsample=2, output_channel=1)
elif self.cfg["backbone"] == "superpoint":
decoder = PixelShuffleDecoder(
self.feat_channel, num_upsample=3, output_channel=1)
else:
raise ValueError("[Error] Unknown backbone option.")
else:
raise ValueError(
"[Error] The heatmap decoder selection is not supported.")
return decoder
def get_descriptor_decoder(self):
""" Get the descriptor decoder. """
if (not self.cfg["descriptor_decoder"]
in self.supported_descriptor_decoder):
raise ValueError(
"[Error] The descriptor decoder selection is not supported.")
# SuperPoint descriptor
if self.cfg["descriptor_decoder"] == "superpoint_descriptor":
decoder = SuperpointDescriptor(self.feat_channel)
else:
raise ValueError(
"[Error] The descriptor decoder selection is not supported.")
return decoder
def weight_init(m):
""" Weight initialization function. """
# Conv2D
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
# Batchnorm
elif isinstance(m, nn.BatchNorm2d):
init.normal_(m.weight.data, mean=1, std=0.02)
init.constant_(m.bias.data, 0)
# Linear
elif isinstance(m, nn.Linear):
init.xavier_normal_(m.weight.data)
init.normal_(m.bias.data)
else:
pass
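# A hedged usage sketch of get_model() above. The required config keys are the ones read
# by SOLD2Net; the concrete values below (model_name, the empty backbone_cfg used with the
# "superpoint" backbone, which ignores it) are illustrative assumptions:
#   model_cfg = {
#       "model_name": "sold2",
#       "model_architecture": "simple",
#       "backbone": "superpoint",
#       "backbone_cfg": {},
#       "junction_decoder": "superpoint_decoder",
#       "heatmap_decoder": "pixel_shuffle",
#   }
#   model = get_model(model_cfg, mode="test")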
|
import numpy as np
import numpy.testing as npt
from dipy.reconst.peaks import default_sphere, peaks_from_model
def test_PeaksAndMetricsDirectionGetter():
class SillyModel(object):
def fit(self, data, mask=None):
return SillyFit(self)
class SillyFit(object):
def __init__(self, model):
self.model = model
def odf(self, sphere):
odf = np.zeros(sphere.theta.shape)
r = np.random.randint(0, len(odf))
odf[r] = 1
return odf
def get_direction(dg, point, dir):
newdir = dir.copy()
state = dg.get_direction(point, newdir)
return (state, np.array(newdir))
data = np.random.random((3, 4, 5, 2))
peaks = peaks_from_model(SillyModel(), data, default_sphere,
relative_peak_threshold=.5,
min_separation_angle=25)
peaks._initialize()
up = np.zeros(3)
up[2] = 1.
down = -up
for i in range(3-1):
for j in range(4-1):
for k in range(5-1):
point = np.array([i, j, k], dtype=float)
# Test that the angle threshold rejects points
peaks.ang_thr = 0.
state, nd = get_direction(peaks, point, up)
npt.assert_equal(state, 1)
# Here we leverage the fact that we know Hemispheres project
# all their vertices into the z >= 0 half of the sphere.
peaks.ang_thr = 90.
state, nd = get_direction(peaks, point, up)
npt.assert_equal(state, 0)
expected_dir = peaks.peak_dirs[i, j, k, 0]
npt.assert_array_almost_equal(nd, expected_dir)
state, nd = get_direction(peaks, point, down)
npt.assert_array_almost_equal(nd, -expected_dir)
# Check that we can get directions at non-integer points
point += np.random.random(3)
state, nd = get_direction(peaks, point, up)
npt.assert_equal(state, 0)
# Check that points are rounded to get initial direction
point -= .5
id = peaks.initial_direction(point)
# id should be a (1, 3) array
npt.assert_array_almost_equal(id, [expected_dir])
if __name__ == "__main__":
npt.run_module_suite()
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
from utils.general import get_logger
from utils.test_env import EnvTest
from q1_schedule import LinearExploration, LinearSchedule
from q2_linear import Linear
from configs.q3_nature import config
class NatureQN(Linear):
"""
Implementing DeepMind's Nature paper. Here are the relevant urls.
https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
"""
def get_q_values_op(self, state, scope, reuse=False):
"""
Returns Q values for all actions
Args:
state: (tf tensor)
shape = (batch_size, img height, img width, nchannels)
scope: (string) scope name, that specifies if target network or not
reuse: (bool) reuse of variables in the scope
Returns:
out: (tf tensor) of shape = (batch_size, num_actions)
"""
# this information might be useful
num_actions = self.env.action_space.n
##############################################################
"""
TODO: implement the computation of Q values like in the paper
https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
you may find the section "model architecture" of the appendix of the
        nature paper particularly useful.
store your result in out of shape = (batch_size, num_actions)
HINT: you may find tensorflow.contrib.layers useful (imported)
make sure to understand the use of the scope param
make sure to flatten() the tensor before connecting it to fully connected layers
you can use any other methods from tensorflow
you are not allowed to import extra packages (like keras,
lasagne, cafe, etc.)
"""
##############################################################
################ YOUR CODE HERE - 10-15 lines ################
        with tf.variable_scope(scope, reuse=reuse):
            conv1 = layers.conv2d(inputs=state, num_outputs=32,
                                  kernel_size=[8, 8], stride=4,
                                  activation_fn=tf.nn.relu,
                                  reuse=reuse, scope="Conv1")
            conv2 = layers.conv2d(inputs=conv1, num_outputs=64,
                                  kernel_size=[4, 4], stride=2,
                                  activation_fn=tf.nn.relu,
                                  reuse=reuse, scope="Conv2")
            conv3 = layers.conv2d(inputs=conv2, num_outputs=64,
                                  kernel_size=[3, 3], stride=1,
                                  activation_fn=tf.nn.relu,
                                  reuse=reuse, scope="Conv3")
            flattened = layers.flatten(conv3, scope="flattened")
            hidden_fc = layers.fully_connected(inputs=flattened,
                                               num_outputs=512,
                                               activation_fn=tf.nn.relu,
                                               reuse=reuse, scope="hidden-fc")
            out = layers.fully_connected(inputs=hidden_fc,
                                         num_outputs=num_actions,
                                         activation_fn=None,
                                         reuse=reuse, scope="output-Q")
##############################################################
######################## END YOUR CODE #######################
return out
"""
Use deep Q network for test environment.
"""
if __name__ == '__main__':
env = EnvTest((80, 80, 1))
# exploration strategy
exp_schedule = LinearExploration(env, config.eps_begin,
config.eps_end, config.eps_nsteps)
# learning rate schedule
lr_schedule = LinearSchedule(config.lr_begin, config.lr_end,
config.lr_nsteps)
# train model
model = NatureQN(env, config)
model.run(exp_schedule, lr_schedule)
|
"""Complex Step derivative approximations."""
from __future__ import division, print_function
from itertools import groupby
from six.moves import range
import numpy as np
from openmdao.approximation_schemes.approximation_scheme import ApproximationScheme
from openmdao.utils.name_maps import abs_key2rel_key
DEFAULT_CS_OPTIONS = {
'step': 1e-15,
'form': 'forward',
}
class ComplexStep(ApproximationScheme):
r"""
Approximation scheme using complex step to calculate derivatives.
For example, using a step size of 'h' will approximate the derivative in
the following way:
.. math::
f'(x) = \Im{\frac{f(x+ih)}{h}}.
Attributes
----------
_exec_list : list
A list of which derivatives (in execution order) to compute.
The entries are of the form (of, wrt, options), where of and wrt are absolute names
and options is a dictionary.
"""
def __init__(self):
"""
Initialize the ApproximationScheme.
"""
super(ComplexStep, self).__init__()
self._exec_list = []
def add_approximation(self, abs_key, kwargs):
"""
Use this approximation scheme to approximate the derivative d(of)/d(wrt).
Parameters
----------
abs_key : tuple(str,str)
Absolute name pairing of (of, wrt) for the derivative.
kwargs : dict
Additional keyword arguments, to be interpreted by sub-classes.
"""
of, wrt = abs_key
options = DEFAULT_CS_OPTIONS.copy()
options.update(kwargs)
self._exec_list.append((of, wrt, options))
@staticmethod
def _key_fun(approx_tuple):
"""
Compute the sorting key for an approximation tuple.
Parameters
----------
approx_tuple : tuple(str, str, dict)
A given approximated derivative (of, wrt, options)
Returns
-------
tuple(str, str, float)
Sorting key (wrt, form, step_size)
"""
options = approx_tuple[2]
return (approx_tuple[1], options['form'], options['step'])
def _init_approximations(self):
"""
Prepare for later approximations.
"""
# itertools.groupby works like `uniq` rather than the SQL query, meaning that it will only
# group adjacent items with identical keys.
self._exec_list.sort(key=self._key_fun)
# TODO: Automatic sparse FD by constructing a graph of variable dependence?
def compute_approximations(self, system, jac=None, deriv_type='partial'):
"""
Execute the system to compute the approximate sub-Jacobians.
Parameters
----------
system : System
System on which the execution is run.
jac : None or dict-like
If None, update system with the approximated sub-Jacobians. Otherwise, store the
approximations in the given dict-like object.
deriv_type : str
One of 'total' or 'partial', indicating if total or partial derivatives are
being approximated.
"""
if jac is None:
jac = system._jacobian
if deriv_type == 'total':
current_vec = system._outputs
elif deriv_type == 'partial':
current_vec = system._residuals
else:
raise ValueError('deriv_type must be one of "total" or "partial"')
# Turn on complex step.
system._inputs._vector_info._under_complex_step = True
# create a scratch array
out_tmp = system._outputs.get_data()
results_clone = current_vec._clone(True)
# To support driver src_indices, we need to override some checks in Jacobian, but do it
# selectively.
uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \
not isinstance(jac, dict)
for key, approximations in groupby(self._exec_list, self._key_fun):
# groupby (along with this key function) will group all 'of's that have the same wrt and
# step size.
wrt, form, delta = key
if form == 'reverse':
delta *= -1.0
fact = 1.0 / delta
if wrt in system._owns_approx_wrt_idx:
in_idx = system._owns_approx_wrt_idx[wrt]
in_size = len(in_idx)
else:
if wrt in system._var_abs2meta:
in_size = system._var_abs2meta[wrt]['size']
in_idx = range(in_size)
outputs = []
# Note: If access to `approximations` is required again in the future, we will need to
# throw it in a list first. The groupby iterator only works once.
for approx_tuple in approximations:
of = approx_tuple[0]
# TODO: Sparse derivatives
if of in system._owns_approx_of_idx:
out_idx = system._owns_approx_of_idx[of]
out_size = len(out_idx)
else:
out_size = system._var_abs2meta[of]['size']
outputs.append((of, np.zeros((out_size, in_size))))
for i_count, idx in enumerate(in_idx):
# Run the Finite Difference
input_delta = [(wrt, idx, delta)]
result = self._run_point_complex(system, input_delta, out_tmp, results_clone,
deriv_type)
for of, subjac in outputs:
if of in system._owns_approx_of_idx:
out_idx = system._owns_approx_of_idx[of]
subjac[:, i_count] = result._imag_views_flat[of][out_idx] * fact
else:
subjac[:, i_count] = result._imag_views_flat[of] * fact
for of, subjac in outputs:
rel_key = abs_key2rel_key(system, (of, wrt))
if uses_src_indices:
jac._override_checks = True
jac[rel_key] = subjac
if uses_src_indices:
jac._override_checks = False
# Turn off complex step.
system._inputs._vector_info._under_complex_step = False
def _run_point_complex(self, system, input_deltas, out_tmp, result_clone, deriv_type='partial'):
"""
Perturb the system inputs with a complex step, runs, and returns the results.
Parameters
----------
system : System
The system having its derivs approximated.
input_deltas : list
List of (input name, indices, delta) tuples, where input name is an absolute name.
out_tmp : ndarray
An array the same size as the system outputs that is used for temporary storage.
result_clone : Vector
A vector cloned from the outputs vector. Used to store the results.
deriv_type : str
One of 'total' or 'partial', indicating if total or partial derivatives are being
approximated.
Returns
-------
Vector
Copy of the results from running the perturbed system.
"""
# TODO: MPI
inputs = system._inputs
outputs = system._outputs
if deriv_type == 'total':
run_model = system.run_solve_nonlinear
results_vec = outputs
elif deriv_type == 'partial':
run_model = system.run_apply_nonlinear
results_vec = system._residuals
else:
raise ValueError('deriv_type must be one of "total" or "partial"')
for in_name, idxs, delta in input_deltas:
if in_name in outputs._imag_views_flat:
outputs._imag_views_flat[in_name][idxs] += delta
else:
inputs._imag_views_flat[in_name][idxs] += delta
results_vec.get_data(out_tmp)
run_model()
# TODO: Grab only results of interest
result_clone.set_vec(results_vec)
results_vec.set_data(out_tmp)
for in_name, idxs, delta in input_deltas:
if in_name in outputs._imag_views_flat:
outputs._imag_views_flat[in_name][idxs] -= delta
else:
inputs._imag_views_flat[in_name][idxs] -= delta
return result_clone
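# Worked illustration of the complex-step formula from the class docstring, independent
# of the OpenMDAO API: for f(x) = x**3 at x = 2.0,
#     h = 1e-15
#     deriv = np.imag((2.0 + 1j * h) ** 3) / h   # == 12.0 to machine precision
# Because no subtraction of nearly equal numbers occurs, h can be made extremely small
# without the cancellation error that limits ordinary finite differences.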
|
from iconic_matcher import IconicMatcher
#from realign4d import TimeSeries, realign4d, resample4d
import transform
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
#!/bin/python
# argument processing
import sys, getopt
# date and time
import datetime
import pytz
# weather
from weather import weatherFormat, twoColumn
from ansi import ansi_escape
# graphics/image
import PIL # requires python-pillow
from PIL import Image
# webcams
import webcam
# for organ
import requests
from io import BytesIO
# default options
doWeather = True
doImage = True
whichWebcam = 0
# logical input
# -w -- do weather
# -i -- do image
# integer input
# -c -- (1,2,3) for (lake_view, tree_view, organ_view) webcam
# argument processing
myopts, args = getopt.getopt(sys.argv[1:], 'w:i:c:')
# o -- option
# a -- argument
for o, a in myopts:
if o == '-w':
doWeather = bool(int(a))
elif o == '-i':
doImage = bool(int(a))
elif o == '-c':
whichWebcam = int(a)
else:
print(o, a)
        print('Usage: {:s} [-w 0|1] [-i 0|1] [-c 1|2|3]'.format(sys.argv[0]))
# date/time in Las Cruces (Mountain time zone)
now = datetime.datetime.now(pytz.timezone('America/Denver'))
print('The time in Las Cruces is:')
print(now.strftime('%a %w %b %Y %H:%M:%S %Z'))
# days until
fname = '/home/wcdawn/hailey/next_visit.txt'
fobj = open(fname, 'r')
date_str = fobj.readlines()
fobj.close()
# strip removes leading whitespace, trailing whitespace, and newline characters
date_str = date_str[0].strip()
next_visit = datetime.datetime.strptime(date_str, '%Y-%m-%d')
now = datetime.datetime.now()
diff = next_visit - now
print()
print('Days until next visit: {:d}'.format(diff.days + 1))
# display an image
image_fname = '/home/wcdawn/hailey/christmas_pic/portland_canard.jpg'
if (doImage):
image = Image.open(image_fname)
maxsize = (640, 640)
image.thumbnail(maxsize, PIL.Image.ANTIALIAS)
image.show()
# weather
location_dict = {
'Missoula': [46.856339, -113.995292],
'Flathead': [47.876957, -114.032290]}
location_dict = {
'Las Cruces': [32.288111, -106.743986]}
if (doWeather):
weather_list = []
for key in location_dict:
weather_list.append(weatherFormat(key, location_dict[key][0],
location_dict[key][1]))
if (len(location_dict) == 1):
for i in range(len(weather_list[0])):
print(weather_list[0][i])
elif (len(location_dict) == 2):
padded_width = 40
for i in range(len(weather_list[0])):
blank_size = padded_width - len(ansi_escape.sub('', weather_list[0][i]))
print(weather_list[0][i] + blank_size * ' ' + weather_list[1][i])
# webcams
# http://webcam.flbs.umt.edu/view/viewer_index.shtml?id=2731
lake_view = 'http://webcam.flbs.umt.edu/mjpg/video.mjpg'
# http://webcam2.flbs.umt.edu/view/viewer_index.shtml?id=4824
tree_view = 'http://webcam2.flbs.umt.edu/mjpg/video.mjpg'
# https://weather.nmsu.edu/webcams/nmcc-fbg/
organ_view = 'https://weather.nmsu.edu/files/cameras/nmcc-fbg/nmcc-fbg.jpg'
if (whichWebcam == 1):
webcam.dispWebcam(lake_view)
elif (whichWebcam == 2):
webcam.dispWebcam(tree_view)
elif (whichWebcam == 3):
response = requests.get(organ_view)
image = Image.open(BytesIO(response.content))
maxsize = (640, 640)
image.thumbnail(maxsize, PIL.Image.ANTIALIAS)
image.show()
|
import datetime as dt
import pytest
from note_clerk import planning
@pytest.mark.parametrize(
"date, quarter",
[
(dt.datetime(2020, 1, 1), dt.datetime(2020, 1, 1)),
(dt.datetime(2020, 1, 2), dt.datetime(2020, 1, 1)),
(dt.datetime(2020, 4, 1), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 4, 2), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 5, 2), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 6, 2), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 7, 2), dt.datetime(2020, 7, 1)),
(dt.datetime(2020, 8, 2), dt.datetime(2020, 7, 1)),
(dt.datetime(2020, 9, 2), dt.datetime(2020, 7, 1)),
(dt.datetime(2020, 10, 2), dt.datetime(2020, 10, 1)),
(dt.datetime(2020, 11, 2), dt.datetime(2020, 10, 1)),
(dt.datetime(2020, 12, 2), dt.datetime(2020, 10, 1)),
],
)
def test_quarter_start(date: dt.datetime, quarter: dt.datetime) -> None:
adjusted = planning.quarter_start(date)
assert adjusted == quarter
def print_with_header(header: str, text: str) -> None:
line = "*" * (len(header) + 4)
print(f"{line}\n* {header} *\n{line}\n{text}")
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('release', '0002_auto_20150512_0719'),
('package', '0002_auto_20150512_0714'),
]
operations = [
migrations.AddField(
model_name='buildimage',
name='releases',
field=models.ManyToManyField(to='release.Release'),
),
]
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
#-- @testpoint: the openGauss keyword reloptions (non-reserved) used as a directory object name
'''
import unittest
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Hostname(unittest.TestCase):
def setUp(self):
logger.info("------------------------ Opengauss_Function_Keyword_Reloptions_Case0020 开始执行--------------------------")
# 关键字作为目录对象名不带双引号 - 成功
def test_reloptions_1(self):
SqlMdg = commonsh.execut_db_sql('''create directory reloptions as '/tmp/';
drop directory reloptions;''')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
    # Keyword as a directory object name with double quotes - succeeds
def test_reloptions_2(self):
SqlMdg = commonsh.execut_db_sql('''create directory "reloptions" as '/tmp/';
drop directory "reloptions";''')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
    # Keyword as a directory object name with single quotes - fails with a reasonable error
def test_reloptions_3(self):
SqlMdg = commonsh.execut_db_sql('''drop directory if exists 'reloptions';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql(''' create directory 'reloptions' as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
    # Keyword as a directory object name with backticks - fails with a reasonable error
def test_reloptions_4(self):
SqlMdg = commonsh.execut_db_sql('''drop directory if exists \`reloptions\`;''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql('''create directory \`reloptions\` as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
def tearDown(self):
        logger.info('------------------------ Opengauss_Function_Keyword_Reloptions_Case0020 finished --------------------------')
|
# encoding: latin2
"""Data generator module
"""
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2009-10 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
from weightsFromAreas import weightsFromAreas
from intersections import fixIntersections
from transformations import dict2matrix
from transformations import dict2sparseMatrix
from output import dict2gal, dict2csv
|
"""
WSGI config for derrida project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "derrida.settings")
application = get_wsgi_application()
|
from contextlib import contextmanager
import json
import os
import logging
import sys
import subprocess
from typing import Optional, Tuple
import pytest
logger = logging.getLogger(__name__)
@contextmanager
def set_env_var(key: str, val: Optional[str] = None):
old_val = os.environ.get(key, None)
if val is not None:
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
yield
if key in os.environ:
del os.environ[key]
if old_val is not None:
os.environ[key] = old_val
@pytest.fixture
def ray_start_stop():
subprocess.check_output(["ray", "start", "--head"])
try:
with set_env_var("RAY_ADDRESS", "http://127.0.0.1:8265"):
yield
finally:
subprocess.check_output(["ray", "stop", "--force"])
@contextmanager
def ray_cluster_manager():
"""
    Not used as a fixture, in case we want to set RAY_ADDRESS first.
"""
subprocess.check_output(["ray", "start", "--head"])
try:
yield
finally:
subprocess.check_output(["ray", "stop", "--force"])
def _run_cmd(cmd: str, should_fail=False) -> Tuple[str, str]:
"""Convenience wrapper for subprocess.run.
We always run with shell=True to simulate the CLI.
Asserts that the process succeeds/fails depending on should_fail.
Returns (stdout, stderr).
"""
print(f"Running command: '{cmd}'")
p: subprocess.CompletedProcess = subprocess.run(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if p.returncode == 0:
print("Command succeeded.")
if should_fail:
raise RuntimeError(
f"Expected command to fail, but got exit code: {p.returncode}."
)
else:
print(f"Command failed with exit code: {p.returncode}.")
if not should_fail:
raise RuntimeError(
f"Expected command to succeed, but got exit code: {p.returncode}."
)
return p.stdout.decode("utf-8"), p.stderr.decode("utf-8")
class TestJobSubmitHook:
"""Tests the RAY_JOB_SUBMIT_HOOK env var."""
def test_hook(self, ray_start_stop):
with set_env_var("RAY_JOB_SUBMIT_HOOK", "ray._private.test_utils.job_hook"):
stdout, _ = _run_cmd("ray job submit -- echo hello")
assert "hook intercepted: echo hello" in stdout
class TestRayAddress:
"""
    Integration version of the job CLI test that ensures interactions with the
    following components work as expected:
1) Ray client: use of RAY_ADDRESS and ray.init() in job_head.py
2) Ray dashboard: `ray start --head`
"""
def test_empty_ray_address(self, ray_start_stop):
with set_env_var("RAY_ADDRESS", None):
stdout, _ = _run_cmd("ray job submit -- echo hello")
assert "hello" in stdout
assert "succeeded" in stdout
@pytest.mark.parametrize(
"ray_client_address", ["127.0.0.1:8265", "ray://127.0.0.1:8265"]
)
def test_ray_client_address(self, ray_start_stop, ray_client_address: str):
with set_env_var("RAY_ADDRESS", ray_client_address):
_run_cmd("ray job submit -- echo hello", should_fail=True)
def test_valid_http_ray_address(self, ray_start_stop):
stdout, _ = _run_cmd("ray job submit -- echo hello")
assert "hello" in stdout
assert "succeeded" in stdout
class TestJobSubmit:
def test_basic_submit(self, ray_start_stop):
"""Should tail logs and wait for process to exit."""
cmd = "sleep 1 && echo hello && sleep 1 && echo hello"
stdout, _ = _run_cmd(f"ray job submit -- bash -c '{cmd}'")
assert "hello\nhello" in stdout
assert "succeeded" in stdout
def test_submit_no_wait(self, ray_start_stop):
"""Should exit immediately w/o printing logs."""
cmd = "echo hello && sleep 1000"
stdout, _ = _run_cmd(f"ray job submit --no-wait -- bash -c '{cmd}'")
assert "hello" not in stdout
assert "Tailing logs until the job exits" not in stdout
class TestJobStop:
def test_basic_stop(self, ray_start_stop):
"""Should wait until the job is stopped."""
cmd = "sleep 1000"
job_id = "test_basic_stop"
_run_cmd(f"ray job submit --no-wait --job-id={job_id} -- {cmd}")
stdout, _ = _run_cmd(f"ray job stop {job_id}")
assert "Waiting for job" in stdout
assert f"Job '{job_id}' was stopped" in stdout
def test_stop_no_wait(self, ray_start_stop):
"""Should not wait until the job is stopped."""
cmd = "echo hello && sleep 1000"
job_id = "test_stop_no_wait"
_run_cmd(f"ray job submit --no-wait --job-id={job_id} -- bash -c '{cmd}'")
stdout, _ = _run_cmd(f"ray job stop --no-wait {job_id}")
assert "Waiting for job" not in stdout
assert f"Job '{job_id}' was stopped" not in stdout
class TestJobList:
def test_empty(self, ray_start_stop):
stdout, _ = _run_cmd("ray job list")
assert "{}" in stdout
def test_list(self, ray_start_stop):
_run_cmd("ray job submit --job-id='hello_id' -- echo hello")
runtime_env = {"env_vars": {"TEST": "123"}}
_run_cmd(
"ray job submit --job-id='hi_id' "
f"--runtime-env-json='{json.dumps(runtime_env)}' -- echo hi"
)
stdout, _ = _run_cmd("ray job list")
assert "JobInfo" in stdout
assert "123" in stdout
assert "hello_id" in stdout
assert "hi_id" in stdout
def test_quote_escaping(ray_start_stop):
cmd = "echo \"hello 'world'\""
job_id = "test_quote_escaping"
stdout, _ = _run_cmd(
f"ray job submit --job-id={job_id} -- {cmd}",
)
assert "hello 'world'" in stdout
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
#!/usr/bin/python
'''
(C) Copyright 2017-2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
import subprocess
def check_for_pool(host, uuid):
"""
    Function to check if the pool folder exists on the server
Args:
host: Server host name
uuid: Pool uuid to check if exists
return:
resp: subprocess return code
"""
cmd = "test -e /mnt/daos/" + uuid
resp = subprocess.call(["ssh", host, cmd])
if resp == 0:
print ('%s exists' %uuid)
else:
print ('%s does not exist' %uuid)
return resp
def cleanup_pools(hosts):
"""
    Clean up the pools and their content from /mnt/daos/
    Args:
        hosts[list]: List of server names
    return:
None
"""
for host in hosts:
cmd = "rm -rf /mnt/daos/*"
subprocess.call(["ssh", host, cmd])
|
import pytest
from thefuck.rules.git_rebase_no_changes import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def output():
return '''Applying: Test commit
No changes - did you forget to use 'git add'?
If there is nothing left to stage, chances are that something else
already introduced the same changes; you might want to skip this patch.
When you have resolved this problem, run "git rebase --continue".
If you prefer to skip this patch, run "git rebase --skip" instead.
To check out the original branch and stop rebasing, run "git rebase --abort".
'''
def test_match(output):
assert match(Command('git rebase --continue', output))
assert not match(Command('git rebase --continue', ''))
assert not match(Command('git rebase --skip', ''))
def test_get_new_command(output):
assert (get_new_command(Command('git rebase --continue', output)) ==
'git rebase --skip')
|
# TODO
import sys
from sys import argv
import sqlite3
if len(argv) != 2:
print("Usage: python roster.py Gryffindor")
sys.exit(1)
#setting house choice
house_choice = argv[1].lower()
#working on database
db_file = 'students.db'
conn = sqlite3.connect(db_file)
c = conn.cursor()
#connect to db and retrieve house names
#todo: get rid of DISTINCT
c.execute('''SELECT DISTINCT house from students''')
houses = [row[0].lower() for row in c.fetchall()]
if house_choice not in houses:
    print(f'{house_choice} is not a house. Houses are: {houses}')
    sys.exit(1)
#retrieve name and birth of persons in that house (parameterized query avoids SQL injection)
c.execute('''SELECT first, middle, last, birth FROM students WHERE lower(house)=? ORDER BY last, first''',
          (house_choice,))
roster = c.fetchall()
conn.close()
#the middle name will be None if not present, so print each row accordingly
for row in roster:
    if row[1] is not None:
        print(f'{row[0]} {row[1]} {row[2]}, born {row[3]}')
    else:
        print(f'{row[0]} {row[2]}, born {row[3]}')
|
"""
base16vlq.py
base16 unsigned variable length quantity (VLQ)
based on
https://gist.github.com/mjpieters/86b0d152bb51d5f5979346d11005588b
https://github.com/Rich-Harris/vlq
to encode *signed* integers, we would need _abc_len == 17
python -c $'from base16vlq import encode\nfor n in range(0, 64):\n print(f"{n:3d} {encode(n):<3s} ", end="")\n if (n+1) % 8 == 0:\n print()'
_shift_size = 3
_carry_flag = 8 = 1000 = 2^3
_mask = 7 = 111 = 2^3-1
_len_abc = 16 = 10000 = 2^4
_bytemax = 15 = 1111 = 2^4-1
_abc_chars = (,:<[$*?)~=>]@&%
0 ( 1 , 2 : 3 < 4 [ 5 $ 6 * 7 ?
8 ), 9 ~, 10 =, 11 >, 12 ], 13 @, 14 &, 15 %,
16 ): 17 ~: 18 =: 19 >: 20 ]: 21 @: 22 &: 23 %:
24 )< 25 ~< 26 =< 27 >< 28 ]< 29 @< 30 &< 31 %<
32 )[ 33 ~[ 34 =[ 35 >[ 36 ][ 37 @[ 38 &[ 39 %[
40 )$ 41 ~$ 42 =$ 43 >$ 44 ]$ 45 @$ 46 &$ 47 %$
48 )* 49 ~* 50 =* 51 >* 52 ]* 53 @* 54 &* 55 %*
56 )? 57 ~? 58 =? 59 >? 60 ]? 61 @? 62 &? 63 %?
"""
from typing import List
_abc_chars = b"""(,:<[$*?)~=>]@&%"""
# 0123456701234567
# remaining special chars: {}#"'^`;|
_abc_table = [None] * (max(_abc_chars) + 1)
for i, b in enumerate(_abc_chars):
_abc_table[b] = i
#_shift_size = 5 # base64
_shift_size = 3 # base16
# one bit is needed for the carry_flag
_carry_flag = 1 << _shift_size
_mask = (1 << _shift_size) - 1 # 2^3-1 = 7
_bytemax = _mask | _carry_flag
_len_abc = _bytemax + 1 # unsigned
#_len_abc = _bytemax + 2 # signed?
if False:
print(f"_shift_size = {_shift_size}")
print(f"_carry_flag = {_carry_flag}")
print(f"_mask = {_mask}")
print(f"_bytemax = {_bytemax}")
print(f"_abc_chars = {_abc_chars.decode()}")
print(f"_len_abc = {_len_abc}")
assert len(_abc_chars) == _len_abc
def decode(vlq_code: str) -> List[int]:
"""Decode Base16 VLQ value"""
num_list = []
shift_size, carry_flag, mask = _shift_size, _carry_flag, _mask
shift = num = 0
# use byte values and a table to go from base16 characters to integers
for clamped in map(_abc_table.__getitem__, vlq_code.encode("ascii")):
num += (clamped & mask) << shift
if clamped & carry_flag:
shift += shift_size
continue
## read sign bit
#num_sign = -1 if (num & 1) else +1
#num = (num >> 1) * num_sign
num_list.append(num)
shift = num = 0
return num_list
def encode(*num_list: int) -> str:
"""Encode integers to a VLQ value"""
clamped_list = []
shift_size = _shift_size
carry_flag = _carry_flag
mask = _mask
for num in num_list:
## write sign bit
#num = (abs(num) << 1) | int(num < 0)
if type(num) != int or num < 0:
raise ValueError("num must be unsigned integer")
while True:
clamped = num & mask
num = num >> shift_size
if num > 0:
clamped = clamped | carry_flag
clamped_list.append(clamped)
if num == 0:
break
return bytes(map(_abc_chars.__getitem__, clamped_list)).decode()
# python -c 'from base16vlq import _test; _test()'
def _test():
"""throws on error"""
for num in range(0, 1024):
arr1 = [num, num]
code = encode(*arr1)
arr2 = decode(code)
if not arr1 == arr2:
print(f"arr1 = {arr1}")
print(f"code = {code}")
print(f"arr2 = {arr2}")
assert arr1 == arr2
assert decode(encode(1234))[0] == 1234
try:
encode(-1)
except ValueError:
pass
try:
encode(1.1)
except ValueError:
pass
try:
encode("a")
except ValueError:
pass
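# Worked example matching the table in the module docstring: 10 == 0b1010 splits into
# [2 | carry, 1] -> clamped bytes [10, 1] -> characters "=,".
if __name__ == "__main__":
    assert encode(10) == "=,"
    assert decode("=,") == [10]
    _test()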
|
'''
Utility functions.
'''
import argparse
import functools
import itertools
import os
import sqlite3 as sql
from contextlib import closing
from copy import deepcopy
from itertools import repeat
import numpy as np
import pandas as pd
import scipy as sp
import scipy.fftpack
import scipy.signal
from cnld import abstract
from scipy.spatial.distance import cdist
''' GEOMETRY-RELATED FUNCTIONS '''
def meshview(v1, v2, v3, mode='cartesian', as_list=True):
'''
'''
if mode.lower() in ('cart', 'cartesian'):
x, y, z = np.meshgrid(v1, v2, v3, indexing='ij')
elif mode.lower() in ('sph', 'spherical'):
r, theta, phi = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij')
x, y, z = sph2cart(r, theta, phi)
elif mode.lower() in ('sec', 'sector'):
r, alpha, beta = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij')
x, y, z = sec2cart(r, alpha, beta)
elif mode.lower() in ('dp', 'dpolar'):
r, alpha, beta = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij')
x, y, z = dp2cart(r, alpha, beta)
if as_list:
return np.c_[x.ravel('F'), y.ravel('F'), z.ravel('F')]
else:
return x, y, z
def sec2cart(r, alpha, beta):
'''
'''
z = r / np.sqrt(np.tan(alpha)**2 + np.tan(beta)**2 + 1)
x = z * np.tan(alpha)
y = z * np.tan(beta)
# alpha_p = np.arctan(np.tan(alpha) * np.cos(beta))
# x = np.sin(alpha_p) * r
# y = -np.sin(beta) * r * np.cos(alpha_p)
# z = np.sqrt(r**2 - x**2 - y**2)
# px = -px
# pyp = np.arctan(np.cos(px) * np.sin(py) / np.cos(py))
# x = r * np.sin(pyp)
# y = -r * np.cos(pyp) * np.sin(px)
# z = r * np.cos(px) * np.cos(pyp)
return x, y, z
def cart2sec(x, y, z):
'''
'''
r = np.sqrt(x**2 + y**2 + z**2)
alpha = np.arccos(z / (np.sqrt(x**2 + z**2))) * np.sign(x)
beta = np.arccos(z / (np.sqrt(y**2 + z**2))) * np.sign(y)
# r = np.sqrt(x**2 + y**2 + z**2)
# alpha_p = np.arcsin(x / r)
# beta = -np.arcsin(-y / r / np.cos(alpha_p))
# alpha = np.arctan(np.tan(alpha_p) / np.cos(beta))
return r, alpha, beta
def sph2cart(r, theta, phi):
'''
'''
x = r * np.cos(theta) * np.sin(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(phi)
return x, y, z
def cart2sph(x, y, z):
'''
'''
r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arctan2(y, x)  # arctan2 handles all quadrants and x == 0
phi = np.arccos(z / r)
return r, theta, phi
def cart2dp(x, y, z):
'''
'''
r = np.sqrt(x**2 + y**2 + z**2)
alpha = np.arccos((np.sqrt(y**2 + z**2) / r))
beta = np.arccos((np.sqrt(x**2 + z**2) / r))
return r, alpha, beta
def dp2cart(r, alpha, beta):
'''
'''
z = r * (1 - np.sin(alpha)**2 - np.sin(beta)**2)
x = r * np.sin(alpha)
y = r * np.sin(beta)
return x, y, z
def rotation_matrix(vec, angle):
'''
'''
if isinstance(vec, str):
string = vec.lower()
if string == 'x':
vec = [1, 0, 0]
elif string == '-x':
vec = [-1, 0, 0]
elif string == 'y':
vec = [0, 1, 0]
elif string == '-y':
vec = [0, -1, 0]
elif string == 'z':
vec = [0, 0, 1]
        elif string == '-z':
            vec = [0, 0, -1]
x, y, z = vec
a = angle
r = np.zeros((3, 3))
r[0, 0] = np.cos(a) + x**2 * (1 - np.cos(a))
r[0, 1] = x * y * (1 - np.cos(a)) - z * np.sin(a)
r[0, 2] = x * z * (1 - np.cos(a)) + y * np.sin(a)
r[1, 0] = y * x * (1 - np.cos(a)) + z * np.sin(a)
r[1, 1] = np.cos(a) + y**2 * (1 - np.cos(a))
r[1, 2] = y * z * (1 - np.cos(a)) - x * np.sin(a)
    r[2, 0] = z * x * (1 - np.cos(a)) - y * np.sin(a)
r[2, 1] = z * y * (1 - np.cos(a)) + x * np.sin(a)
r[2, 2] = np.cos(a) + z**2 * (1 - np.cos(a))
return r
def rotate_nodes(nodes, vec, angle):
'''
'''
rmatrix = rotation_matrix(vec, angle)
return rmatrix.dot(nodes.T).T
def distance(*args):
'''
'''
return cdist(*np.atleast_2d(*args))
''' SIGNAL PROCESSING AND RF DATA FUNCTIONS '''
def gausspulse(fc, fbw, fs):
'''
'''
cutoff = scipy.signal.gausspulse('cutoff', fc=fc, bw=fbw, tpr=-100, bwr=-3)
adj_cutoff = np.ceil(cutoff * fs) / fs
t = np.arange(-adj_cutoff, adj_cutoff + 1 / fs, 1 / fs)
pulse, _ = sp.signal.gausspulse(t, fc=fc, bw=fbw, retquad=True, bwr=-3)
return pulse, t
def nextpow2(n):
'''
'''
return 2**int(np.ceil(np.log2(n)))
def envelope(rf_data, N=None, axis=-1):
'''
'''
return np.abs(scipy.signal.hilbert(np.atleast_2d(rf_data), N, axis=axis))
def qbutter(x, fn, fs=1, btype='lowpass', n=4, plot=False, axis=-1):
'''
'''
wn = fn / (fs / 2.)
b, a = sp.signal.butter(n, wn, btype)
fx = sp.signal.lfilter(b, a, x, axis=axis)
return fx
def qfirwin(x,
fn,
fs=1,
btype='lowpass',
ntaps=80,
plot=False,
axis=-1,
window='hamming'):
'''
'''
if btype.lower() in ('lowpass', 'low'):
pass_zero = 1
elif btype.lower() in ('bandpass', 'band'):
pass_zero = 0
elif btype.lower() in ('highpass', 'high'):
pass_zero = 0
wn = fn / (fs / 2.)
b = sp.signal.firwin(ntaps, wn, pass_zero=pass_zero, window=window)
fx = np.apply_along_axis(lambda x: np.convolve(x, b), axis, x)
return fx
def qfft(s, nfft=None, fs=1, dr=100, fig=None, **kwargs):
'''
    Quick FFT. Returns the positive-frequency bins and the FFT magnitude in dB re max (plotting code is commented out).
'''
s = np.atleast_2d(s)
nsig, nsample = s.shape
if nfft is None:
nfft = nsample
# if fig is None:
# fig = plt.figure(tight_layout=1)
# ax = fig.add_subplot(111)
# else:
# ax = fig.get_axes()[0]
if nfft > nsample:
s = np.pad(s, ((0, 0), (0, nfft - nsample)), mode='constant')
elif nfft < nsample:
s = s[:, :nfft]
ft = sp.fftpack.fft(s, axis=1)
freqs = sp.fftpack.fftfreq(nfft, 1 / fs)
ftdb = 20 * np.log10(np.abs(ft) / (np.max(np.abs(ft), axis=1)[..., None]))
ftdb[ftdb < -dr] = -dr
cutoff = (nfft + 1) // 2
# ax.plot(freqs[:cutoff], ftdb[:, :cutoff].T, **kwargs)
# ax.set_xlabel('Frequency (Hz)')
# ax.set_ylabel('Magnitude (dB re max)')
# fig.show()
return freqs[:cutoff], ftdb[:, :cutoff]
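# e.g. (hypothetical 1-D signal `sig` sampled at rate `fs`):
#   freqs, ftdb = qfft(sig, nfft=1024, fs=fs)   # freqs in Hz, ftdb in dB re max, floored at -dr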
''' JOB-RELATED FUNCTIONS '''
def chunks(iterable, n):
res = []
for el in iterable:
res.append(el)
if len(res) == n:
yield res
res = []
if res:
yield res
def create_jobs(*args, mode='zip', is_complete=None):
'''
Convenience function for creating jobs (sets of input arguments) for
multiprocessing Pool. Supports zip and product combinations, and automatic chunking
of iterables.
'''
static_args = list()
static_idx = list()
iterable_args = list()
iterable_idx = list()
for arg_no, arg in enumerate(args):
if isinstance(arg, (tuple, list)):
iterable, chunksize = arg
if chunksize == 1:
iterable_args.append(iterable)
else:
iterable_args.append(chunks(iterable, chunksize))
iterable_idx.append(arg_no)
else:
static_args.append(itertools.repeat(arg))
static_idx.append(arg_no)
if not iterable_args and not static_args:
return
    if not iterable_args:
        yield 1, tuple(args[i] for i in static_idx)
        return
if not static_args:
repeats = itertools.repeat(())
else:
repeats = zip(*static_args)
if mode.lower() == 'product':
combos = itertools.product(*iterable_args)
elif mode.lower() == 'zip':
combos = zip(*iterable_args)
elif mode.lower() == 'zip_longest':
combos = itertools.zip_longest(*iterable_args)
for job_id, (r, p) in enumerate(zip(repeats, combos)):
# skip jobs that have been completed
if is_complete is not None and is_complete[job_id]:
continue
res = r + p
# reorder vals according to input order
yield job_id + 1, tuple(res[i] for i in np.argsort(static_idx + iterable_idx))
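# Usage sketch: plain arguments are repeated for every job, while (iterable, chunksize)
# pairs are chunked and combined according to `mode`, e.g.
#   list(create_jobs('fixed', (range(4), 2), mode='zip'))
#   -> [(1, ('fixed', [0, 1])), (2, ('fixed', [2, 3]))]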
''' DATABASE FUNCTIONS '''
def open_db(f):
def decorator(firstarg, *args, **kwargs):
if isinstance(firstarg, sql.Connection):
return f(firstarg, *args, **kwargs)
else:
# if os.path.isfile(firstarg):
with closing(sql.connect(firstarg)) as con:
return f(con, *args, **kwargs)
# else:
# raise IOError
return decorator
def read_db(f):
def decorator(firstarg, *args, **kwargs):
if isinstance(firstarg, sql.Connection):
return f(firstarg, *args, **kwargs)
else:
if os.path.isfile(firstarg):
with closing(sql.connect(firstarg)) as con:
return f(con, *args, **kwargs)
else:
raise IOError('File does not exist')
return decorator
@open_db
def table_exists(con, name):
query = '''SELECT count(*) FROM sqlite_master WHERE type='table' and name=?'''
return con.execute(query, (name, )).fetchone()[0] != 0
@open_db
def create_metadata_table(con, **kwargs):
table = [[str(v) for v in list(kwargs.values())]]
columns = list(kwargs.keys())
pd.DataFrame(table, columns=columns, dtype=str).to_sql('metadata',
con,
if_exists='replace',
index=False)
@open_db
def create_progress_table(con, njobs):
with con:
# create table
con.execute(
'CREATE TABLE progress (job_id INTEGER PRIMARY KEY, is_complete boolean)')
# insert values
con.executemany('INSERT INTO progress (is_complete) VALUES (?)',
repeat((False, ), njobs))
@open_db
def get_progress(con):
table = pd.read_sql('SELECT is_complete FROM progress ORDER BY job_id', con)
is_complete = np.array(table).squeeze()
ijob = sum(is_complete) + 1
return is_complete, ijob
@open_db
def update_progress(con, job_id):
with con:
con.execute('UPDATE progress SET is_complete=1 WHERE job_id=?', [
job_id,
])
''' SCRIPTING FUNCTIONS '''
def script_parser(main, config_def):
'''
General script command-line interface with 'config' and 'run' subcommands.
'''
if isinstance(config_def, dict):
# create config abstract type based on supplied dict
Config = abstract.register_type('Config', config_def)
else:
# config abstract type already defined
Config = config_def
# config subcommand generates a default configuration template
def config(args):
if args.file:
abstract.dump(Config(), args.file)
else:
print(Config())
# run subcommand will load the config file and pass to main
def run(args):
if args.config:
cfg = Config(**abstract.load(args.config))
else:
cfg = Config()
return main(cfg, args)
# create argument parser
parser = argparse.ArgumentParser()
# define config subparser
subparsers = parser.add_subparsers(help='sub-command help')
config_parser = subparsers.add_parser('config', help='config_help')
config_parser.add_argument('-f', '--file', nargs='?')
config_parser.set_defaults(func=config)
# define run subparser
run_parser = subparsers.add_parser('run', help='run_help')
run_parser.add_argument('config', nargs='?')
run_parser.add_argument('-f', '--file', nargs='?')
run_parser.add_argument('-t', '--threads', nargs='?', type=int)
run_parser.add_argument('-w', '--write-over', action='store_true')
run_parser.set_defaults(func=run)
return parser, run_parser
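# A hedged sketch of wiring script_parser into a script (main and the config fields are
# the caller's own; the names below are hypothetical):
#   def main(cfg, args):
#       ...
#   parser, run_parser = script_parser(main, {'freq': 1e6, 'threads': 1})
#   args = parser.parse_args()
#   args.func(args)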
def script_parser2(main, config_def):
'''
General script command-line interface with 'config' and 'run' subcommands.
'''
if isinstance(config_def, dict):
# create config abstract type based on supplied dict
Config = abstract.register_type('Config', config_def)
else:
# config abstract type already defined
Config = config_def
# run
def run(args):
if args.show_config:
print(Config())
return
if args.generate_config:
abstract.dump(Config(), args.generate_config)
return
if args.file:
if args.config:
cfg = Config(**abstract.load(args.config))
else:
cfg = Config()
return main(cfg, args)
# create argument parser
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--generate-config')
parser.add_argument('-s', '--show-config', action='store_true')
parser.add_argument('file', nargs='?')
parser.add_argument('-c', '--config')
parser.add_argument('-t', '--threads', type=int)
parser.add_argument('-w', '--write-over', action='store_true')
parser.set_defaults(func=run)
return parser
''' MISC FUNCTIONS '''
def memoize_old(func):
'''
Simple memoizer to cache repeated function calls.
'''
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
def make_hashable(obj):
if not ishashable(obj):
# use tostring on ndarray since str returns truncated output
if isinstance(obj, np.ndarray):
return obj.tostring()
return str(obj)
# round float arguments to avoid round-off error affecting cache
if isinstance(obj, float):
return round(obj, 18)
return obj
memo = {}
@functools.wraps(func)
def decorator(*args, **kwargs):
# key = tuple(make_hashable(a) for a in args)
key = (tuple(make_hashable(a) for a in args),
tuple((k, make_hashable(v)) for k, v in sorted(kwargs.items())))
if key not in memo:
memo[key] = func(*args, **kwargs)
# return a deep copy to avoid issues with mutable return objects
return deepcopy(memo[key])
return decorator
def memoize(func, maxsize=20):
'''
Simple memoizer to cache repeated function calls.
'''
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
def make_hashable(obj):
if hasattr(obj, '_memoize'):
return obj._memoize()
if not ishashable(obj):
# use tostring on ndarray since str returns truncated output
if isinstance(obj, np.ndarray):
return obj.tostring()
return str(obj)
# round float arguments to avoid round-off error affecting cache
if isinstance(obj, float):
return round(obj, 18)
return obj
func.memo = {}
@functools.wraps(func)
def decorator(*args, **kwargs):
# key = tuple(make_hashable(a) for a in args)
key = (tuple(make_hashable(a) for a in args),
tuple((k, make_hashable(v)) for k, v in sorted(kwargs.items())))
if key not in func.memo:
if len(func.memo) > maxsize:
return func(*args, **kwargs)
else:
func.memo[key] = func(*args, **kwargs)
# return a deep copy to avoid issues with mutable return objects
return deepcopy(func.memo[key])
return decorator
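# Usage sketch of the memoizer above (`slow_norm` is a hypothetical example function):
#   @memoize
#   def slow_norm(x):
#       return np.sqrt(np.sum(x**2))
#   slow_norm(np.arange(3))   # computed once and cached
#   slow_norm(np.arange(3))   # cache hit; a deep copy of the cached result is returned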
class Counter:
def __init__(self):
self.count = 0
def increment(self, *args, **kwargs):
self.count += 1
def decrement(self, *args, **kwargs):
self.count -= 1
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_adjoint
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
LinearOperatorAdjoint = linear_operator_adjoint.LinearOperatorAdjoint # pylint: disable=invalid-name
class LinearOperatorAdjointTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
def _operator_and_matrix(self,
build_info,
dtype,
use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
if ensure_self_adjoint_and_pd:
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
else:
matrix = linear_operator_test_util.random_tril_matrix(
shape, dtype, force_well_conditioned=True, remove_upper=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
if ensure_self_adjoint_and_pd:
operator = LinearOperatorAdjoint(
linalg.LinearOperatorFullMatrix(
lin_op_matrix, is_positive_definite=True, is_self_adjoint=True))
else:
operator = LinearOperatorAdjoint(
linalg.LinearOperatorLowerTriangular(lin_op_matrix))
return operator, linalg.adjoint(matrix)
def test_base_operator_hint_used(self):
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
operator_adjoint = LinearOperatorAdjoint(operator)
self.assertTrue(operator_adjoint.is_positive_definite)
self.assertTrue(operator_adjoint.is_non_singular)
self.assertFalse(operator_adjoint.is_self_adjoint)
def test_supplied_hint_used(self):
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
operator_adjoint = LinearOperatorAdjoint(
operator,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator_adjoint.is_positive_definite)
self.assertTrue(operator_adjoint.is_non_singular)
self.assertFalse(operator_adjoint.is_self_adjoint)
def test_contradicting_hints_raise(self):
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=False)
with self.assertRaisesRegexp(ValueError, "positive-definite"):
LinearOperatorAdjoint(operator, is_positive_definite=True)
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
with self.assertRaisesRegexp(ValueError, "self-adjoint"):
LinearOperatorAdjoint(operator, is_self_adjoint=True)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, name="my_operator", is_non_singular=True)
operator = LinearOperatorAdjoint(operator)
self.assertEqual("my_operator_adjoint", operator.name)
class LinearOperatorAdjointNonSquareTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Tests done in the base class NonSquareLinearOperatorDerivedClassTest."""
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape_before_adjoint = list(build_info.shape)
# We need to swap the last two dimensions because we are taking the adjoint
# of this operator
shape_before_adjoint[-1], shape_before_adjoint[-2] = (
shape_before_adjoint[-2], shape_before_adjoint[-1])
matrix = linear_operator_test_util.random_normal(
shape_before_adjoint, dtype=dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = LinearOperatorAdjoint(
linalg.LinearOperatorFullMatrix(lin_op_matrix))
return operator, linalg.adjoint(matrix)
if __name__ == "__main__":
test.main()
|
# main.py
# python 2.7
# Handy Employee Management System (HEMS)
# Basic CLI Employee Management System that interfaces with
# a database.
# Chris Bugg
# Created: 5/10/17
# Imports
import os
import re
import sys
from handler import Handler
from employee import Employee
# Main Class
class HEMS:
# Handles Database interaction
database_handler = Handler()
# Constructor
def __init__(self):
# Main loop
while True:
# Clear Screen
os.system('cls' if os.name == 'nt' else 'clear')
print
print "Handy IT Employee Management System [HEMS]"
print
print "(S)earch"
print "(A)dd"
print "(U)pdate"
print "(R)emove"
print
print "(E)xit"
print
choice = raw_input('Selection: ')
# Input Sanitation
good_choices = {"s", "S", "a", "A", "u", "U", "r", "R", "e", "E"}
while choice not in good_choices:
print "Input Error!"
choice = raw_input("Selection: ")
# Search
if (choice == "s") or (choice == "S"):
self.search_screen()
# Add
elif (choice == "a") or (choice == "A"):
self.add_screen()
# Update
elif (choice == "u") or (choice == "U"):
self.update_screen()
# Remove
elif (choice == "r") or (choice == "R"):
self.remove_screen()
# Exit on any other selection
else:
sys.exit(0)
# Prints search screen
def search_screen(self):
while True:
# Clear Screen
os.system('cls' if os.name == 'nt' else 'clear')
print
print "HEMS -> Search"
print
print "(E)mployee Identification Number (EIN)"
print "(S)SN"
print "(F)irst Name"
print "(L)ast Name"
print "(P)ayrate"
print "(A)ll Employees"
print
print "(B)ack"
print
choice = raw_input('Selection: ')
# Input Sanitation
good_choices = {"e", "E", "s", "S", "f", "F", "l", "L", "p", "P", "a", "A", "b", "B"}
while choice not in good_choices:
print "Input Error!"
choice = raw_input("Selection: ")
# Clear Screen
os.system('cls' if os.name == 'nt' else 'clear')
# Employee Identification Number (EIN)
if (choice == "e") or (choice == "E"):
input = raw_input("Employee Identification Number (EIN): ")
# Input Sanitation
input = self.sanitize_digits(input)
# Perform Database search
employees = self.search('ein', input)
self.search_results(employees)
# SSN
elif (choice == "s") or (choice == "S"):
input = raw_input("SSN (555-55-5555): ")
# Input Sanitation
input = self.sanitize_ssn(input)
# Perform Database search
employees = self.search('ssn', input)
self.search_results(employees)
# First Name
elif (choice == "f") or (choice == "F"):
input = raw_input("First Name: ")
# Input Sanitation
input = self.sanitize_letters(input)
# Perform Database search
employees = self.search('first', input)
self.search_results(employees)
# Last Name
elif (choice == "l") or (choice == "L"):
input = raw_input("Last name: ")
# Input Sanitation
input = self.sanitize_letters(input)
# Perform Database search
employees = self.search('last', input)
self.search_results(employees)
# Payrate
elif (choice == "p") or (choice == "P"):
input = raw_input("Payrate: ")
# Input Sanitization
input = self.sanitize_digits(input)
# Perform Database search
employees = self.search('payrate', input)
self.search_results(employees)
# All Employees
elif (choice == "a") or (choice == "A"):
# Perform Database search
employees = self.search_all()
self.search_results(employees)
# Exit on any other selection
else:
# Break out of while and go back to main screen
break
# Searches Database based on given fields
def search(self, column, query):
return self.database_handler.search(column, query)
# Searches Database based on given fields
def search_all(self):
return self.database_handler.search_all()
# Sanitizes inputs to digits (really integers)
def sanitize_digits(self, input):
# If the input isn't all digits
while not input.isdigit():
# Ask the user to try again
print "Input Error! Not an Integer!"
input = raw_input("Input: ")
return input
# Sanitizes input to letters (a-z,A-Z)
def sanitize_letters(self, input):
# If the string isn't all alphabetic characters
while not input.isalpha():
# Ask the user to try again
print "Input Error! Not all Letters!"
input = raw_input("Input: ")
return input
# Sanitizes inputs to SSNs (555-55-5555)
def sanitize_ssn(self, input):
# Run till they put it in right
while True:
# Regex magic that matches an SSN
ssn_matcher = re.compile(r'\d{3}-\d{2}-\d{4}')
# A list of all valid SSN's in the input
matches = ssn_matcher.findall(input)
# If the list is non-empty
if matches:
# Return the first valid SSN
return matches[0]
# Ask the user to try again
print "Input Error! Not a Valid SSN (555-55-5555)!"
input = raw_input("Input: ")
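    # Illustrative example (assumed inputs, not part of the original file):
    # because findall() scans the whole string, sanitize_ssn() returns the
    # first SSN-shaped substring it finds rather than requiring an exact
    # match, e.g.
    #
    #   self.sanitize_ssn("my ssn is 123-45-6789, thanks")  ->  "123-45-6789"
    #   self.sanitize_ssn("12345")  ->  re-prompts until a valid SSN is typed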
# Prints add screen
def add_screen(self):
# Clear Screen
os.system('cls' if os.name == 'nt' else 'clear')
print
print "HEMS -> Add"
print
# Create new Employee object
employee = Employee()
# Get info from user + sanitize
employee.ein = self.sanitize_digits(raw_input("Employee Identification Number (EIN): "))
# Check if EIN is already in the system
employee_list = self.search('ein', employee.ein)
# While there are employees who match that EIN
while employee_list:
# Try again
print "Input Error! Employee already exists!"
employee.ein = self.sanitize_digits(raw_input("Employee Identification Number (EIN): "))
# And re-check
employee_list = self.search('ein', employee.ein)
employee.ssn = self.sanitize_ssn(raw_input("SSN: "))
employee.first_name = self.sanitize_letters(raw_input("First Name: "))
employee.last_name = self.sanitize_letters(raw_input("Last Name: "))
employee.payrate = self.sanitize_digits(raw_input("Payrate: "))
# Add employee to database
self.add(employee)
print
print "Employee Added"
print
raw_input("Back (Enter): ")
# Adds employee to database
def add(self, employee):
self.database_handler.add(employee)
# Prints remove screen
def remove_screen(self):
# Clear Screen
os.system('cls' if os.name == 'nt' else 'clear')
print
print "HEMS -> Remove"
print
# Create new Employee object
employee = Employee()
# Get info from user + sanitize
employee.ein = self.sanitize_digits(raw_input("Employee Identification Number (EIN): "))
print "ARE YOU SURE YOU WISH TO REMOVE THIS EMPLOYEE?"
print "YES - Remove Employee"
print "NO - Do Nothing"
print
choice = raw_input('Selection (YES[Remove]/NO[Do Nothing]): ')
# Input Sanitization
good_choices = {"YES", "NO", "N", "no", "n", "0"}
while choice not in good_choices:
print "Input Error!"
choice = raw_input("Selection (YES[Remove]/NO[Do Nothing]): ")
# Remove
if choice == "YES":
# Remove employee from database
self.remove(employee.ein)
print
print "Employee Removed"
print
else:
print
raw_input("Back (Enter): ")
# Removes employee from database
def remove(self, ein):
self.database_handler.remove(ein)
# Prints update screen
def update_screen(self):
# Clear Screen
os.system('cls' if os.name == 'nt' else 'clear')
print
print "HEMS -> Update"
print
# Create new Employee object
employee = Employee()
# Get info from user + sanitize
employee.ein = self.sanitize_digits(raw_input("Employee Identification Number (EIN): "))
# Check if EIN is already in the system
employee_list = self.search('ein', employee.ein)
# While no employees match that EIN
while not employee_list:
# Try again
print "Input Error! No Employees match that EIN!"
employee.ein = self.sanitize_digits(raw_input("Employee Identification Number (EIN): "))
# And re-check
employee_list = self.search('ein', employee.ein)
employee.ssn = self.sanitize_ssn(raw_input("SSN: "))
employee.first_name = self.sanitize_letters(raw_input("First Name: "))
employee.last_name = self.sanitize_letters(raw_input("Last Name: "))
employee.payrate = self.sanitize_digits(raw_input("Payrate: "))
# Update employee in database
self.update(employee)
print
print "Employee Updated"
print
raw_input("Back (Enter): ")
# Updates employee in database
def update(self, employee):
self.database_handler.update(employee)
# Prints employee information to the screen
def employee_print(self, employee):
print "EIN:\t " + str(employee.ein)
print "SSN:\t " + str(employee.ssn)
print "First:\t " + str(employee.first_name)
print "Last:\t " + str(employee.last_name)
print "Payrate: " + str(employee.payrate)
# Prints the results of a search
def search_results(self, employees):
print "Results: "
# For each employee found, print details
for employee in employees:
self.employee_print(employee)
print
# If there were none found
if not employees:
print "No Results Found."
print
raw_input("Back (Enter): ")
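# --- Illustrative sketch only ---------------------------------------------
# The real Handler is imported from handler.py (not shown in this dump) and,
# per the file header, talks to a database. The minimal in-memory stand-in
# below only sketches the interface HEMS relies on: search(column, query),
# search_all(), add(employee), update(employee) and remove(ein). It assumes
# Employee objects expose ein, ssn, first_name, last_name and payrate
# attributes, as used above; names here are hypothetical.
class ExampleInMemoryHandler(object):

    # Map the search columns used by HEMS onto Employee attributes.
    COLUMNS = {
        'ein': 'ein',
        'ssn': 'ssn',
        'first': 'first_name',
        'last': 'last_name',
        'payrate': 'payrate',
    }

    def __init__(self):
        # Employees keyed by EIN.
        self.employees = {}

    def add(self, employee):
        self.employees[employee.ein] = employee

    def update(self, employee):
        self.employees[employee.ein] = employee

    def remove(self, ein):
        self.employees.pop(ein, None)

    def search_all(self):
        return list(self.employees.values())

    def search(self, column, query):
        # Compare as strings, since HEMS collects all fields via raw_input().
        attribute = self.COLUMNS[column]
        return [employee for employee in self.employees.values()
                if str(getattr(employee, attribute)) == str(query)]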
# Run the CLI when this file is executed as a script
if __name__ == "__main__":
    HEMS()
|
from .PyrezException import PyrezException
class SessionLimit(PyrezException):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
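# Illustrative usage (assumed, not from the Pyrez source): callers might
# catch this exception to back off when the API reports that the session
# limit was reached. do_api_call/wait_and_retry below are hypothetical.
#
#   try:
#       do_api_call()
#   except SessionLimit:
#       wait_and_retry()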
|
#
# PySNMP MIB module BIANCA-BRICK-SIF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BIANCA-BRICK-SIF-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:38:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
DisplayString, = mibBuilder.importSymbols("RFC1158-MIB", "DisplayString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, TimeTicks, Counter32, IpAddress, Gauge32, ModuleIdentity, ObjectIdentity, MibIdentifier, Unsigned32, enterprises, NotificationType, Bits, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "TimeTicks", "Counter32", "IpAddress", "Gauge32", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "Unsigned32", "enterprises", "NotificationType", "Bits", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272))
bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4))
biboip = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5))
ipSifAliasAddressTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 28), )
if mibBuilder.loadTexts: ipSifAliasAddressTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressTable.setDescription('Contains an alias address entry: Index, IP, Mask, Interface')
ipSifAliasAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasAddressAlias"))
if mibBuilder.loadTexts: ipSifAliasAddressEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressEntry.setDescription('Contains a Stateful Inspection Firewall description for an alias name')
ipSifAliasAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifAliasAddressIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressIndex.setDescription('The Index for the address alias')
ipSifAliasAddressAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressAlias.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressAlias.setDescription('Alias Name for the Address Entry')
ipSifAliasAddressAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressAddress.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressAddress.setDescription('The ip-address for the Alias')
ipSifAliasAddressMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressMask.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressMask.setDescription('The ip Mask for the Alias')
ipSifAliasAddressInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressInterface.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressInterface.setDescription('The interface index for the alias')
ipSifAliasAddressMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("interface", 1), ("address", 2), ("range", 3), ("delete", 4))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressMode.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressMode.setDescription('Address or Interface Mode')
ipSifAliasAddressRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressRange.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressRange.setDescription('The ip Range for the Alias')
ipSifAliasAddressGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressGroup.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroup.setDescription('For values greater than zero this entry determines the IP address group this entry belongs to, see also ipSifAliasAddressGroupId')
ipSifAliasServiceTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 29), )
if mibBuilder.loadTexts: ipSifAliasServiceTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceTable.setDescription('Contains an alias service entry: Protocol, Port, Range')
ipSifAliasServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasServiceAlias"))
if mibBuilder.loadTexts: ipSifAliasServiceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceEntry.setDescription('Contains an alias service entry: Protocol, Port, Range')
ipSifAliasServiceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifAliasServiceIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceIndex.setDescription('The Index for the address alias')
ipSifAliasServiceAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceAlias.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceAlias.setDescription('Alias Name for the Service Entry')
ipSifAliasServiceProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 8, 9, 12, 16, 17, 20, 22, 27, 41, 46, 47, 50, 51, 56, 57, 65, 80, 88, 89, 94, 103, 111, 112, 115, 250, 251, 252, 253, 254, 255, 256))).clone(namedValues=NamedValues(("icmp", 1), ("igmp", 2), ("ggp", 3), ("ip", 4), ("tcp", 6), ("egp", 8), ("igp", 9), ("pup", 12), ("chaos", 16), ("udp", 17), ("hmp", 20), ("xns-idp", 22), ("rdp", 27), ("ipv6", 41), ("rsvp", 46), ("gre", 47), ("esp", 50), ("ah", 51), ("tlsp", 56), ("skip", 57), ("kryptolan", 65), ("iso-ip", 80), ("igrp", 88), ("ospf", 89), ("ipip", 94), ("pim", 103), ("ipx-in-ip", 111), ("vrrp", 112), ("l2tp", 115), ("local", 250), ("internet", 251), ("netmeeting", 252), ("udptcp", 253), ("any", 254), ("delete", 255), ("dont-verify", 256))).clone('any')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceProtocol.setDescription('The protocol for the Service alias')
ipSifAliasServicePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 65535)).clone(-1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServicePort.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServicePort.setDescription('The Port for the Service Alias.')
ipSifAliasServiceRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65536)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceRange.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceRange.setDescription('The Port Range for the Service Alias.')
ipSifAliasServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("predefined", 1), ("custom", 2))).clone('custom')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifAliasServiceType.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceType.setDescription('Specifies whether this entry was created by the IP/SIF subsystem itself or created/modified by the administrator.')
ipSifAliasServiceGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceGroup.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroup.setDescription('For values greater than zero this entry determines the IP service group this entry belongs to, see also ipSifAliasServiceGroupId')
ipSifAliasServiceSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceSourcePort.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceSourcePort.setDescription('The Source Port for the Service Alias.')
ipSifAliasServiceSourceRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65536)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceSourceRange.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceSourceRange.setDescription('The Source Port Range for the Service Alias.')
ipSifAliasServiceIcmpType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceIcmpType.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceIcmpType.setDescription('The ICMP Type for the Service Alias.')
ipSifAliasServiceIcmpCode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceIcmpCode.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceIcmpCode.setDescription('The ICMP Code for the Service Alias.')
ipSifAliasTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 30), )
if mibBuilder.loadTexts: ipSifAliasTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasTable.setDescription('Contains a Stateful Inspection Firewall (SIF) policy.')
ipSifAliasEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasOrder"))
if mibBuilder.loadTexts: ipSifAliasEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasEntry.setDescription('')
ipSifAliasOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasOrder.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasOrder.setDescription('The Order for the Stateful Inspection Entry rule')
ipSifAliasSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasSource.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasSource.setDescription('The alias Source Index for the Entry')
ipSifAliasDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasDestination.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasDestination.setDescription('The alias Destination Index for the Entry')
ipSifAliasService = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasService.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasService.setDescription('The alias Protocol/service Index for Entry')
ipSifAliasAction = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 255))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("reject", 3), ("delete", 255))).clone('access')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAction.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAction.setDescription('The Rule for the Filter')
ipSifAliasStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasStatus.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasStatus.setDescription('Defines the administrative status of this policy')
ipSifAliasPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("default", 1), ("low-latency", 2), ("high", 3), ("medium", 4), ("low", 5))).clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasPriority.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasPriority.setDescription('Defines the QoS priority of this policy')
ipSifAliasClassId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasClassId.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasClassId.setDescription('Internal ID for SIF policy to QoS policy mapping')
ipSifRejectTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 31), )
if mibBuilder.loadTexts: ipSifRejectTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectTable.setDescription('Contains the currently rejected frames with source and destination')
ipSifRejectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifRejectIndex"))
if mibBuilder.loadTexts: ipSifRejectEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectEntry.setDescription('')
ipSifRejectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectIndex.setDescription('The Index for the Reject Entry')
ipSifRejectSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectSource.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectSource.setDescription('The Reject Source for the Entry')
ipSifRejectDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectDestination.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectDestination.setDescription('The Reject Destination Index for the Entry')
ipSifRejectRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectRejects.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectRejects.setDescription('Count of rejected frames')
ipSifRejectSilence = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectSilence.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectSilence.setDescription('Last reject in seconds')
ipSifRejectProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectProtocol.setDescription('The protocol of the rejected Packet')
ipSifRejectPortLo = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectPortLo.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectPortLo.setDescription('The lowest Port rejected')
ipSifRejectPortHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifRejectPortHigh.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifRejectPortHigh.setDescription('The highest port rejected')
ipSifExpectTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 35), )
if mibBuilder.loadTexts: ipSifExpectTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectTable.setDescription('Contains expected Sessions with Source Destination ')
ipSifExpectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifExpectIndex"))
if mibBuilder.loadTexts: ipSifExpectEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectEntry.setDescription('')
ipSifExpectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectIndex.setDescription('This field is used for SIF-internal signalling and stores the index for the expected session for later matching.')
ipSifExpectSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectSource.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectSource.setDescription('The source-IP-address for the expected session. A value of 0 means ANY source-address.')
ipSifExpectDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectDestination.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectDestination.setDescription('The destination-IP-address for the expected session. A value of 0 means ANY destination-address.')
ipSifExpectProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 6, 17, 255))).clone(namedValues=NamedValues(("igmp", 2), ("ip", 4), ("tcp", 6), ("udp", 17), ("delete", 255))).clone('udp')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifExpectProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectProtocol.setDescription('The protocol of the expected session.')
ipSifExpectSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectSourcePort.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectSourcePort.setDescription('The source-port-number of the expected session. A value of 0 means ANY source-port-number.')
ipSifExpectDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectDestPort.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectDestPort.setDescription('The destination-port-number of the expected session. A value of 0 means ANY destination-port-number.')
ipSifExpectPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("default", 1), ("low-latency", 2), ("high", 3), ("medium", 4), ("low", 5))).clone('default')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectPriority.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectPriority.setDescription('Defines the QoS-priority/policy to be used for the expected SIF-session.')
ipSifExpectClassId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectClassId.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectClassId.setDescription('Internal ID for mapping SIF-policy to QoS-policy. Default-value of 0 means NOT SPECIFIED.')
ipSifExpectIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifExpectIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifExpectIfIndex.setDescription('Interface-index for which the session is expected. A value of 0 means ANY interface-index.')
ipSifAliasAddressGroupTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 47), )
if mibBuilder.loadTexts: ipSifAliasAddressGroupTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroupTable.setDescription('Defines IP address or interface group alias')
ipSifAliasAddressGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasAddressGroupId"))
if mibBuilder.loadTexts: ipSifAliasAddressGroupEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroupEntry.setDescription('Defines IP address or interface group alias')
ipSifAliasAddressGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressGroupId.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroupId.setDescription('The unique address group entry ID')
ipSifAliasAddressGroupAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressGroupAlias.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroupAlias.setDescription('Alias name for the address group entry')
ipSifAliasAddressGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifAliasAddressGroupIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroupIndex.setDescription('The index for the address group entry to be referred by an ipSifAlias entry')
ipSifAliasAddressGroupMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("interface", 1), ("address", 2), ("delete", 3))).clone('interface')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasAddressGroupMode.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasAddressGroupMode.setDescription('Specifies whether this entry defines an interface or address group')
ipSifAliasServiceGroupTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 48), )
if mibBuilder.loadTexts: ipSifAliasServiceGroupTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroupTable.setDescription('Defines IP service group alias')
ipSifAliasServiceGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasServiceGroupId"))
if mibBuilder.loadTexts: ipSifAliasServiceGroupEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroupEntry.setDescription('Defines IP service group alias')
ipSifAliasServiceGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceGroupId.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroupId.setDescription('The unique IP service group entry ID')
ipSifAliasServiceGroupAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceGroupAlias.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroupAlias.setDescription('Alias name for the IP service group entry')
ipSifAliasServiceGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifAliasServiceGroupIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroupIndex.setDescription('The index for the Ip service group entry to be referred by an ipSifAlias entry')
ipSifAliasServiceGroupMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("service", 1), ("delete", 2))).clone('service')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAliasServiceGroupMode.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAliasServiceGroupMode.setDescription('Specifies whether this entry defines an IP service group or should be deleted')
ipSifPolicyChkTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 49), )
if mibBuilder.loadTexts: ipSifPolicyChkTable.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkTable.setDescription("MIB interface to check the configured SIF policies: - for debugging purposes - for test applications - for configuration frontends. NOTE: it's a stateless check, not based on a real IP session context")
ipSifPolicyChkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifPolicyChkProtocol"), (0, "BIANCA-BRICK-SIF-MIB", "ipSifPolicyChkDestPort"))
if mibBuilder.loadTexts: ipSifPolicyChkEntry.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkEntry.setDescription('')
ipSifPolicyChkSourceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkSourceIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkSourceIfIndex.setDescription("The source interface index, for example '1' addresses the 'local' interface whereas '0' means 'don't check.'")
ipSifPolicyChkDestIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkDestIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkDestIfIndex.setDescription("The destination interface index, for example '1' addresses the 'local' interface whereas '0' means 'don't check.'")
ipSifPolicyChkSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkSource.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkSource.setDescription("The source IP address, 0.0.0.0 means 'don't check'.")
ipSifPolicyChkDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkDestination.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkDestination.setDescription("The destination IP address, 0.0.0.0 means 'don't check'.")
ipSifPolicyChkProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkProtocol.setDescription("The IP protocol number to be checked, '0' means 'don't check.'")
ipSifPolicyChkDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkDestPort.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkDestPort.setDescription("The destination port number (UDP/TCP service) to be checked, '0' means 'don't check.'")
ipSifPolicyChkRule = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("reject", 3), ("unknown", 4))).clone('unknown')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifPolicyChkRule.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkRule.setDescription('Returns the associated policy, depending on: - ipSifAliasAction - ipSifAliasOrder - ipSifAliasStatus')
ipSifPolicyChkRuleOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifPolicyChkRuleOrder.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkRuleOrder.setDescription('Returns the associated policy order (see ipSifAliasOrder).')
ipSifPolicyChkResult = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("unknown", 3))).clone('unknown')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifPolicyChkResult.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkResult.setDescription('Returns the result depending on: - ipSifPolicyChkRule - administrative status (ipSifAdminStatus) - operational status of the SIF engine')
ipSifPolicyChkState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 1), ("trigger", 2), ("running", 3), ("done", 4))).clone('initial')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkState.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkState.setDescription('Displays the current status of the policy check, when setting to trigger(2) a new check will be initiated.')
ipSifPolicyChkAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("check", 1), ("ignore", 2))).clone('check')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkAdminStatus.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkAdminStatus.setDescription('Determines whether the ipSifAdminStatus should be considered for the policy (check (1)) or not (ignore (2)).')
ipSifPolicyChkOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("check", 1), ("ignore", 2))).clone('check')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPolicyChkOperStatus.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkOperStatus.setDescription('Determines whether the SIF operational status should be considered for the policy (check (1)) or not (ignore (2)).')
ipSifPolicyChkCurrOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('down')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifPolicyChkCurrOperStatus.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPolicyChkCurrOperStatus.setDescription('The current SIF operational status.')
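# Usage note (derived from the descriptions above, not from vendor docs):
# a policy check is performed by writing the ipSifPolicyChk* parameters
# (source/destination interface index, addresses, protocol and destination
# port, where 0 / 0.0.0.0 mean "don't check"), then setting
# ipSifPolicyChkState to trigger(2). Once the state reaches done(4),
# ipSifPolicyChkRule, ipSifPolicyChkRuleOrder and ipSifPolicyChkResult hold
# the matched policy, its order and the effective result.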
ipSif = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5, 37))
ipSifAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifAdminStatus.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifAdminStatus.setDescription('Enable or disable Stateful Inspection Firewall.')
ipSifLocalFilter = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifLocalFilter.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifLocalFilter.setDescription('Enable or disable filtering on local requests')
ipSifInterfaceFilter = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifInterfaceFilter.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifInterfaceFilter.setDescription('Enable or disable filtering on same Interface packets')
ipSifSysloglevel = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("deny", 1), ("accept", 2), ("verbose", 3), ("none", 4))).clone('verbose')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifSysloglevel.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifSysloglevel.setDescription('Level of detail for SIF syslog messages: verbose: display all SIF activity; deny: display only rejects, ignore accepts; accept: display only accepts; none: disable syslogs')
ipSifUdpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(180)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifUdpTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifUdpTimeout.setDescription('Timeout on inactive UDP Session')
ipSifTcpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(3600)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifTcpTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifTcpTimeout.setDescription('Timeout on inactive TCP Session')
ipSifPPTPTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(86400)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifPPTPTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifPPTPTimeout.setDescription('Timeout on inactive PPTP Session')
ipSifDefaultTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifDefaultTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifDefaultTimeout.setDescription('Timeout on all other ip Sessions')
ipSifMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifMaxSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifMaxSessions.setDescription('Maximum number of monitored sessions')
ipSifMaxRejectEntries = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000)).clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifMaxRejectEntries.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifMaxRejectEntries.setDescription('Maximum number of ipSifRejectTable entries')
ipSifMaxRejectTtl = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 86400)).clone(3600)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifMaxRejectTtl.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifMaxRejectTtl.setDescription('Maximum time to live of the ipSifRejectTable entries in seconds')
ipSifInterfaceAliasAutoCreate = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipSifInterfaceAliasAutoCreate.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifInterfaceAliasAutoCreate.setDescription('Enable or disable automatic creation of interface aliases (see ipSifAliasAddressTable) due to created MPR interfaces visible in ifTable.')
ipSifStat = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5, 46))
ipSifStatCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatCurrSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatCurrSessions.setDescription('Current number of all monitored sessions')
ipSifStatCurrUdpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatCurrUdpSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatCurrUdpSessions.setDescription('Current number of monitored UDP sessions')
ipSifStatCurrTcpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatCurrTcpSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatCurrTcpSessions.setDescription('Current number of monitored TCP sessions')
ipSifStatCurrOtherSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatCurrOtherSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatCurrOtherSessions.setDescription('Current number of monitored non-TCP/UDP sessions')
ipSifStatCurrExpectedSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatCurrExpectedSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatCurrExpectedSessions.setDescription('Current number of created expected sessions')
ipSifStatTotalUdpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatTotalUdpSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatTotalUdpSessions.setDescription('Total number of monitored UDP sessions')
ipSifStatTotalTcpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatTotalTcpSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatTotalTcpSessions.setDescription('Total number of monitored TCP sessions')
ipSifStatTotalOtherSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatTotalOtherSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatTotalOtherSessions.setDescription('Total number of monitored non-TCP/UDP sessions')
ipSifStatTotalExpectedSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipSifStatTotalExpectedSessions.setStatus('mandatory')
if mibBuilder.loadTexts: ipSifStatTotalExpectedSessions.setDescription('Total number of created expected sessions')
mibBuilder.exportSymbols("BIANCA-BRICK-SIF-MIB", ipSifPolicyChkDestPort=ipSifPolicyChkDestPort, ipSifAliasAddressEntry=ipSifAliasAddressEntry, ipSifAliasServiceType=ipSifAliasServiceType, ipSifPolicyChkRule=ipSifPolicyChkRule, ipSifExpectTable=ipSifExpectTable, ipSifInterfaceFilter=ipSifInterfaceFilter, ipSifAliasAddressGroupIndex=ipSifAliasAddressGroupIndex, ipSifAliasSource=ipSifAliasSource, ipSifRejectTable=ipSifRejectTable, ipSifMaxRejectEntries=ipSifMaxRejectEntries, ipSifAliasServiceRange=ipSifAliasServiceRange, ipSifStatCurrSessions=ipSifStatCurrSessions, ipSifExpectEntry=ipSifExpectEntry, ipSifAliasServiceGroupTable=ipSifAliasServiceGroupTable, ipSifPPTPTimeout=ipSifPPTPTimeout, ipSifExpectSourcePort=ipSifExpectSourcePort, ipSifRejectPortHigh=ipSifRejectPortHigh, ipSifLocalFilter=ipSifLocalFilter, ipSifStatCurrExpectedSessions=ipSifStatCurrExpectedSessions, ipSifPolicyChkSource=ipSifPolicyChkSource, ipSifAliasService=ipSifAliasService, ipSifAliasAddressGroupAlias=ipSifAliasAddressGroupAlias, ipSifStatTotalTcpSessions=ipSifStatTotalTcpSessions, ipSifAliasServicePort=ipSifAliasServicePort, ipSifAliasServiceGroupIndex=ipSifAliasServiceGroupIndex, ipSifExpectDestPort=ipSifExpectDestPort, ipSifAliasAddressIndex=ipSifAliasAddressIndex, ipSifPolicyChkRuleOrder=ipSifPolicyChkRuleOrder, ipSifRejectIndex=ipSifRejectIndex, ipSifPolicyChkTable=ipSifPolicyChkTable, ipSifAliasAddressRange=ipSifAliasAddressRange, ipSifAliasAddressTable=ipSifAliasAddressTable, ipSifExpectDestination=ipSifExpectDestination, ipSifPolicyChkState=ipSifPolicyChkState, ipSifAliasServiceGroupEntry=ipSifAliasServiceGroupEntry, ipSifAliasServiceGroupMode=ipSifAliasServiceGroupMode, ipSifTcpTimeout=ipSifTcpTimeout, ipSifAliasTable=ipSifAliasTable, bintec=bintec, ipSifAliasOrder=ipSifAliasOrder, ipSifExpectClassId=ipSifExpectClassId, ipSifStat=ipSifStat, ipSifPolicyChkCurrOperStatus=ipSifPolicyChkCurrOperStatus, ipSifAliasPriority=ipSifAliasPriority, ipSifStatCurrTcpSessions=ipSifStatCurrTcpSessions, ipSifMaxSessions=ipSifMaxSessions, ipSifRejectSource=ipSifRejectSource, ipSifAliasServiceIndex=ipSifAliasServiceIndex, ipSifPolicyChkDestination=ipSifPolicyChkDestination, ipSifAliasServiceGroupAlias=ipSifAliasServiceGroupAlias, ipSifAliasServiceAlias=ipSifAliasServiceAlias, ipSifExpectIfIndex=ipSifExpectIfIndex, ipSifAliasAddressGroupTable=ipSifAliasAddressGroupTable, ipSifPolicyChkEntry=ipSifPolicyChkEntry, ipSif=ipSif, ipSifPolicyChkOperStatus=ipSifPolicyChkOperStatus, ipSifStatCurrUdpSessions=ipSifStatCurrUdpSessions, ipSifRejectEntry=ipSifRejectEntry, ipSifRejectSilence=ipSifRejectSilence, ipSifAliasEntry=ipSifAliasEntry, ipSifAdminStatus=ipSifAdminStatus, ipSifAliasServiceIcmpType=ipSifAliasServiceIcmpType, ipSifAliasAddressGroupEntry=ipSifAliasAddressGroupEntry, ipSifPolicyChkResult=ipSifPolicyChkResult, ipSifAliasAddressMask=ipSifAliasAddressMask, ipSifAliasServiceEntry=ipSifAliasServiceEntry, ipSifAliasServiceSourcePort=ipSifAliasServiceSourcePort, ipSifPolicyChkSourceIfIndex=ipSifPolicyChkSourceIfIndex, ipSifAliasAddressMode=ipSifAliasAddressMode, bibo=bibo, ipSifAliasAddressAddress=ipSifAliasAddressAddress, ipSifUdpTimeout=ipSifUdpTimeout, ipSifStatTotalOtherSessions=ipSifStatTotalOtherSessions, ipSifRejectDestination=ipSifRejectDestination, ipSifPolicyChkAdminStatus=ipSifPolicyChkAdminStatus, ipSifStatTotalUdpSessions=ipSifStatTotalUdpSessions, ipSifPolicyChkProtocol=ipSifPolicyChkProtocol, ipSifAliasAddressGroup=ipSifAliasAddressGroup, ipSifRejectRejects=ipSifRejectRejects, 
ipSifAliasServiceSourceRange=ipSifAliasServiceSourceRange, ipSifAliasServiceTable=ipSifAliasServiceTable, ipSifMaxRejectTtl=ipSifMaxRejectTtl, ipSifAliasServiceGroupId=ipSifAliasServiceGroupId, ipSifExpectProtocol=ipSifExpectProtocol, ipSifExpectIndex=ipSifExpectIndex, ipSifAliasClassId=ipSifAliasClassId, ipSifAliasStatus=ipSifAliasStatus, biboip=biboip, ipSifInterfaceAliasAutoCreate=ipSifInterfaceAliasAutoCreate, ipSifAliasServiceIcmpCode=ipSifAliasServiceIcmpCode, ipSifAliasAddressInterface=ipSifAliasAddressInterface, ipSifAliasServiceProtocol=ipSifAliasServiceProtocol, ipSifAliasAddressGroupMode=ipSifAliasAddressGroupMode, ipSifRejectPortLo=ipSifRejectPortLo, ipSifAliasAddressAlias=ipSifAliasAddressAlias, ipSifAliasAction=ipSifAliasAction, ipSifRejectProtocol=ipSifRejectProtocol, ipSifStatCurrOtherSessions=ipSifStatCurrOtherSessions, ipSifStatTotalExpectedSessions=ipSifStatTotalExpectedSessions, ipSifExpectPriority=ipSifExpectPriority, ipSifDefaultTimeout=ipSifDefaultTimeout, ipSifExpectSource=ipSifExpectSource, ipSifPolicyChkDestIfIndex=ipSifPolicyChkDestIfIndex, ipSifAliasAddressGroupId=ipSifAliasAddressGroupId, ipSifAliasDestination=ipSifAliasDestination, ipSifSysloglevel=ipSifSysloglevel, ipSifAliasServiceGroup=ipSifAliasServiceGroup)
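# Illustrative query sketch (not part of the generated module, and kept as a
# comment so it does not run when this file is loaded by mibBuilder). It
# assumes the compiled BIANCA-BRICK-SIF-MIB is on the pysnmp MIB search path
# and that an agent is reachable at the placeholder address 192.0.2.1 with
# community 'public'; the SIF administrative status could then be read with
# the pysnmp high-level API:
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   errorIndication, errorStatus, errorIndex, varBinds = next(
#       getCmd(SnmpEngine(),
#              CommunityData('public', mpModel=0),
#              UdpTransportTarget(('192.0.2.1', 161)),
#              ContextData(),
#              ObjectType(ObjectIdentity('BIANCA-BRICK-SIF-MIB',
#                                        'ipSifAdminStatus', 0))))
#   if errorIndication:
#       print(errorIndication)
#   else:
#       for name, value in varBinds:
#           print('%s = %s' % (name.prettyPrint(), value.prettyPrint()))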
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class TrustedIdProvidersOperations(object):
"""TrustedIdProvidersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create_or_update(
self, resource_group_name, account_name, trusted_id_provider_name, id_provider, name=None, custom_headers=None, raw=False, **operation_config):
"""Creates or updates the specified trusted identity provider. During
update, the trusted identity provider with the specified name will be
replaced with this new provider.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Store account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account to add or
replace the trusted identity provider.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider. This is used for differentiation of providers in the
account.
:type trusted_id_provider_name: str
:param id_provider: The URL of this trusted identity provider
:type id_provider: str
:param name: Resource name
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TrustedIdProvider
<azure.mgmt.datalake.store.models.TrustedIdProvider>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TrustedIdProvider(name=name, id_provider=id_provider)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TrustedIdProvider')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrustedIdProvider', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
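    # Illustrative call sketch (assumptions: the package's management client
    # is DataLakeStoreAccountManagementClient and exposes this operations
    # group as `trusted_id_providers`; all names and values below are
    # placeholders, not values from this file):
    #
    #   from azure.mgmt.datalake.store import DataLakeStoreAccountManagementClient
    #   client = DataLakeStoreAccountManagementClient(credentials, subscription_id)
    #   provider = client.trusted_id_providers.create_or_update(
    #       'my-resource-group', 'myadlsaccount', 'my-idp',
    #       id_provider='https://sts.windows.net/<tenant-id>/')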
def update(
self, resource_group_name, account_name, trusted_id_provider_name, id_provider=None, custom_headers=None, raw=False, **operation_config):
"""Updates the specified trusted identity provider.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Store account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account to which
to update the trusted identity provider.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider. This is used for differentiation of providers in the
account.
:type trusted_id_provider_name: str
:param id_provider: The URL of this trusted identity provider
:type id_provider: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TrustedIdProvider
<azure.mgmt.datalake.store.models.TrustedIdProvider>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = None
if id_provider is not None:
parameters = models.UpdateTrustedIdProviderParameters(id_provider=id_provider)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, 'UpdateTrustedIdProviderParameters')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrustedIdProvider', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified trusted identity provider from the specified Data
Lake Store account.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Store account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account from
which to delete the trusted identity provider.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider to delete.
:type trusted_id_provider_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None or
 :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
 if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Data Lake Store trusted identity provider.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Store account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account from
which to get the trusted identity provider.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider to retrieve.
:type trusted_id_provider_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TrustedIdProvider
 <azure.mgmt.datalake.store.models.TrustedIdProvider>` or
 :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
 if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrustedIdProvider', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_by_account(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the Data Lake Store trusted identity providers within the
specified Data Lake Store account.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Store account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account from
which to get the trusted identity providers.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TrustedIdProviderPaged
<azure.mgmt.datalake.store.models.TrustedIdProviderPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
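def _example_trusted_id_provider_usage(client):
    """Hedged usage sketch, not generated code: one plausible way to call the
    operations above through the management client. It assumes ``client`` is an
    already-authenticated DataLakeStoreAccountManagementClient and that this
    operation group is exposed as ``client.trusted_id_providers``; both names
    follow this SDK's conventions but should be verified against the installed
    package. The resource group, account, provider name and tenant id below are
    placeholders.
    """
    provider = client.trusted_id_providers.update(
        'my-resource-group', 'myadlsaccount', 'my-provider',
        id_provider='https://sts.windows.net/<tenant-id>')
    print(provider.name)
    for item in client.trusted_id_providers.list_by_account(
            'my-resource-group', 'myadlsaccount'):
        print(item.name)
    client.trusted_id_providers.delete(
        'my-resource-group', 'myadlsaccount', 'my-provider')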
|
"""
Model Checkpointing
===================
Automatically save model checkpoints during training.
"""
import os
import re
import numpy as np
from typing import Optional
import torch
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn, rank_zero_only
class ModelCheckpoint(Callback):
r"""
Save the model after every epoch if it improves.
After training finishes, use :attr:`best_model_path` to retrieve the path to the
best checkpoint file and :attr:`best_model_score` to retrieve its score.
Args:
filepath: path to save the model file.
Can contain named formatting options to be auto-filled.
Example::
# custom path
# saves a file like: my/path/epoch_0.ckpt
>>> checkpoint_callback = ModelCheckpoint('my/path/')
# save any arbitrary metrics like `val_loss`, etc. in name
# saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt
>>> checkpoint_callback = ModelCheckpoint(
... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'
... )
Can also be set to `None`, then it will be set to default location
during trainer construction.
monitor: quantity to monitor.
verbose: verbosity mode. Default: ``False``.
save_last: always saves the model at the end of the epoch. Default: ``False``.
save_top_k: if `save_top_k == k`,
the best k models according to
the quantity monitored will be saved.
if ``save_top_k == 0``, no models are saved.
if ``save_top_k == -1``, all models are saved.
Please note that the monitors are checked every `period` epochs.
if ``save_top_k >= 2`` and the callback is called multiple
times inside an epoch, the name of the saved file will be
appended with a version count starting with `v0`.
mode: one of {auto, min, max}.
If ``save_top_k != 0``, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if ``True``, then only the model's weights will be
saved (``model.save_weights(filepath)``), else the full model
is saved (``model.save(filepath)``).
period: Interval (number of epochs) between checkpoints.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import ModelCheckpoint
# saves checkpoints to 'my/path/' whenever 'val_loss' has a new min
>>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')
>>> trainer = Trainer(checkpoint_callback=checkpoint_callback)
# save epoch and val_loss in name
# saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt
>>> checkpoint_callback = ModelCheckpoint(
... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'
... )
# retrieve the best checkpoint after training
checkpoint_callback = ModelCheckpoint(filepath='my/path/')
trainer = Trainer(checkpoint_callback=checkpoint_callback)
model = ...
trainer.fit(model)
checkpoint_callback.best_model_path
"""
def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,
save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False,
mode: str = 'auto', period: int = 1, prefix: str = ''):
super().__init__()
if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:
rank_zero_warn(
f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0."
"All files in this directory will be deleted when a checkpoint is saved!"
)
self._rank = 0
self.monitor = monitor
self.verbose = verbose
if filepath is None: # will be determined by trainer at runtime
self.dirpath, self.filename = None, None
else:
if os.path.isdir(filepath):
self.dirpath, self.filename = filepath, '{epoch}'
else:
filepath = os.path.realpath(filepath)
self.dirpath, self.filename = os.path.split(filepath)
os.makedirs(self.dirpath, exist_ok=True)
self.save_last = save_last
self.save_top_k = save_top_k
self.save_weights_only = save_weights_only
self.period = period
self.epoch_last_check = None
self.prefix = prefix
self.best_k_models = {}
# {filename: monitor}
self.kth_best_model_path = ''
self.best_model_score = 0
self.best_model_path = ''
self.save_function = None
torch_inf = torch.tensor(np.inf)
mode_dict = {
'min': (torch_inf, 'min'),
'max': (-torch_inf, 'max'),
'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')
else (torch_inf, 'min'),
}
if mode not in mode_dict:
rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, '
f'fallback to auto mode.', RuntimeWarning)
mode = 'auto'
self.kth_value, self.mode = mode_dict[mode]
@property
def best(self):
rank_zero_warn("Attribute `best` has been renamed to `best_model_score` since v0.8.0"
" and will be removed in v0.10.0", DeprecationWarning)
return self.best_model_score
@property
def kth_best_model(self):
rank_zero_warn("Attribute `kth_best_model` has been renamed to `kth_best_model_path` since v0.8.0"
" and will be removed in v0.10.0", DeprecationWarning)
return self.kth_best_model_path
def _del_model(self, filepath):
if os.path.isfile(filepath):
os.remove(filepath)
def _save_model(self, filepath):
# make paths
os.makedirs(os.path.dirname(filepath), exist_ok=True)
# delegate the saving to the model
if self.save_function is not None:
self.save_function(filepath, self.save_weights_only)
else:
raise ValueError(".save_function() not set")
def check_monitor_top_k(self, current):
less_than_k_models = len(self.best_k_models) < self.save_top_k
if less_than_k_models:
return True
if not isinstance(current, torch.Tensor):
rank_zero_warn(
f'{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly.'
f' HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning
)
current = torch.tensor(current)
monitor_op = {
"min": torch.lt,
"max": torch.gt,
}[self.mode]
return monitor_op(current, self.best_k_models[self.kth_best_model_path])
def format_checkpoint_name(self, epoch, metrics, ver=None):
"""Generate a filename according to the defined template.
Example::
>>> tmpdir = os.path.dirname(__file__)
>>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))
>>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
'epoch=0.ckpt'
>>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))
>>> os.path.basename(ckpt.format_checkpoint_name(5, {}))
'epoch=005.ckpt'
>>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))
>>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))
'epoch=2-val_loss=0.12.ckpt'
>>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))
>>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
'missing=0.ckpt'
"""
# check if user passed in keys to the string
groups = re.findall(r'(\{.*?)[:\}]', self.filename)
if len(groups) == 0:
# default name
filename = f'{self.prefix}_ckpt_epoch_{epoch}'
else:
metrics['epoch'] = epoch
filename = self.filename
for tmp in groups:
name = tmp[1:]
filename = filename.replace(tmp, name + '={' + name)
if name not in metrics:
metrics[name] = 0
filename = filename.format(**metrics)
str_ver = f'_v{ver}' if ver is not None else ''
filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')
return filepath
@rank_zero_only
def on_train_start(self, trainer, pl_module):
"""
Determine model checkpoint save directory at runtime. References attributes from the
Trainer's logger to determine where to save checkpoints.
"""
if self.dirpath is not None:
return # short circuit
self.filename = '{epoch}'
if trainer.logger is not None:
# weights_save_path overrides anything
save_dir = (getattr(trainer, 'weights_save_path', None)
or getattr(trainer.logger, 'save_dir', None)
or trainer.default_root_dir)
version = trainer.logger.version if isinstance(
trainer.logger.version, str) else f'version_{trainer.logger.version}'
ckpt_path = os.path.join(
save_dir,
trainer.logger.name,
version,
"checkpoints"
)
else:
ckpt_path = os.path.join(trainer.default_root_dir, "checkpoints")
self.dirpath = ckpt_path
assert trainer.global_rank == 0, 'tried to make a checkpoint from non global_rank=0'
os.makedirs(self.dirpath, exist_ok=True)
trainer.ckpt_path = ckpt_path
trainer.weights_save_path = ckpt_path
@rank_zero_only
def on_validation_end(self, trainer, pl_module):
# only run on main process
if trainer.global_rank != 0:
return
metrics = trainer.callback_metrics
epoch = trainer.current_epoch
if self.save_top_k == 0:
# no models are saved
return
if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:
# skipping in this term
return
self.epoch_last_check = epoch
if self.save_last:
filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt')
self._save_model(filepath)
filepath = self.format_checkpoint_name(epoch, metrics)
version_cnt = 0
while os.path.isfile(filepath):
filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)
# this epoch called before
version_cnt += 1
if self.save_top_k != -1:
current = metrics.get(self.monitor)
if not isinstance(current, torch.Tensor):
rank_zero_warn(
f'The metric you returned {current} must be a `torch.Tensor` instance, checkpoint not saved.'
f' HINT: what is the value of {self.monitor} in validation_epoch_end()?', RuntimeWarning
)
if current is not None:
current = torch.tensor(current)
if current is None:
rank_zero_warn(
f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning
)
elif self.check_monitor_top_k(current):
self._do_check_save(filepath, current, epoch)
elif self.verbose > 0:
log.info(f'\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')
else:
if self.verbose > 0:
log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}')
assert trainer.global_rank == 0, 'tried to make a checkpoint from non global_rank=0'
self._save_model(filepath)
def _do_check_save(self, filepath, current, epoch):
# remove kth
del_list = []
if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:
delpath = self.kth_best_model_path
self.best_k_models.pop(self.kth_best_model_path)
del_list.append(delpath)
self.best_k_models[filepath] = current
if len(self.best_k_models) == self.save_top_k:
# monitor dict has reached k elements
_op = max if self.mode == 'min' else min
self.kth_best_model_path = _op(self.best_k_models,
key=self.best_k_models.get)
self.kth_value = self.best_k_models[self.kth_best_model_path]
_op = min if self.mode == 'min' else max
self.best_model_path = _op(self.best_k_models, key=self.best_k_models.get)
self.best_model_score = self.best_k_models[self.best_model_path]
if self.verbose > 0:
log.info(
f'\nEpoch {epoch:05d}: {self.monitor} reached'
f' {current:0.5f} (best {self.best_model_score:0.5f}), saving model to'
f' {filepath} as top {self.save_top_k}')
self._save_model(filepath)
for cur_path in del_list:
if cur_path != filepath:
self._del_model(cur_path)
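# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the callback above: normally the Trainer
# assigns `save_function` and drives the callback, but the filename template
# and saving path can be exercised standalone. The /tmp path and the dummy
# save function are placeholders for illustration only.
if __name__ == "__main__":
    demo_ckpt = ModelCheckpoint(filepath='/tmp/demo_ckpts/{epoch}-{val_loss:.2f}')

    def _dummy_save(filepath, weights_only):
        # stand-in for the Trainer's checkpointing routine: just touch a file
        open(filepath, 'w').close()

    demo_ckpt.save_function = _dummy_save
    demo_path = demo_ckpt.format_checkpoint_name(3, {'val_loss': 0.25})
    demo_ckpt._save_model(demo_path)
    print(demo_path)  # e.g. /tmp/demo_ckpts/epoch=3-val_loss=0.25.ckpt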
|
'''
After creating tractography streamlines with dipy_csd.py,
this workflow takes an atlas file and finds connections
between each region in the atlas
KRS 2018.05.04
'''
from nipype import config
config.set('execution', 'remove_unnecessary_outputs', 'false')
config.set('execution', 'crashfile_format', 'txt')
from nipype import Node, Function, Workflow, IdentityInterface, MapNode
from nipype.interfaces.io import SelectFiles, DataSink
import os
from glob import glob
# which data sampling? also used for naming
out_prefix = 'dipy_csd'
atlas_type = 'func-atlas_shift_vox-4_ax-1'
proj_dir = os.path.abspath('/om2/user/ksitek/maastricht/diffusion_faruk/')
data_dir = os.path.join(proj_dir, 'data/01_diff_preprocessed')
out_base = os.path.join(proj_dir, 'analysis/')
out_dir = os.path.join(out_base, '%s_%s/'%(out_prefix, atlas_type))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
work_dir = os.path.abspath('/om2/scratch/ksitek/%s_%s_0114/'%(out_prefix, atlas_type))
#sids = ['S02']
sids = ['S%02d' %s for s in range(1,12)]
roi_names = ['LH_CN', 'LH_SOC', 'LH_IC', 'LH_MGB',
'RH_CN', 'RH_SOC', 'RH_IC', 'RH_MGB']
rois = list(range(len(roi_names)))
'''
roi_dir = os.path.join(proj_dir, 'analysis/roi_diff/')
subj_rois = {}
for subj in sids:
subj_rois[subj] = sorted(glob('%s/%s/%s_roi*_2diff.nii.gz'%(roi_dir, subj, subj)))
print(subj_rois)
'''
roi_dir = os.path.join(proj_dir, 'analysis/roi_diff_shift/')
subj_rois = {}
for subj in sids:
subj_rois[subj] = sorted(glob('%s/%s/%s_roi*_2diff_shift_vox-4_ax-1.nii.gz'%(roi_dir, subj, subj)))
print(subj_rois)
# create the nipype workflow
wf = Workflow(name='connectivity')
wf.config['execution']['crashfile_format'] = 'txt'
# define inputs to the workflow
infosource = Node(IdentityInterface(fields=['subject_id', 'roi']), name='infosource')
infosource.iterables = [('subject_id', list(subj_rois.keys())),
('roi', rois)]
# grab data
#templates = {'trk': 'analysis/mrtrix/{subject_id}/tracks.trk'}
templates = {'trk': 'analysis/fathresh-0.5/{subject_id}/recon/{subject_id}_csd_streamline.trk'}
grabber = Node(SelectFiles(templates), name='grabber')
grabber.inputs.base_directory = proj_dir
grabber.inputs.sort_filelist = True
wf.connect(infosource, 'subject_id', grabber, 'subject_id')
''' define ROI mask files '''
# get subject-specific list of ROI filenames:
def rois_fetcher(subj_rois, subj):
return subj_rois[subj], subj
fetch_rois = Node(Function(input_names=['subj_rois', 'subj'],
output_names=['target_roi_filenames', 'subj'],
function=rois_fetcher),
name='fetch_rois')
fetch_rois.inputs.subj_rois = subj_rois
wf.connect(infosource, 'subject_id', fetch_rois, 'subj')
# get single ROI filename for a specific subject:
def roi_fetcher(subj_rois, subj, roi_idx):
return subj_rois[subj][roi_idx], roi_idx
fetch_roi = Node(Function(input_names=['subj_rois', 'subj', 'roi_idx'],
output_names=['seed_roi', 'roi_idx'],
function=roi_fetcher),
name='fetch_roi')
fetch_roi.inputs.subj_rois = subj_rois
wf.connect(fetch_rois, 'subj', fetch_roi, 'subj')
wf.connect(infosource, 'roi', fetch_roi, 'roi_idx')
''' streamline filtering '''
# filter streamlines by seed region of interest
def sl_filter(streamlines, target_mask):
from dipy.tracking.utils import target
#from nilearn.image import resample_img
import numpy as np
import os
import nibabel as nib
trk_file = nib.streamlines.load(streamlines)
streams = trk_file.streamlines
hdr = trk_file.header
# resample mask to resolution of input data & get data
#target_resamp = resample_img(target_mask, affine)
target_mask_img = nib.load(target_mask)
affine = target_mask_img.affine
target_mask_bool = np.zeros(target_mask_img.get_data().shape)
target_mask_bool[target_mask_img.get_data().round()>0]=1 # rounding is key!
target_sl_generator = target(streams, target_mask_bool, affine, include=True)
target_streams = list(target_sl_generator)
# create new filtered streamlines .trk file
tractogram = nib.streamlines.Tractogram(target_streams)
tractogram.affine_to_rasmm = np.eye(4)
trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
# get the filename
import re
label = re.search(r'(?<=Fix_)\w+',target_mask).group(0)[:-6]
# save streamlines to filename
target_streamlines = os.path.abspath('target_streamlines_region_%s.trk'%label)
nib.streamlines.save(trk_file, target_streamlines)
return target_streamlines, target_mask, affine, label
filter_streamlines = Node(Function(input_names = ['streamlines', 'target_mask'],
output_names = ['target_streamlines', 'target_mask',
'affine', 'seed_label'],
function = sl_filter),
name = 'filter_streamlines')
filter_streamlines.inputs.roi_names = roi_names
wf.connect(grabber, 'trk', filter_streamlines, 'streamlines')
wf.connect(fetch_roi, 'seed_roi', filter_streamlines, 'target_mask')
# filter streamlines by target ROI (for each seed ROI)
def sl_filter_target(streamlines, target_mask, affine, seed_label):
from dipy.tracking.utils import target
from nilearn.image import resample_img
import numpy as np
import os
import nibabel as nib
trk_file = nib.streamlines.load(streamlines)
streams = trk_file.streamlines
hdr = trk_file.header
# resample mask to resolution of input data & get data
#target_resamp = resample_img(target_mask, affine)
target_mask_img = nib.load(target_mask)
affine = target_mask_img.affine
target_mask_bool = np.zeros(target_mask_img.get_data().shape)
target_mask_bool[target_mask_img.get_data().round()>0]=1 # rounding is key!
target_sl_generator = target(streams, target_mask_bool, affine, include=True)
target_streams = list(target_sl_generator)
# create new filtered streamlines .trk file
tractogram = nib.streamlines.Tractogram(target_streams)
tractogram.affine_to_rasmm = np.eye(4)
trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
# get the filename
import re
label = re.search(r'(?<=Fix_)\w+',target_mask).group(0)[:-6]
# save streamlines to filename
target_streamlines = os.path.abspath('target_streamlines_seed-%s_target-%s.trk'%(seed_label, label))
nib.streamlines.save(trk_file, target_streamlines)
return target_streamlines
filter_streamlines_target = MapNode(Function(input_names = ['streamlines', 'target_mask',
'affine', 'seed_label'],
output_names = ['target_streamlines'],
function = sl_filter_target),
iterfield = ['target_mask'],
name = 'filter_streamlines_target')
wf.connect(fetch_rois, 'target_roi_filenames', filter_streamlines_target, 'target_mask')
wf.connect(filter_streamlines, 'target_streamlines', filter_streamlines_target, 'streamlines')
wf.connect(filter_streamlines, 'affine', filter_streamlines_target, 'affine')
wf.connect(filter_streamlines, 'seed_label', filter_streamlines_target, 'seed_label')
''' workflow '''
# create the output data sink
ds = Node(DataSink(parameterization=False), name='sinker')
ds.inputs.base_directory = out_dir
ds.plugin_args = {'overwrite': True}
wf.connect(infosource, 'subject_id', ds, 'container')
wf.connect(filter_streamlines_target, 'target_streamlines', ds, 'target_streamlines')
# define the working directory and run the workflow
wf.base_dir = work_dir
wf.run(plugin='MultiProc')
|
from .contrib import * # noqa
from .models import * # noqa
from .utils import * # noqa
from .checkpoint import load_ckpt, save_ckpt, remove_ckpt, clean_ckpt
from .cmd_args import parse_args
from .config import (cfg, set_cfg, load_cfg, dump_cfg, set_run_dir,
set_out_dir, get_fname)
from .init import init_weights
from .loader import create_loader
from .logger import set_printing, create_logger
from .loss import compute_loss
from .model_builder import create_model
from .optim import create_optimizer, create_scheduler
from .train import train
from .register import (register_base, register_act, register_node_encoder,
register_edge_encoder, register_stage, register_head,
register_layer, register_pooling, register_network,
register_config, register_dataset, register_loader,
register_optimizer, register_scheduler, register_loss,
register_train, register_metric)
__all__ = classes = [
'load_ckpt',
'save_ckpt',
'remove_ckpt',
'clean_ckpt',
'parse_args',
'cfg',
'set_cfg',
'load_cfg',
'dump_cfg',
'set_run_dir',
'set_out_dir',
'get_fname',
'init_weights',
'create_loader',
'set_printing',
'create_logger',
'compute_loss',
'create_model',
'create_optimizer',
'create_scheduler',
'train',
'register_base',
'register_act',
'register_node_encoder',
'register_edge_encoder',
'register_stage',
'register_head',
'register_layer',
'register_pooling',
'register_network',
'register_config',
'register_dataset',
'register_loader',
'register_optimizer',
'register_scheduler',
'register_loss',
'register_train',
'register_metric',
]
|
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
from pylint.reporters.base_reporter import BaseReporter
class CollectingReporter(BaseReporter):
"""collects messages"""
name = "collector"
def __init__(self):
BaseReporter.__init__(self)
self.messages = []
def handle_message(self, msg):
self.messages.append(msg)
_display = None
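# ---------------------------------------------------------------------------
# Hedged usage sketch: run pylint programmatically and read the collected
# messages instead of printing a report. "some_module.py" is a placeholder;
# the `exit` keyword is named `do_exit` in older pylint releases, so check
# the version you have installed.
if __name__ == "__main__":
    from pylint.lint import Run

    reporter = CollectingReporter()
    Run(["some_module.py"], reporter=reporter, exit=False)
    for message in reporter.messages:
        print(message.msg_id, message.symbol, message.msg)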
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import requests
import sys
import tarfile
from lxml import etree
from tqdm import tqdm
INFOLEG_URL = 'https://cs.famaf.unc.edu.ar/~ccardellino/resources/mirel/law_text_cleaned.tar.bz2'
INFOLEG_TMP = '/tmp/infoleg.tar.bz2'
INFOLEG_OUT = '/tmp/law_text_cleaned/'
INFOLEG_FIL = './corpora/infoleg/infoleg.txt'
os.makedirs(os.path.dirname(INFOLEG_FIL), exist_ok=True)
print("Downloading file...", file=sys.stderr)
req = requests.get(INFOLEG_URL)
with open(INFOLEG_TMP, 'wb') as fh:
fh.write(req.content)
with tarfile.open(INFOLEG_TMP, 'r') as fi, open(INFOLEG_FIL, 'w') as fo:
print("Extracting file...", file=sys.stderr)
fi.extractall(path="/tmp")
print("Parsing files...", file=sys.stderr)
for fname in tqdm(os.listdir(INFOLEG_OUT)):
root = etree.parse(INFOLEG_OUT + fname).getroot()
print(root.find('text').text, file=fo)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="SymCircuit",
version="0.2.0",
author="Martok",
author_email="martok@martoks-place.de",
description="Symbolic electronic circuit analysis",
long_description=open("README.md","rt").read(),
long_description_content_type="text/markdown",
url="https://github.com/martok/py-symcircuit",
project_urls={
"Bug Tracker": "https://github.com/martok/py-symcircuit/issues",
},
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
],
packages=find_packages(),
python_requires='>=3.6',
install_requires=[
"sympy",
],
extras_require={
"EE": [
"networkx",
"numpy",
"mplotkit"
],
},
)
|
from ...plugin import hookimpl
from ..custom import CustomBuilder
from ..sdist import SdistBuilder
from ..wheel import WheelBuilder
@hookimpl
def hatch_register_builder():
return [CustomBuilder, SdistBuilder, WheelBuilder]
|
import os
import signal
import sys
import time
import pytest
import ray
import ray.ray_constants as ray_constants
from ray._private.cluster_utils import Cluster
from ray.test_utils import RayTestTimeoutException, get_other_nodes
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_workers_separate_multinode(request):
num_nodes = request.param[0]
num_initial_workers = request.param[1]
# Start the Ray processes.
cluster = Cluster()
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_initial_workers)
ray.init(address=cluster.address)
yield num_nodes, num_initial_workers
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_worker_failed(ray_start_workers_separate_multinode):
num_nodes, num_initial_workers = (ray_start_workers_separate_multinode)
@ray.remote
def get_pids():
time.sleep(0.25)
return os.getpid()
start_time = time.time()
pids = set()
while len(pids) < num_nodes * num_initial_workers:
new_pids = ray.get([
get_pids.remote()
for _ in range(2 * num_nodes * num_initial_workers)
])
for pid in new_pids:
pids.add(pid)
if time.time() - start_time > 60:
raise RayTestTimeoutException(
"Timed out while waiting to get worker PIDs.")
@ray.remote
def f(x):
time.sleep(0.5)
return x
# Submit more tasks than there are workers so that all workers and
# cores are utilized.
object_refs = [f.remote(i) for i in range(num_initial_workers * num_nodes)]
object_refs += [f.remote(object_ref) for object_ref in object_refs]
# Allow the tasks some time to begin executing.
time.sleep(0.1)
# Kill the workers as the tasks execute.
for pid in pids:
try:
os.kill(pid, SIGKILL)
except OSError:
# The process may have already exited due to worker capping.
pass
time.sleep(0.1)
# Make sure that we either get the object or we get an appropriate
# exception.
for object_ref in object_refs:
try:
ray.get(object_ref)
except (ray.exceptions.RayTaskError,
ray.exceptions.WorkerCrashedError):
pass
def _test_component_failed(cluster, component_type):
"""Kill a component on all worker nodes and check workload succeeds."""
# Submit many tasks with many dependencies.
@ray.remote
def f(x):
# Sleep to make sure that tasks actually fail mid-execution.
time.sleep(0.01)
return x
@ray.remote
def g(*xs):
# Sleep to make sure that tasks actually fail mid-execution. We
# only use it for direct calls because the test already takes a
# long time to run with the raylet codepath.
time.sleep(0.01)
return 1
# Kill the component on all nodes except the head node as the tasks
# execute. Do this in a loop while submitting tasks between each
# component failure.
time.sleep(0.1)
worker_nodes = get_other_nodes(cluster)
assert len(worker_nodes) > 0
for node in worker_nodes:
process = node.all_processes[component_type][0].process
# Submit a round of tasks with many dependencies.
x = 1
for _ in range(1000):
x = f.remote(x)
xs = [g.remote(1)]
for _ in range(100):
xs.append(g.remote(*xs))
xs.append(g.remote(1))
# Kill a component on one of the nodes.
process.terminate()
time.sleep(1)
process.kill()
process.wait()
assert not process.poll() is None
# Make sure that we can still get the objects after the
# executing tasks died.
ray.get(x)
ray.get(xs)
def check_components_alive(cluster, component_type, check_component_alive):
"""Check that a given component type is alive on all worker nodes."""
worker_nodes = get_other_nodes(cluster)
assert len(worker_nodes) > 0
for node in worker_nodes:
process = node.all_processes[component_type][0].process
if check_component_alive:
assert process.poll() is None
else:
print("waiting for " + component_type + " with PID " +
str(process.pid) + "to terminate")
process.wait()
print("done waiting for " + component_type + " with PID " +
str(process.pid) + "to terminate")
assert not process.poll() is None
@pytest.mark.parametrize(
"ray_start_cluster",
[{
"num_cpus": 8,
"num_nodes": 4,
"_system_config": {
# Raylet codepath is not stable with a shorter timeout.
"num_heartbeats_timeout": 10
},
}],
indirect=True)
def test_raylet_failed(ray_start_cluster):
cluster = ray_start_cluster
# Kill all raylets on worker nodes.
_test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET)
# The plasma stores should still be alive on the worker nodes.
check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
True)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
"""
This module defines a data structure for manipulating HTTP headers.
"""
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Tuple,
Union,
)
__all__ = ["Headers", "MultipleValuesError"]
class MultipleValuesError(LookupError):
"""
Exception raised when :class:`Headers` has more than one value for a key.
"""
def __str__(self) -> str:
# Implement the same logic as KeyError_str in Objects/exceptions.c.
if len(self.args) == 1:
return repr(self.args[0])
return super().__str__()
class Headers(MutableMapping[str, str]):
"""
Efficient data structure for manipulating HTTP headers.
A :class:`list` of ``(name, values)`` is inefficient for lookups.
A :class:`dict` doesn't suffice because header names are case-insensitive
and multiple occurrences of headers with the same name are possible.
:class:`Headers` stores HTTP headers in a hybrid data structure to provide
efficient insertions and lookups while preserving the original data.
In order to account for multiple values with minimal hassle,
:class:`Headers` follows this logic:
- When getting a header with ``headers[name]``:
- if there's no value, :exc:`KeyError` is raised;
- if there's exactly one value, it's returned;
- if there's more than one value, :exc:`MultipleValuesError` is raised.
- When setting a header with ``headers[name] = value``, the value is
appended to the list of values for that header.
- When deleting a header with ``del headers[name]``, all values for that
header are removed (this is slow).
Other methods for manipulating headers are consistent with this logic.
As long as no header occurs multiple times, :class:`Headers` behaves like
:class:`dict`, except keys are lower-cased to provide case-insensitivity.
Two methods support manipulating multiple values explicitly:
- :meth:`get_all` returns a list of all values for a header;
- :meth:`raw_items` returns an iterator of ``(name, values)`` pairs.
"""
__slots__ = ["_dict", "_list"]
def __init__(self, *args: Any, **kwargs: str) -> None:
self._dict: Dict[str, List[str]] = {}
self._list: List[Tuple[str, str]] = []
# MutableMapping.update calls __setitem__ for each (name, value) pair.
self.update(*args, **kwargs)
def __str__(self) -> str:
return "".join(f"{key}: {value}\r\n" for key, value in self._list) + "\r\n"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._list!r})"
def copy(self) -> "Headers":
copy = self.__class__()
copy._dict = self._dict.copy()
copy._list = self._list.copy()
return copy
def serialize(self) -> bytes:
# Headers only contain ASCII characters.
return str(self).encode()
# Collection methods
def __contains__(self, key: object) -> bool:
return isinstance(key, str) and key.lower() in self._dict
def __iter__(self) -> Iterator[str]:
return iter(self._dict)
def __len__(self) -> int:
return len(self._dict)
# MutableMapping methods
def __getitem__(self, key: str) -> str:
value = self._dict[key.lower()]
if len(value) == 1:
return value[0]
else:
raise MultipleValuesError(key)
def __setitem__(self, key: str, value: str) -> None:
self._dict.setdefault(key.lower(), []).append(value)
self._list.append((key, value))
def __delitem__(self, key: str) -> None:
key_lower = key.lower()
self._dict.__delitem__(key_lower)
# This is inefficient. Fortunately deleting HTTP headers is uncommon.
self._list = [(k, v) for k, v in self._list if k.lower() != key_lower]
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Headers):
return NotImplemented
return self._list == other._list
def clear(self) -> None:
"""
Remove all headers.
"""
self._dict = {}
self._list = []
# Methods for handling multiple values
def get_all(self, key: str) -> List[str]:
"""
Return the (possibly empty) list of all values for a header.
:param key: header name
"""
return self._dict.get(key.lower(), [])
def raw_items(self) -> Iterator[Tuple[str, str]]:
"""
Return an iterator of all values as ``(name, value)`` pairs.
"""
return iter(self._list)
HeadersLike = Union[Headers, Mapping[str, str], Iterable[Tuple[str, str]]]
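# ---------------------------------------------------------------------------
# Minimal sketch of the multi-value semantics documented above; it only uses
# names defined in this module.
if __name__ == "__main__":
    headers = Headers()
    headers["Server"] = "demo"
    headers["Set-Cookie"] = "a=1"
    headers["Set-Cookie"] = "b=2"          # __setitem__ appends rather than overwrites
    print(headers["Server"])               # exactly one value -> "demo"
    print(headers.get_all("Set-Cookie"))   # ['a=1', 'b=2']
    try:
        headers["Set-Cookie"]              # more than one value
    except MultipleValuesError as exc:
        print("multiple values for", exc)
    del headers["Set-Cookie"]              # drops every value for the header
    print(list(headers.raw_items()))       # [('Server', 'demo')]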
|
# -*- coding: utf-8 -*-
"""
Inverse Logistic Regression Recommender
Created on 2019
@author: Alex Xu <ayx2@case.edu>
"""
from .predict_feature_values import InverseLogisticRegressionRecommender
from .evaluate import validate
from .evaluate import _error_metrics_
__all__ = [
'validate',
'_error_metrics_'
]
|
import enum
from collections import deque


class Animal(enum.Enum):
    cat = 'cat'
    dog = 'dog'


class AnimalShelter:
    def __init__(self):
        # each entry is [animal, arrival_position]; deques give O(1) FIFO operations
        self.cats = deque()
        self.dogs = deque()
        self.pos = 0

    # Time complexity: O(1)
    # Space complexity: O(1)
    def enqueue(self, animal: Animal):
        if animal == Animal.cat:
            self.cats.append([animal, self.pos])
        else:
            self.dogs.append([animal, self.pos])
        self.pos += 1
        return self

    # Time complexity: O(1)
    # Space complexity: O(1)
    def dequeue(self):
        if not self.dogs and not self.cats:
            raise Exception('no animal in shelter')
        if not self.dogs:
            return self.cats.popleft()
        if not self.cats:
            return self.dogs.popleft()
        dog_pos = self.dogs[0][1]
        cat_pos = self.cats[0][1]
        if cat_pos < dog_pos:
            return self.cats.popleft()
        else:
            return self.dogs.popleft()

    # Time complexity: O(1)
    # Space complexity: O(1)
    def dequeueCat(self):
        if not self.cats:
            raise Exception('no cats in shelter')
        return self.cats.popleft()

    # Time complexity: O(1)
    # Space complexity: O(1)
    def dequeueDog(self):
        if not self.dogs:
            raise Exception('no dogs in shelter')
        return self.dogs.popleft()

    def __str__(self):
        return 'cats: ' + str(list(self.cats)) + '\ndogs: ' + str(list(self.dogs))
if __name__ == "__main__":
shelter = AnimalShelter()
shelter.enqueue(Animal.cat).enqueue(Animal.dog).enqueue(Animal.cat)
print(shelter)
print(shelter.dequeue())
print(shelter.dequeue())
print(shelter.dequeue())
# print(shelter.dequeue())
|
"""The Fast Gradient Method attack."""
import numpy as np
import tensorflow as tf
def fast_gradient_method(model_fn, x, eps, ord, clip_min=None, clip_max=None, y=None,
targeted=False, sanity_checks=False):
"""
Tensorflow 2.0 implementation of the Fast Gradient Method.
:param model_fn: a callable that takes an input tensor and returns the model logits.
:param x: input tensor.
:param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.
:param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2.
:param clip_min: (optional) float. Minimum float value for adversarial example components.
:param clip_max: (optional) float. Maximum float value for adversarial example components.
:param y: (optional) Tensor with true labels. If targeted is true, then provide the
target label. Otherwise, only provide this parameter if you'd like to use true
labels when crafting adversarial samples. Otherwise, model predictions are used
as labels to avoid the "label leaking" effect (explained in this paper:
https://arxiv.org/abs/1611.01236). Default is None.
:param targeted: (optional) bool. Is the attack targeted or untargeted?
Untargeted, the default, will try to make the label incorrect.
Targeted will instead try to move in the direction of being more like y.
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime /
memory or for unit tests that intentionally pass strange input)
:return: a tensor for the adversarial example
"""
if ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
asserts.append(tf.math.greater_equal(x, clip_min))
if clip_max is not None:
asserts.append(tf.math.less_equal(x, clip_max))
if y is None:
# Using model predictions as ground truth to avoid label leaking
y = tf.argmax(model_fn(x), 1)
grad = compute_gradient(model_fn, x, y, targeted)
optimal_perturbation = optimize_linear(grad, eps, ord)
# Add perturbation to original example to obtain adversarial example
adv_x = x + optimal_perturbation
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
if sanity_checks:
assert np.all(asserts)
return adv_x
# Due to performance reasons, this function is wrapped inside of tf.function decorator.
# Not using the decorator here, or letting the user wrap the attack in tf.function is way
# slower on Tensorflow 2.0.0-alpha0.
@tf.function
def compute_gradient(model_fn, x, y, targeted):
"""
Computes the gradient of the loss with respect to the input tensor.
:param model_fn: a callable that takes an input tensor and returns the model logits.
:param x: input tensor
:param y: Tensor with true labels. If targeted is true, then provide the target label.
:param targeted: bool. Is the attack targeted or untargeted? Untargeted, the default, will
try to make the label incorrect. Targeted will instead try to move in the
direction of being more like y.
:return: A tensor containing the gradient of the loss with respect to the input tensor.
"""
loss_fn = tf.nn.sparse_softmax_cross_entropy_with_logits
with tf.GradientTape() as g:
g.watch(x)
# Compute loss
loss = loss_fn(labels=y, logits=model_fn(x))
if targeted: # attack is targeted, minimize loss of target label rather than maximize loss of correct label
loss = -loss
# Define gradient of loss wrt input
grad = g.gradient(loss, x)
return grad
def optimize_linear(grad, eps, ord=np.inf):
"""
Solves for the optimal input to a linear function under a norm constraint.
Optimal_perturbation = argmax_{eta, ||eta||_{ord} < eps} dot(eta, grad)
:param grad: tf tensor containing a batch of gradients
:param eps: float scalar specifying size of constraint region
:param ord: int specifying order of norm
:returns:
tf tensor containing optimal perturbation
"""
# Convert the iterator returned by `range` into a list.
axis = list(range(1, len(grad.get_shape())))
avoid_zero_div = 1e-12
if ord == np.inf:
# Take sign of gradient
optimal_perturbation = tf.sign(grad)
# The following line should not change the numerical results. It applies only because
# `optimal_perturbation` is the output of a `sign` op, which has zero derivative anyway.
# It should not be applied for the other norms, where the perturbation has a non-zero derivative.
optimal_perturbation = tf.stop_gradient(optimal_perturbation)
elif ord == 1:
abs_grad = tf.abs(grad)
sign = tf.sign(grad)
max_abs_grad = tf.reduce_max(abs_grad, axis, keepdims=True)
tied_for_max = tf.dtypes.cast(tf.equal(abs_grad, max_abs_grad), dtype=tf.float32)
num_ties = tf.reduce_sum(tied_for_max, axis, keepdims=True)
optimal_perturbation = sign * tied_for_max / num_ties
elif ord == 2:
square = tf.maximum(avoid_zero_div, tf.reduce_sum(tf.square(grad), axis, keepdims=True))
optimal_perturbation = grad / tf.sqrt(square)
else:
raise NotImplementedError("Only L-inf, L1 and L2 norms are currently implemented.")
# Scale perturbation to be the solution for the norm=eps rather than norm=1 problem
scaled_perturbation = tf.multiply(eps, optimal_perturbation)
return scaled_perturbation
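# ---------------------------------------------------------------------------
# Hedged usage sketch: untargeted L-inf attack against a tiny, untrained Keras
# model on random inputs. The model and data are placeholders chosen only to
# make the call signature concrete; they are not part of the original module.
if __name__ == "__main__":
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(16, activation="relu"),
        tf.keras.layers.Dense(3),  # raw logits, as `model_fn` is expected to return
    ])
    x = tf.random.uniform((4, 8), minval=0.0, maxval=1.0)
    adv_x = fast_gradient_method(model, x, eps=0.1, ord=np.inf,
                                 clip_min=0.0, clip_max=1.0)
    # The perturbation is bounded by eps in the L-inf norm.
    print(tf.reduce_max(tf.abs(adv_x - x)).numpy())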
|
# Copyright 2016 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import os
import logging
TAG = 'version_2'
HASH = '317b22ad9b6b2f7b40fac7b7c426da2fa2da1803bbe58d480631f1e5b190d730763f2768c77c72affa806c69a1e703f401b15a1be3ec611cd259950d5ebc3711'
def needed(settings):
return settings.USE_SDL_NET == 2
def get(ports, settings, shared):
sdl_build = os.path.join(ports.get_build_dir(), 'sdl2')
assert os.path.exists(sdl_build), 'You must use SDL2 to use SDL2_net'
ports.fetch_project('sdl2_net', 'https://github.com/emscripten-ports/SDL2_net/archive/' + TAG + '.zip', 'SDL2_net-' + TAG, sha512hash=HASH)
def create(final):
logging.info('building port: sdl2_net')
src_dir = os.path.join(ports.get_dir(), 'sdl2_net', 'SDL2_net-' + TAG)
ports.install_headers(src_dir, target='SDL2')
srcs = 'SDLnet.c SDLnetselect.c SDLnetTCP.c SDLnetUDP.c'.split()
commands = []
o_s = []
for src in srcs:
o = os.path.join(ports.get_build_dir(), 'sdl2_net', src + '.o')
commands.append([shared.EMCC, '-c', os.path.join(src_dir, src),
'-O2', '-s', 'USE_SDL=2', '-o', o, '-w'])
o_s.append(o)
shared.safe_ensure_dirs(os.path.dirname(o_s[0]))
ports.run_commands(commands)
ports.create_lib(final, o_s)
return [shared.Cache.get_lib('libSDL2_net.a', create, what='port')]
def clear(ports, settings, shared):
shared.Cache.erase_lib('libSDL2_net.a')
def process_args(ports):
return []
def show():
return 'SDL2_net (zlib license)'
|
from . import transforms
|
import inspect
import sys
from pathlib import Path
from types import TracebackType
from typing import NoReturn, Optional, Union
import rich.console
import typer
from . import Severity, Verbosity
from .config import config
__all__ = ["error", "warning", "info", "debug"]
COLOR_MAP = {
Severity.ERROR: "red",
Severity.WARNING: "yellow",
Severity.INFO: "blue",
Severity.DEBUG: "dim",
}
VERBOSITY_MAP = {
Severity.ERROR: Verbosity.QUIET,
Severity.WARNING: Verbosity.NORMAL,
Severity.INFO: Verbosity.VERBOSE,
Severity.DEBUG: Verbosity.DEBUG,
}
def error(
error: Exception,
file: Optional[Union[str, Path]] = None,
prev_except: Optional[Exception] = None,
) -> NoReturn:
"""Handle an error in pykeyset code.
Depending on the current configuration, this function can raise the error as an exception,
print the error to the terminal, and/or exit the script."""
# Try to remove this call from the traceback. This will make it look like the exception was
# raised where this function was called, not inside. Note: this is not guaranteed to work on
# version < 3.7 or implementation != CPython, in which case we just pass None to raise_or_print
frame = inspect.currentframe()
if frame is not None:
frame = frame.f_back
conf = config()
if conf.verbosity >= Verbosity.QUIET:
message = format_error(error)
print_message(message, Severity.ERROR, file)
if conf.is_script:
raise typer.Exit(1)
else:
# Create a traceback from frame (Python >= 3.7 only)
if frame is not None and sys.version_info >= (3, 7):
tb = TracebackType(
tb_next=None, tb_frame=frame, tb_lasti=frame.f_lasti, tb_lineno=frame.f_lineno
)
raise error.with_traceback(tb) from prev_except
else:
raise error from prev_except # pragma: no cover
def warning(
error: Exception,
resolution: str,
file: Optional[str] = None,
prev_except: Optional[Exception] = None,
) -> None:
"""Handle an warning in pykeyset code.
Depending on the current configuration, this function can raise the warning as an exception or
print the warning to the terminal, or silently ignore it."""
# See comment in error() for details. Warnings can also end up raising an exception (if
# raise_warnings is set in the config).
frame = inspect.currentframe()
if frame is not None:
frame = frame.f_back
conf = config()
if conf.verbosity >= Verbosity.NORMAL:
# Only format the resolution if this warning will not be raised. Otherwise the resolution
# doesn't resolve anything
if conf.raise_warnings:
message = format_error(error)
else:
message = format_error(error, resolution)
print_message(message, Severity.WARNING, file)
if conf.raise_warnings:
if conf.is_script:
raise typer.Exit(1)
else:
# Create a traceback from frame (Python >= 3.7 only)
if frame is not None and sys.version_info >= (3, 7):
tb = TracebackType(
tb_next=None, tb_frame=frame, tb_lasti=frame.f_lasti, tb_lineno=frame.f_lineno
)
raise error.with_traceback(tb) from prev_except
else:
raise error from prev_except # pragma: no cover
def info(message: str, file: Optional[str] = None):
if config().verbosity >= Verbosity.VERBOSE:
print_message(message, Severity.INFO, file)
def debug(message: str, file: Optional[str] = None):
if config().verbosity >= Verbosity.DEBUG:
print_message(message, Severity.DEBUG, file)
def format_error(error: Exception, resolution: Optional[str] = None) -> str:
if isinstance(error, OSError):
if error.filename is not None:
filename = Path(error.filename).name
result = f"cannot open file {format_filename(filename)}: {error.strerror.lower()}"
elif error.strerror is not None:
result = error.strerror.lower()
else:
result = str(error).lower()
else:
result = f"{error}"
if resolution is not None:
result = f"{result}. {resolution}"
return result
def format_filename(filename: Union[str, Path]) -> str:
return f"[bold magenta]{filename}[/bold magenta]"
def print_message(
message: str, severity: Severity, filename: Optional[Union[str, Path]] = None
) -> None:
color = COLOR_MAP.get(severity, "magenta")
prefix = severity.name.capitalize()
console = rich.console.Console(force_terminal=config().color, stderr=True)
console.print(f"[{color} bold]{prefix}:[/{color} bold] {message}")
if filename is not None:
console.print(f" In file {format_filename(filename)}")
|
import pandas as pd
import numpy as np
def load_and_process_data(path):
rawData = pd.read_csv(path, sep=";")
rawData = rawData[rawData.columns[:-2]].dropna().rename(columns={"RH": "Relative Humidity", "AH": "Absolute Humidity", "T": "Temp"})
for col in rawData.columns:
# convert comma-decimal strings into floats
if rawData[col].dtypes == object:
try:
rawData[col] = rawData[col].str.replace(",", ".")
rawData[col] = rawData[col].astype(float)
except ValueError:
pass
# remove rows with values less than 0
if rawData[col].dtypes==np.float64:
rawData = rawData[rawData[col]>=0]
return rawData
def getAverageConcentration(df, column):
    '''
    takes in a dataFrame and a string column name
    returns an array of 24 floats representing the average value of the column for every hour of the day
    '''
    averages = np.zeros(24)
    for hour in range(24):
        time = "%s.00.00" % hour
        validColumns = df[df["Time"] == time]
        if validColumns.shape[0] > 0:
            # average only over the rows recorded at this hour
            averages[hour] = float(validColumns[column].sum()) / validColumns.shape[0]
    return averages
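if __name__ == "__main__":
    # Hedged usage sketch: the file name and the 'CO(GT)' column assume the
    # UCI Air Quality dataset (semicolon-separated, comma decimals), which is
    # what the cleaning above appears to target; adjust both for your data.
    air = load_and_process_data("AirQualityUCI.csv")
    hourly_co = getAverageConcentration(air, "CO(GT)")
    for hour, value in enumerate(hourly_co):
        print("%02d:00  %.2f" % (hour, value))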
|
"""
Woopra template tags and filters.
"""
from __future__ import absolute_import
import json
import re
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError
from analytical.utils import (
disable_html,
get_identity,
get_required_setting,
get_user_from_context,
get_user_is_authenticated,
is_internal_ip,
)
DOMAIN_RE = re.compile(r'^\S+$')
TRACKING_CODE = """
<script type="text/javascript">
var woo_settings = %(settings)s;
var woo_visitor = %(visitor)s;
!function(){var a,b,c,d=window,e=document,f=arguments,g="script",h=["config","track","trackForm","trackClick","identify","visit","push","call"],i=function(){var a,b=this,c=function(a){b[a]=function(){return b._e.push([a].concat(Array.prototype.slice.call(arguments,0))),b}};for(b._e=[],a=0;a<h.length;a++)c(h[a])};for(d.__woo=d.__woo||{},a=0;a<f.length;a++)d.__woo[f[a]]=d[f[a]]=d[f[a]]||new i;b=e.createElement(g),b.async=1,b.src="//static.woopra.com/js/w.js",c=e.getElementsByTagName(g)[0],c.parentNode.insertBefore(b,c)}("woopra");
woopra.config(woo_settings);
woopra.identify(woo_visitor);
woopra.track();
</script>
""" # noqa
register = Library()
@register.tag
def woopra(parser, token):
"""
Woopra tracking template tag.
Renders Javascript code to track page visits. You must supply
your Woopra domain in the ``WOOPRA_DOMAIN`` setting.
"""
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return WoopraNode()
class WoopraNode(Node):
def __init__(self):
self.domain = get_required_setting(
'WOOPRA_DOMAIN', DOMAIN_RE,
"must be a domain name")
def render(self, context):
if settings.get("DISABLE_TRACKING_CODE", False):
return ""
cfg = self._get_settings(context)
visitor = self._get_visitor(context)
html = TRACKING_CODE % {
'settings': json.dumps(cfg, sort_keys=True),
'visitor': json.dumps(visitor, sort_keys=True),
}
if is_internal_ip(context, 'WOOPRA'):
html = disable_html(html, 'Woopra')
return html
def _get_settings(self, context):
variables = {'domain': self.domain}
try:
variables['idle_timeout'] = str(settings.WOOPRA_IDLE_TIMEOUT)
except AttributeError:
pass
return variables
def _get_visitor(self, context):
params = {}
for dict_ in context:
for var, val in dict_.items():
if var.startswith('woopra_'):
params[var[7:]] = val
if 'name' not in params and 'email' not in params:
user = get_user_from_context(context)
if user is not None and get_user_is_authenticated(user):
params['name'] = get_identity(
context, 'woopra', self._identify, user)
if user.email:
params['email'] = user.email
return params
def _identify(self, user):
name = user.get_full_name()
if not name:
name = user.username
return name
def contribute_to_analytical(add_node):
WoopraNode() # ensure properly configured
add_node('head_bottom', WoopraNode)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys
from os import path
clear = lambda: os.system('clear')
green = "\033[1;32;2m"
greenblink = "\033[1;32;5m"
yellow = "\033[1;33;2m"
yellowblink = "\033[1;33;5m"
redblink = "\033[1;31;5m"
red = "\033[1;31;2m"
white = "\033[1;37;0m"
normal = "\033[0m"
# =============================
if not path.exists("/run/secrets/redis_secret"):
print (red+" A Redis password is not set in the secrets."+normal)
print (red+" The server will start with the default password: "+green+"testpass"+normal)
print (red+" It is highly advisable to change this password for security reasons."+normal)
print (red+" Please refer to http://link.to.documentation to fix this. "+normal)
# sys.exit(1)
else:
print (green+" Setting everything up. It'll only take a second."+normal)
secret = open('/run/secrets/redis_secret', 'r')
with open('/usr/local/etc/redis/redis.conf') as f:
newText=f.read().replace('testpass', secret.read())
with open('/usr/local/etc/redis/redis.conf', "w") as f:
f.write(newText)
secret.close()
print(green+" Server is ready to start. That's what we will do next :)"+normal)
print("=========================================================================")
|
#
# Copyright 2014 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from cryptography import x509
import mock
from octavia.common import data_models
import octavia.common.exceptions as exceptions
import octavia.common.tls_utils.cert_parser as cert_parser
from octavia.tests.common import sample_certs
from octavia.tests.unit import base
from octavia.tests.unit.common.sample_configs import sample_configs_combined
class TestTLSParseUtils(base.TestCase):
def test_alt_subject_name_parses(self):
hosts = cert_parser.get_host_names(sample_certs.ALT_EXT_CRT)
self.assertIn('www.cnfromsubject.org', hosts['cn'])
self.assertIn('www.hostFromDNSName1.com', hosts['dns_names'])
self.assertIn('www.hostFromDNSName2.com', hosts['dns_names'])
self.assertIn('www.hostFromDNSName3.com', hosts['dns_names'])
self.assertIn('www.hostFromDNSName4.com', hosts['dns_names'])
def test_x509_parses(self):
self.assertRaises(exceptions.UnreadableCert,
cert_parser.validate_cert, "BAD CERT")
self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT))
self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY))
def test_read_private_key_pkcs8(self):
self.assertRaises(exceptions.NeedsPassphrase,
cert_parser._read_private_key,
sample_certs.ENCRYPTED_PKCS8_CRT_KEY)
cert_parser._read_private_key(
sample_certs.ENCRYPTED_PKCS8_CRT_KEY,
passphrase=sample_certs.ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE)
def test_read_private_key_pem(self):
self.assertRaises(exceptions.NeedsPassphrase,
cert_parser._read_private_key,
sample_certs.X509_CERT_KEY_ENCRYPTED)
cert_parser._read_private_key(
sample_certs.X509_CERT_KEY_ENCRYPTED,
passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE)
def test_prepare_private_key(self):
self.assertEqual(
cert_parser.prepare_private_key(
sample_certs.X509_CERT_KEY_ENCRYPTED,
passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE),
sample_certs.X509_CERT_KEY)
def test_prepare_private_key_orig_not_encrypted(self):
self.assertEqual(
cert_parser.prepare_private_key(
sample_certs.X509_CERT_KEY),
sample_certs.X509_CERT_KEY)
def test_validate_cert_and_key_match(self):
self.assertTrue(
cert_parser.validate_cert(
sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY))
self.assertTrue(
cert_parser.validate_cert(
sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY.decode('utf-8')))
self.assertRaises(exceptions.MisMatchedKey,
cert_parser.validate_cert,
sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY_2)
def test_validate_cert_handles_intermediates(self):
self.assertTrue(
cert_parser.validate_cert(
sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY,
intermediates=(sample_certs.X509_IMDS +
b"\nParser should ignore junk\n")))
self.assertTrue(
cert_parser.validate_cert(
sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY,
intermediates=sample_certs.X509_IMDS_LIST))
def test_split_x509s(self):
imds = []
for x509Pem in cert_parser._split_x509s(sample_certs.TEST_X509_IMDS):
imds.append(cert_parser._get_x509_from_pem_bytes(x509Pem))
for i in range(0, len(imds)):
self.assertEqual(sample_certs.EXPECTED_IMD_TEST_SUBJS[i],
imds[i].subject.get_attributes_for_oid(
x509.OID_COMMON_NAME)[0].value)
def test_get_intermediates_pem_chain(self):
self.assertEqual(
sample_certs.X509_IMDS_LIST,
list(cert_parser.get_intermediates_pems(sample_certs.X509_IMDS)))
def test_get_intermediates_pkcs7_pem(self):
self.assertEqual(
sample_certs.X509_IMDS_LIST,
list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_PEM)))
def test_get_intermediates_pkcs7_pem_bad(self):
self.assertRaises(
exceptions.UnreadableCert,
lambda: list(cert_parser.get_intermediates_pems(
b'-----BEGIN PKCS7-----\nbad data\n-----END PKCS7-----')))
def test_get_intermediates_pkcs7_der(self):
self.assertEqual(
sample_certs.X509_IMDS_LIST,
list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_DER)))
def test_get_intermediates_pkcs7_der_bad(self):
self.assertRaises(
exceptions.UnreadableCert,
lambda: list(cert_parser.get_intermediates_pems(
b'\xfe\xfe\xff\xff')))
def test_get_x509_from_der_bytes_bad(self):
self.assertRaises(
exceptions.UnreadableCert,
cert_parser._get_x509_from_der_bytes, b'bad data')
@mock.patch('oslo_context.context.RequestContext')
def test_load_certificates(self, mock_oslo):
listener = sample_configs_combined.sample_listener_tuple(
tls=True, sni=True, client_ca_cert=True)
client = mock.MagicMock()
context = mock.Mock()
context.project_id = '12345'
with mock.patch.object(cert_parser,
'get_host_names') as cp:
with mock.patch.object(cert_parser,
'_map_cert_tls_container'):
cp.return_value = {'cn': 'fakeCN'}
cert_parser.load_certificates_data(client, listener, context)
                # Ensure get_cert is called three times (once per container)
calls_cert_mngr = [
mock.call.get_cert(context, 'cont_id_1', check_only=True),
mock.call.get_cert(context, 'cont_id_2', check_only=True),
mock.call.get_cert(context, 'cont_id_3', check_only=True)
]
client.assert_has_calls(calls_cert_mngr)
# Test asking for nothing
listener = sample_configs_combined.sample_listener_tuple(
tls=False, sni=False, client_ca_cert=False)
client = mock.MagicMock()
with mock.patch.object(cert_parser,
'_map_cert_tls_container') as mock_map:
result = cert_parser.load_certificates_data(client, listener)
mock_map.assert_not_called()
ref_empty_dict = {'tls_cert': None, 'sni_certs': []}
self.assertEqual(ref_empty_dict, result)
mock_oslo.assert_called()
def test_load_certificates_get_cert_errors(self):
mock_cert_mngr = mock.MagicMock()
mock_obj = mock.MagicMock()
mock_sni_container = mock.MagicMock()
mock_sni_container.tls_container_id = 2
mock_cert_mngr.get_cert.side_effect = [Exception, Exception]
# Test tls_certificate_id error
mock_obj.tls_certificate_id = 1
self.assertRaises(exceptions.CertificateRetrievalException,
cert_parser.load_certificates_data,
mock_cert_mngr, mock_obj)
# Test sni_containers error
mock_obj.tls_certificate_id = None
mock_obj.sni_containers = [mock_sni_container]
self.assertRaises(exceptions.CertificateRetrievalException,
cert_parser.load_certificates_data,
mock_cert_mngr, mock_obj)
@mock.patch('octavia.certificates.common.cert.Cert')
def test_map_cert_tls_container(self, cert_mock):
tls = data_models.TLSContainer(
id=sample_certs.X509_CERT_SHA1,
primary_cn=sample_certs.X509_CERT_CN,
certificate=sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY_ENCRYPTED,
passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE,
intermediates=sample_certs.X509_IMDS_LIST)
cert_mock.get_private_key.return_value = tls.private_key
cert_mock.get_certificate.return_value = tls.certificate
cert_mock.get_intermediates.return_value = tls.intermediates
cert_mock.get_private_key_passphrase.return_value = tls.passphrase
with mock.patch.object(cert_parser, 'get_host_names') as cp:
cp.return_value = {'cn': sample_certs.X509_CERT_CN}
self.assertEqual(
tls.id, cert_parser._map_cert_tls_container(
cert_mock).id)
self.assertEqual(
tls.primary_cn, cert_parser._map_cert_tls_container(
cert_mock).primary_cn)
self.assertEqual(
tls.certificate, cert_parser._map_cert_tls_container(
cert_mock).certificate)
self.assertEqual(
sample_certs.X509_CERT_KEY,
cert_parser._map_cert_tls_container(
cert_mock).private_key)
self.assertEqual(
tls.intermediates, cert_parser._map_cert_tls_container(
cert_mock).intermediates)
def test_build_pem(self):
expected = b'imacert\nimakey\nimainter\nimainter2\n'
tls_tuple = sample_configs_combined.sample_tls_container_tuple(
certificate=b'imacert', private_key=b'imakey',
intermediates=[b'imainter', b'imainter2'])
self.assertEqual(expected, cert_parser.build_pem(tls_tuple))
def test_get_primary_cn(self):
cert = sample_certs.X509_CERT
with mock.patch.object(cert_parser, 'get_host_names') as cp:
cp.return_value = {'cn': 'fakeCN'}
cn = cert_parser.get_primary_cn(cert)
self.assertEqual('fakeCN', cn)
def test_get_cert_expiration(self):
exp_date = cert_parser.get_cert_expiration(sample_certs.X509_EXPIRED)
self.assertEqual(datetime.datetime(2016, 9, 25, 18, 1, 54), exp_date)
# test the exception
self.assertRaises(exceptions.UnreadableCert,
cert_parser.get_cert_expiration, 'bad-cert-file')
|
from clothmanip.utils.utils import get_variant, argsparser, get_randomized_env, dump_commit_hashes, get_keys_and_dims, dump_goal
from clothmanip.envs.cloth import ClothEnvPickled as ClothEnv
import numpy as np
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic, TanhScriptPolicy, CustomScriptPolicy, CustomTanhScriptPolicy, ScriptPolicy
import cv2
import os
from rlkit.envs.wrappers import NormalizedBoxEnv
def main(variant):
variant['save_folder'] = "/home/julius/robotics/clothmanip/experiments/paper_images"
env = ClothEnv(**variant['env_kwargs'], has_viewer=True, save_folder=variant['save_folder'])
env = NormalizedBoxEnv(env)
env = get_randomized_env(env, variant)
keys, dims = get_keys_and_dims(variant, env)
demo_path = variant['demo_paths'][0]
predefined_actions = np.genfromtxt(demo_path, delimiter=',')
iter_folder = os.path.join(variant['save_folder'], "close_no_corners", "0")
os.makedirs(os.path.join(iter_folder, "corners_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "env_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "cnn_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "cnn_color_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "cnn_color_full_images"), exist_ok=True)
policy = TanhScriptPolicy(
output_size=dims['action_dim'],
added_fc_input_size=dims['added_fc_input_size'],
aux_output_size=9,
**variant['policy_kwargs'],
)
eval_policy = MakeDeterministic(policy)
for step_number, delta in enumerate(predefined_actions):
print(step_number)
a = delta/env.output_max
a = np.clip(a, -1, 1)
corner_image, eval_image, cnn_color_image_full, cnn_color_image, cnn_image = env.capture_images(None, mask_type=None)
cv2.imwrite(f'{iter_folder}/corners_images/{str(step_number).zfill(3)}.png', corner_image)
#cv2.imwrite(f'{iter_folder}/env_images/{str(step_number).zfill(3)}.png', eval_image)
#cv2.imwrite(f'{iter_folder}/cnn_images/{str(step_number).zfill(3)}.png', corner_image)
#cv2.imwrite(f'{iter_folder}/cnn_color_images/{str(step_number).zfill(3)}.png', cnn_color_image)
#cv2.imwrite(f'{iter_folder}/cnn_color_full_images/{str(step_number).zfill(3)}.png', cnn_color_image_full)
o, r, d, env_info = env.step(a)
if __name__ == "__main__":
args = argsparser()
variant, arg_str = get_variant(args)
main(variant)
|
from spacy.lang.en import English
from spacy.tokens import Token
nlp = English()
# Register the Token extension attribute "is_country" with the default value False
Token.set_extension("is_country", default=False)
# Process the text and set the is_country attribute to True for the token "Spain"
doc = nlp("I live in Spain.")
doc[3]._.is_country = True
# Print the token text and the is_country attribute for all tokens
print([(token.text, token._.is_country) for token in doc])
|
load("@rules_maven_third_party//:import_external.bzl", import_external = "import_external")
def dependencies():
import_external(
name = "org_eclipse_jgit_org_eclipse_jgit",
artifact = "org.eclipse.jgit:org.eclipse.jgit:5.11.0.202103091610-r",
artifact_sha256 = "b0f012105d67729a67c7fde546b6e89580f7ddc5bd73c6c7bae7084c50e36a37",
srcjar_sha256 = "23b4f2debe38b2e18cb925ada6639eb78cc029243060f8f8c080ba3e0e70ab71",
deps = [
"@com_googlecode_javaewah_JavaEWAH",
"@org_slf4j_slf4j_api",
],
)
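# ---------------------------------------------------------------------------
# Usage sketch (illustrative): the macro above is meant to be loaded and called
# from a WORKSPACE or another .bzl file; the label below is an assumption, not
# taken from this file:
#
#     load("//third_party:jgit.bzl", jgit_deps = "dependencies")
#     jgit_deps()
# ---------------------------------------------------------------------------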
|
from tensorize import *
class InceptionResnetV1(Model):
    def inference(self, inputs, output):
        stem(inputs, output)
        for x in range(4):
            inceptionA()
        reductionA()
        for x in range(7):
            inceptionB()
        reductionB()
        for x in range(3):
            inceptionC()
        AveragePooling()
        Dropout(0.8)
        CategoricalPredictionOutput(output)
def train(self, outputs):
CategoricalCrossEntropy()
CategoricalAccuracy(outputs)
GradientDescentOptimizer()
class InceptionResnetV2(Model):
    def inference(self, inputs, output):
        stem(inputs, output)
        for x in range(4):
            inceptionA()
        reductionA()
        for x in range(7):
            inceptionB()
        reductionB()
        for x in range(3):
            inceptionC()
        AveragePooling()
        Dropout(0.8)
        CategoricalPredictionOutput(output)
def train(self, outputs):
CategoricalCrossEntropy()
CategoricalAccuracy(outputs)
GradientDescentOptimizer()
def stem(inputs, outputs):
BatchImageInput(inputs)
Convolution3x3(filters=32)
Convolution3x3(filters=32)
Convolution3x3(filters=64)
with ParallelBlock() as parallel:
with parallel:
MaxPooling2D()
with parallel:
Convolution3x3(filters=64)
FilterConcat()
with ParallelBlock() as parallel:
with parallel:
Convolution1x1(filters=64)
Convolution3x3(filters=96)
with parallel:
Convolution1x1(filters=64)
Convolution2D([7, 1], filters=64)
Convolution2D([1, 7], filters=64)
Convolution3x3(filters=96)
FilterConcat()
with ParallelBlock() as block:
with block:
MaxPooling2D()
with block:
Convolution3x3(filters=64)
FilterConcat()
def inceptionA():
with ParallelBlock() as parallel:
with parallel:
AveragePooling()
Convolution1x1(filters=96)
with parallel:
Convolution1x1(filters=96)
with parallel:
Convolution1x1(filters=64)
Convolution3x3(filters=96)
with parallel:
Convolution1x1(filters=64)
Convolution3x3(filters=96)
Convolution3x3(filters=96)
FilterConcat()
def inceptionB():
with ParallelBlock() as parallel:
with parallel:
AveragePooling()
Convolution1x1(filters=128)
with parallel:
Convolution1x1(filters=384)
with parallel:
Convolution1x1(filters=192)
Convolution2D([1, 7], filters=224)
Convolution2D([1, 7], filters=256)
with parallel:
Convolution1x1(filters=192)
Convolution2D([1, 7], filters=192)
Convolution2D([7, 1], filters=224)
Convolution2D([1, 7], filters=224)
Convolution2D([7, 1], filters=256)
FilterConcat()
def inceptionC():
with ParallelBlock() as parallel:
with parallel:
AveragePooling()
Convolution1x1(filters=256)
with parallel:
Convolution1x1(filters=256)
with parallel:
Convolution1x1(filters=384)
with ParallelBlock() as parallel_inner:
with parallel_inner:
Convolution2D([1, 3], filters=256)
with parallel_inner:
Convolution2D([3, 1], filters=256)
with parallel:
Convolution1x1(filters=384)
Convolution2D([1, 3], filters=384)
Convolution2D([3, 1], filters=512)
FilterConcat()
def reduceA(n, l, k, m):
with ParallelBlock() as parallel:
with parallel:
MaxPooling2D([3, 3])
with parallel:
Convolution3x3(n)
with parallel:
Convolution1x1(filters=k)
Convolution3x3(filters=l)
Convolution3x3(filters=m)
FilterConcat()
def reduceB():
with ParallelBlock() as parallel:
with parallel:
MaxPooling2D([3, 3], stride=2)
with parallel:
Convolution1x1(192)
Convolution3x3(192)
with parallel:
Convolution1x1(filters=256)
Convolution2D([1, 7], filters=256)
Convolution2D([7, 1], filters=320)
Convolution3x3(filters=320, stride=2)
FilterConcat()
def inceptionResnetA():
RectifiedLinearUnit()
with ParallelBlock() as parallel:
with parallel:
with ParallelBlock() as parallel_inner:
with parallel_inner:
Convolution1x1(32)
with parallel_inner:
Convolution1x1(32)
Convolution3x3(32)
with parallel_inner:
Convolution1x1(32)
Convolution3x3(32)
Convolution3x3(32)
Convolution1x1(filters=256)
Sum()
def inceptionResnetB():
RectifiedLinearUnit()
with ParallelBlock() as parallel:
with parallel:
with ParallelBlock() as parallel_inner:
with parallel_inner:
Convolution1x1(128)
with parallel_inner:
Convolution1x1(128)
Convolution2D([1, 7], filters=128)
Convolution2D([7, 1], filters=128)
Convolution1x1(filters=896)
Sum()
|
"""
The temp module provides a NamedTemporaryFile that can be reopened in the same
process on any platform. Most platforms use the standard Python
tempfile.NamedTemporaryFile class, but Windows users are given a custom class.
This is needed because the Python implementation of NamedTemporaryFile uses the
O_TEMPORARY flag under Windows, which prevents the file from being reopened
if the same flag is not provided [1][2]. Note that this does not address the
more general issue of opening a file for writing and reading in multiple
processes in a manner that works across platforms.
Also note that the custom version of NamedTemporaryFile does not support the
full range of keyword arguments available in Python 2.6+ and 3.0+.
1: https://mail.python.org/pipermail/python-list/2005-December/336958.html
2: http://bugs.python.org/issue14243
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that supports reopening of the
temporary file in Windows.
Note that unlike tempfile.NamedTemporaryFile from the standard library,
__init__() does not support the 'delete' keyword argument in
Python 2.6+, or the 'delete', 'buffering', 'encoding', or 'newline'
keyword arguments in Python 3.0+.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except (OSError):
pass
@property
def closed(self):
"""
This attribute needs to be accessible in certain situations,
because this class is supposed to mock the API of the class
tempfile.NamedTemporaryFile in the Python standard library.
"""
return self.file.closed
def __del__(self):
self.close()
def __enter__(self):
self.file.__enter__()
return self
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
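# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of this module). The point of the
# wrapper is that the temporary file can be reopened by name while it is still
# open, which the stdlib class does not allow on Windows. The import path
# below assumes this file is django/core/files/temp.py:
#
#     from django.core.files.temp import NamedTemporaryFile
#
#     tmp = NamedTemporaryFile(suffix=".txt")
#     tmp.write(b"hello")
#     tmp.flush()
#     with open(tmp.name, "rb") as again:   # reopening by name works everywhere
#         assert again.read() == b"hello"
#     tmp.close()                           # close() also deletes the file
# ---------------------------------------------------------------------------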
|
# -*- coding: utf-8 -*-
__author__ = 'xu'
import os
from functools import wraps
from flask import Flask, Blueprint, jsonify
from flask_peewee.db import Database
from flask_peewee.auth import Auth
from flask_debugtoolbar import DebugToolbarExtension
from flask_mail import Mail
from flask_login import LoginManager
from flask_restful import Api
app = Flask(__name__)
APP_ENV = 'dev'
if 'APP_ENV' not in os.environ or os.environ['APP_ENV'].lower() == 'dev':
    print('Running on Dev Env:')
    app.config.from_object('config')
elif os.environ['APP_ENV'].lower() in ('prod', 'test'):
    print('Running on %s Env:' % os.environ['APP_ENV'].upper())
    app.config.from_object('config')
    app.config.from_object('config_' + os.environ['APP_ENV'].lower())
    APP_ENV = os.environ['APP_ENV'].lower()
else:
    print('Wrong Env!')
    exit(1)
app.config["APP_ENV"] = APP_ENV
if "VERIFY_HEADER_NAME" not in os.environ or "VERIFY_PASSWORD" not in os.environ or "VERIFY_HASHED" not in os.environ:
    print('Wrong Env!')
    exit(1)
app.config["API_VERIFY"] = {
"verify_header": os.environ['VERIFY_HEADER_NAME'],
"password": os.environ["VERIFY_PASSWORD"],
"hashed": os.environ["VERIFY_HASHED"].replace("*", "$")
}
# print app.config["API_VERIFY"]
#db
db = Database(app)
auth = Auth(app, db)
toolbar = DebugToolbarExtension(app)
mail = Mail(app)
import models
import utils
utils.create_tables()
import views
# from api import ApiReset, ApiRegister, ApiLogin
# api_bp = Blueprint('api', __name__, url_prefix="/api")
# api = Api(api_bp, default_mediatype='application/json')
# resource_class_kwargs = {"models": models, "utils": utils}
# api.add_resource(
# ApiLogin,
# '/v1.0/login',
# resource_class_kwargs=resource_class_kwargs
# )
# api.add_resource(
# ApiRegister,
# '/v1.0/register',
# resource_class_kwargs=resource_class_kwargs
# )
# api.add_resource(
# ApiReset,
# '/v1.0/reset',
# resource_class_kwargs=resource_class_kwargs
# )
# app.register_blueprint(api_bp)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from openpyxl.worksheet.worksheet import Worksheet
COLUMNS = {"A": 20,
"B": 10,
"C": 10,
"D": 10,
"E": 10,
"F": 10,
"G": 10,
"H": 10,
"I": 10}
class RankingReportWriter(object):
def __init__(self,
some_excel_worksheet: Worksheet,
some_source_dimension: list,
some_target_dimensions: list,
some_final_ranking: list):
"""
:param some_excel_worksheet:
the excel worksheet to write to
"""
from . import WorksheetHelper
if not some_excel_worksheet:
raise ValueError("Mandatory Param: Excel Worksheet")
if not some_source_dimension:
raise ValueError("Mandatory Param: Source Dimension")
if not some_target_dimensions:
raise ValueError("Mandatory Param: Target Dimemsion")
if not some_final_ranking:
raise ValueError("Mandatory Param: Final Ranking")
self.worksheet = some_excel_worksheet
self.source_dimension = some_source_dimension
self.target_dimensions = some_target_dimensions
self.final_ranking = some_final_ranking
self.helper = WorksheetHelper
def _write_value(self,
some_column: str,
some_row: int,
some_text: str,
some_named_format: str):
"""
:param some_column:
:param some_row:
:param some_text:
:param some_named_format:
"""
cell = "{}{}".format(some_column,
some_row)
self.worksheet[cell].value = some_text
self.worksheet[cell].style = some_named_format
def _write_records(self,
source_weights: list,
source_values: list):
""" writes records by row and column """
def _dimension_value(value: str) -> dict:
return self.helper.struct(value, "dimension_value_source")
def _dimension_weight(value: str) -> dict:
return self.helper.struct(value, "dimension_weight_source")
def _header_dimension(value: str) -> dict:
return self.helper.struct(value, "header_dimension")
def _header_other(value: str) -> dict:
return self.helper.struct(value, "header_other")
def _field_key(value: str) -> dict:
return self.helper.struct(value, "keyfield")
def _field_weight(value: str) -> dict:
return self.helper.struct(value, "field_weight_source")
def _field_rank(value: str) -> dict:
return self.helper.struct(value, "field_rank")
d_row_1 = {
"A1": _header_other("Open Seat ID"),
"B1": _header_dimension("Cloud"),
"C1": _header_dimension("Database"),
"D1": _header_dimension("System Administrator"),
"E1": _header_dimension("Hard Skill"),
"F1": _header_dimension("Project Management"),
"G1": _header_dimension("Service Management"),
"H1": _header_dimension("Soft Skill"),
"I1": _header_other("Rank")}
d_row_2 = {
"A2": self.helper.struct(self.source_dimension[0]["key_field"],
"keyfield_value_source"),
"B2": _dimension_value(source_values[0]),
"C2": _dimension_value(source_values[1]),
"D2": _dimension_value(source_values[6]),
"E2": _dimension_value(source_values[2]),
"F2": _dimension_value(source_values[3]),
"G2": _dimension_value(source_values[4]),
"H2": _dimension_value(source_values[5])}
d_row_3 = {
"A3": self.helper.struct("Weight",
"dimension_weight_text"),
"B3": _dimension_weight(source_weights[0]),
"C3": _dimension_weight(source_weights[1]),
"D3": _dimension_weight(source_weights[6]),
"E3": _dimension_weight(source_weights[2]),
"F3": _dimension_weight(source_weights[3]),
"G3": _dimension_weight(source_weights[4]),
"H3": _dimension_weight(source_weights[5])}
def _field_weight_value(target_dimension: dict,
slot_name: str) -> str:
return target_dimension["slots"][slot_name]["weight"]
l_values = []
for i in range(0, len(self.target_dimensions)):
l_values.append({
"A{}".format(i + 5): _field_key(
self.target_dimensions[i]["key_field"]),
"B{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "cloud")),
"C{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "database")),
"D{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "system administrator")),
"E{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "hard skill")),
"F{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "project management")),
"G{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "service management")),
"H{}".format(i + 5): _field_weight(
_field_weight_value(self.target_dimensions[i], "soft skill")),
"I{}".format(i + 5): _field_rank(
self.final_ranking[i])})
self.helper.generate(self.worksheet,
[d_row_1, d_row_2, d_row_3])
self.helper.generate(self.worksheet,
l_values)
def process(self):
"""
Processes the logs from the input directory
@input: Base directory containing the input and output subdirs.
@output: None
"""
def _weights(some_records: list) -> list:
weights = []
for record in some_records:
weights.append([record["slots"][x]["weight"]
for x in record["slots"]])
return weights
def _values(some_records: list) -> list:
values = []
for record in some_records:
values.append([record["slots"][x]["z_score"]
for x in record["slots"]])
return values
source_weights = _weights(self.source_dimension)[0]
source_values = _values(self.source_dimension)[0]
self.helper.column_widths(self.worksheet,
COLUMNS)
self._write_records(source_weights,
source_values)
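# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the dimension/ranking structures and the
# WorksheetHelper named formats are assumed to come from the surrounding
# package):
#
#     from openpyxl import Workbook
#
#     wb = Workbook()
#     writer = RankingReportWriter(wb.active,
#                                  some_source_dimension=source_dimension,
#                                  some_target_dimensions=target_dimensions,
#                                  some_final_ranking=final_ranking)
#     writer.process()
#     wb.save("ranking-report.xlsx")
# ---------------------------------------------------------------------------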
|
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
def selenium_initializer():
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
driver = webdriver.Chrome('../chromedriver', chrome_options=options)
return driver
if __name__ == "__main__":
pass
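# ---------------------------------------------------------------------------
# Usage sketch (illustrative; assumes a chromedriver binary one directory up,
# as hard-coded above, and a Selenium 3.x style API):
#
#     driver = selenium_initializer()
#     try:
#         driver.get("https://example.com")
#         print(driver.title)
#     finally:
#         driver.quit()
# ---------------------------------------------------------------------------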
|
import argparse
def install(*args):
from .installer import Installer # noqa: autoimport
Installer.install(*args)
def clone(*args):
from .installer import Installer # noqa: autoimport
Installer.clone(*args)
def refresh(do_pull=False):
from .repomanager import RepoManager # noqa: autoimport
RepoManager.refresh(do_pull=do_pull)
def run_hooks():
from .repomanager import RepoManager # noqa: autoimport
RepoManager.run_hooks()
def main():
parser = argparse.ArgumentParser(description="Automate common git workflows")
parser.add_argument("action", nargs="?", help="The action to do", default="refresh")
parser.add_argument("names", nargs="*", help="repository names")
args = parser.parse_args()
action_mapper = {
"refresh": refresh,
"clone": clone,
"install": install,
"pull": lambda: refresh(do_pull=True),
"hooks": run_hooks,
}
if args.action not in action_mapper:
raise Exception(f"{args.action} not defined")
action = action_mapper[args.action]
action(*args.names)
if __name__ == "__main__":
main()
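# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the script name is an assumption, not taken from
# this file):
#
#     $ python cli.py                     # default action: refresh
#     $ python cli.py pull                # refresh with do_pull=True
#     $ python cli.py clone repo1 repo2   # clone the named repositories
#     $ python cli.py hooks               # run the configured hooks
# ---------------------------------------------------------------------------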
|
from datetime import date
from typing import List, Tuple, Optional
from linum.exceptions import IntersectionException
from linum.layer import Layer
from linum.task_part import TaskPart
class LayerList:
def __init__(self, layers: Optional[List[Layer]] = None):
"""
Массив слоев.
:param layers: слои для добавления в список
"""
self.layers = layers or []
def __repr__(self):
return "<LayerList with {} layer(s)>".format(len(self.layers))
def __eq__(self, other):
if not isinstance(other, LayerList):
return False
return self.layers == other.layers
def __getitem__(self, item):
return self.layers[item]
def __bool__(self):
if not self.layers:
return False
for layer in self.layers:
if layer:
return True
return False
def split(self, split_date: date) -> Tuple['LayerList', 'LayerList']:
"""
Функция разделения списка слоев на два относительно указанной даты.
:param split_date: date
:return:
"""
list_before = LayerList()
list_after = LayerList()
for layer in self.layers:
layer_before, layer_after = layer.split(split_date)
list_before.layers.append(layer_before)
list_after.layers.append(layer_after)
return list_before, list_after
def add_task_part(self, task_part: TaskPart):
"""
Добавление кусочка задачи в список слоев.
Если есть свободное место в текущих слоях, то кусочек добавится к ним.
Если свободного места нет, то список слоев расширится на один слой и
кусочек задачи будет помещен на этот новый слой.
:param task_part: кусочек задачи для добавления
"""
for layer in self.layers:
try:
layer.append(task_part)
return
except IntersectionException:
pass
layer = Layer([task_part])
self.layers.append(layer)
def cleanup(self):
layers = []
for layer in self.layers:
if layer:
layers.append(layer)
self.layers = layers
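# ---------------------------------------------------------------------------
# Usage sketch (illustrative; how the task parts themselves are built is
# assumed, based only on how Layer and TaskPart are used above):
#
#     from datetime import date
#
#     layers = LayerList()
#     layers.add_task_part(part_a)   # placed on the first layer with free space
#     layers.add_task_part(part_b)   # an overlapping part gets a new layer
#     before, after = layers.split(date(2020, 1, 15))
#     layers.cleanup()               # drops layers that became empty
# ---------------------------------------------------------------------------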
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
# try/except added for compatibility with python < 3.8
try:
from unittest import mock
from unittest.mock import AsyncMock
except ImportError:
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.versions import VersionsAsyncClient
from google.cloud.dialogflow_v2.services.versions import VersionsClient
from google.cloud.dialogflow_v2.services.versions import pagers
from google.cloud.dialogflow_v2.services.versions import transports
from google.cloud.dialogflow_v2.types import version
from google.cloud.dialogflow_v2.types import version as gcd_version
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VersionsClient._get_default_mtls_endpoint(None) is None
assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize(
"client_class,transport_name",
[
(VersionsClient, "grpc"),
(VersionsAsyncClient, "grpc_asyncio"),
],
)
def test_versions_client_from_service_account_info(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == ("dialogflow.googleapis.com:443")
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VersionsGrpcTransport, "grpc"),
(transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name",
[
(VersionsClient, "grpc"),
(VersionsAsyncClient, "grpc_asyncio"),
],
)
def test_versions_client_from_service_account_file(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == ("dialogflow.googleapis.com:443")
def test_versions_client_get_transport_class():
transport = VersionsClient.get_transport_class()
available_transports = [
transports.VersionsGrpcTransport,
]
assert transport in available_transports
transport = VersionsClient.get_transport_class("grpc")
assert transport == transports.VersionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_versions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient])
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_versions_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_versions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_versions_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type",
[
version.ListVersionsRequest,
dict,
],
)
def test_list_versions(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
client.list_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
@pytest.mark.asyncio
async def test_list_versions_async(
transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_versions_async_from_dict():
await test_list_versions_async(request_type=dict)
def test_list_versions_field_headers():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent_value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_versions_field_headers_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent_value",
) in kw["metadata"]
def test_list_versions_flattened():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_versions(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_versions_flattened_error():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_versions(
version.ListVersionsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_versions_flattened_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.ListVersionsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_versions(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_versions(
version.ListVersionsRequest(),
parent="parent_value",
)
def test_list_versions_pager(transport_name: str = "grpc"):
client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
version.Version(),
],
next_page_token="abc",
),
version.ListVersionsResponse(
versions=[],
next_page_token="def",
),
version.ListVersionsResponse(
versions=[
version.Version(),
],
next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_versions(request={})
assert pager._metadata == metadata
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, version.Version) for i in results)
def test_list_versions_pages(transport_name: str = "grpc"):
client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
version.Version(),
],
next_page_token="abc",
),
version.ListVersionsResponse(
versions=[],
next_page_token="def",
),
version.ListVersionsResponse(
versions=[
version.Version(),
],
next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
],
),
RuntimeError,
)
pages = list(client.list_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_versions_async_pager():
client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
version.Version(),
],
next_page_token="abc",
),
version.ListVersionsResponse(
versions=[],
next_page_token="def",
),
version.ListVersionsResponse(
versions=[
version.Version(),
],
next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
],
),
RuntimeError,
)
async_pager = await client.list_versions(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager: # pragma: no branch
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, version.Version) for i in responses)
@pytest.mark.asyncio
async def test_list_versions_async_pages():
client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
version.Version(),
],
next_page_token="abc",
),
version.ListVersionsResponse(
versions=[],
next_page_token="def",
),
version.ListVersionsResponse(
versions=[
version.Version(),
],
next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[
version.Version(),
version.Version(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_versions(request={})
).pages: # pragma: no branch
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
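# The async pager behaves the same way but is driven with ``async for``. A
# minimal sketch, assuming an already-authenticated VersionsAsyncClient and a
# hypothetical parent resource name (neither is part of the tests in this
# file):
async def _example_aiter_versions(client, parent="projects/my-project/agent"):
    # Awaiting list_versions returns the pager; iterating it asynchronously
    # fetches additional pages on demand.
    pager = await client.list_versions(parent=parent)
    return [v.name async for v in pager]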
@pytest.mark.parametrize(
"request_type",
[
version.GetVersionRequest,
dict,
],
)
def test_get_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
response = client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
def test_get_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
client.get_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
@pytest.mark.asyncio
async def test_get_version_async(
transport: str = "grpc_asyncio", request_type=version.GetVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_get_version_async_from_dict():
await test_get_version_async(request_type=dict)
def test_get_version_field_headers():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name_value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_version_field_headers_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name_value",
) in kw["metadata"]
def test_get_version_flattened():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_version(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_version_flattened_error():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_version(
version.GetVersionRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_get_version_flattened_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_version(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_version_flattened_error_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_version(
version.GetVersionRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
gcd_version.CreateVersionRequest,
dict,
],
)
def test_create_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_create_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
client.create_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
@pytest.mark.asyncio
async def test_create_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_create_version_async_from_dict():
await test_create_version_async(request_type=dict)
def test_create_version_field_headers():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.CreateVersionRequest()
request.parent = "parent_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent_value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_version_field_headers_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.CreateVersionRequest()
request.parent = "parent_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent_value",
) in kw["metadata"]
def test_create_version_flattened():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_version(
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
def test_create_version_flattened_error():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_version_flattened_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_version(
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_version_flattened_error_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.parametrize(
"request_type",
[
gcd_version.UpdateVersionRequest,
dict,
],
)
def test_update_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_update_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
client.update_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
@pytest.mark.asyncio
async def test_update_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_update_version_async_from_dict():
await test_update_version_async(request_type=dict)
def test_update_version_field_headers():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.UpdateVersionRequest()
request.version.name = "name_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"version.name=name_value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_version_field_headers_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.UpdateVersionRequest()
request.version.name = "name_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"version.name=name_value",
) in kw["metadata"]
def test_update_version_flattened():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_version_flattened_error():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_version_flattened_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_version_flattened_error_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type",
[
version.DeleteVersionRequest,
dict,
],
)
def test_delete_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
client.delete_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
@pytest.mark.asyncio
async def test_delete_version_async(
transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_version_async_from_dict():
await test_delete_version_async(request_type=dict)
def test_delete_version_field_headers():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name_value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_version_field_headers_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name_value",
) in kw["metadata"]
def test_delete_version_flattened():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_version(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_version_flattened_error():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_version(
version.DeleteVersionRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_version_flattened_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_version(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_version_flattened_error_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_version(
version.DeleteVersionRequest(),
name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(
client_options=options,
transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VersionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VersionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.VersionsGrpcTransport,
transports.VersionsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
@pytest.mark.parametrize(
"transport_name",
[
"grpc",
],
)
def test_transport_kind(transport_name):
transport = VersionsClient.get_transport_class(transport_name)(
credentials=ga_credentials.AnonymousCredentials(),
)
assert transport.kind == transport_name
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.VersionsGrpcTransport,
)
def test_versions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_versions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_versions",
"get_version",
"create_version",
"update_version",
"delete_version",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Catch all for all remaining methods and properties
remainder = [
"kind",
]
for r in remainder:
with pytest.raises(NotImplementedError):
getattr(transport, r)()
def test_versions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_versions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport()
adc.assert_called_once()
def test_versions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VersionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VersionsGrpcTransport,
transports.VersionsGrpcAsyncIOTransport,
],
)
def test_versions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VersionsGrpcTransport, grpc_helpers),
(transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_versions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
@pytest.mark.parametrize(
"transport_name",
[
"grpc",
"grpc_asyncio",
],
)
def test_versions_host_no_port(transport_name):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == ("dialogflow.googleapis.com:443")
@pytest.mark.parametrize(
"transport_name",
[
"grpc",
"grpc_asyncio",
],
)
def test_versions_host_with_port(transport_name):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == ("dialogflow.googleapis.com:8000")
def test_versions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_versions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_version_path():
project = "squid"
version = "clam"
expected = "projects/{project}/agent/versions/{version}".format(
project=project,
version=version,
)
actual = VersionsClient.version_path(project, version)
assert expected == actual
def test_parse_version_path():
expected = {
"project": "whelk",
"version": "octopus",
}
path = VersionsClient.version_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_version_path(path)
assert expected == actual
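# The path helpers verified above are typically used to build fully-qualified
# resource names before constructing a request. A minimal sketch with
# hypothetical project and version IDs (not used by the tests in this file):
def _example_build_get_version_request():
    name = VersionsClient.version_path("my-project", "1")
    # name == "projects/my-project/agent/versions/1"
    return version.GetVersionRequest(name=name)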
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VersionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = VersionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(
folder=folder,
)
actual = VersionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = VersionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(
organization=organization,
)
actual = VersionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = VersionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(
project=project,
)
actual = VersionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = VersionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
actual = VersionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = VersionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VersionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(VersionsClient, transports.VersionsGrpcTransport),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
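# Outside of tests, an API key is supplied through ClientOptions rather than a
# mocked credentials object. A minimal sketch with a hypothetical key value,
# mirroring the options pattern used in the test above:
def _example_client_from_api_key():
    options = client_options.ClientOptions()
    options.api_key = "my-api-key"
    return VersionsClient(client_options=options)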
|
from flask import Flask
def create_app(flask_config):
app = Flask(__name__)
app.config.from_object('app.config.{}'.format(flask_config))
from app.api import api_bp
from app.client import client_bp
app.register_blueprint(api_bp)
app.register_blueprint(client_bp)
app.logger.info('>>> {}'.format(flask_config))
return app
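# A minimal sketch of driving the factory with Flask's built-in development
# server; 'Development' is assumed here to be a config class under app.config
# and is not defined in this module:
if __name__ == '__main__':
    app = create_app('Development')
    app.run(debug=True)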
|
from vkbottle.rule import FromMe
from vkbottle.user import Blueprint, Message
from idm_lp.logger import logger_decorator
from idm_lp.database import Database
from idm_lp.utils import edit_message
user = Blueprint(
name='disable_notifications_blueprint'
)
@user.on.message_handler(FromMe(), text="<prefix:service_prefix> выключать уведы")
@logger_decorator
async def allow_disable_notifications_wrapper(message: Message, **kwargs):
db = Database.get_current()
db.disable_notifications = True
db.save()
await edit_message(message, "✅ Настройка изменена")
@user.on.message_handler(FromMe(), text="<prefix:service_prefix> не выключать уведы")
@logger_decorator
async def deny_disable_notifications_wrapper(message: Message, **kwargs):
db = Database.get_current()
db.disable_notifications = False
db.save()
await edit_message(message, "✅ Настройка изменена")
|
"""
Copyright (c) 2020, creatable
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import aiohttp
import asyncio
import discord
import html
from discord.ext import commands
bot = commands.Bot(command_prefix='b!', description="""A utility bot for Reddit verification.
Copyright (c) 2020, creatable (https://creatable.cafe)""")
@bot.event
async def on_ready():
print("""
_ _ _
| | (_) | |
| |__ _ __ _ ___| | __
| '_ \| '__| |/ __| |/ /
| |_) | | | | (__| < _
|_.__/|_| |_|\___|_|\_(_)
by creatable""")
@bot.command()
async def verify(ctx, *args):
if len(args) != 0:
verifiedrole = discord.utils.get(ctx.guild.roles, name="Verified")
verifystring = f"""-----BEGIN BRICK VERIFICATION STRING-----
{ctx.author.id}
-----END BRICK VERIFICATION STRING-----"""
if verifiedrole in ctx.author.roles:
await ctx.send("ERROR: You're already verified!")
else:
async with aiohttp.ClientSession() as session:
async with session.get(f'https://www.reddit.com/user/{args[0]}/about.json', allow_redirects = False) as response:
if response.status != 404:
desc = html.unescape((await response.json())["data"]["subreddit"]["public_description"])
if (verifystring) in desc:
await ctx.author.add_roles(verifiedrole)
await ctx.author.edit(nick = f"u/{args[0]}")
await ctx.send("""Successfully verified!
You can now remove the verification string from your profile at <https://new.reddit.com/settings/profile> if you want.""")
else:
await ctx.send(f"""Go to <https://new.reddit.com/settings/profile> and add the following block to your "About" section:
```{verifystring}```
Then do `b!verify {discord.utils.escape_mentions(args[0])}` again to verify your Reddit account.""")
else:
await ctx.send("ERROR: I can't find that user.")
else:
await ctx.send("ERROR: No arguments were provided.")
bot.run('')
|
#!/usr/bin/env python3
import argparse
import json
import jsonschema
import os
import sys
r"""
Validates the phosphor-regulators configuration file. Checks it against a JSON
schema as well as doing some extra checks that can't be encoded in the schema.
"""
def handle_validation_error():
sys.exit("Validation failed.")
def get_values(json_element, key, result = None):
r"""
Finds all occurrences of a key within the specified JSON element and its
children. Returns the associated values.
To search the entire configuration file, pass the root JSON element
json_element: JSON element within the config file.
key: key name.
result: list of values found with the specified key.
"""
if result is None:
result = []
if type(json_element) is dict:
for json_key in json_element:
if json_key == key:
result.append(json_element[json_key])
elif type(json_element[json_key]) in (list, dict):
get_values(json_element[json_key], key, result)
elif type(json_element) is list:
for item in json_element:
if type(item) in (list, dict):
get_values(item, key, result)
return result
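# A minimal illustration of get_values: searching the sample element below
# for the key "id" returns both values, regardless of nesting depth. The rule
# IDs are hypothetical and not part of any real configuration file.
def _example_get_values_usage():
    sample = {"rules": [{"id": "set_voltage_rule"}, {"id": "monitor_rule"}]}
    return get_values(sample, "id")  # ["set_voltage_rule", "monitor_rule"]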
def get_rule_ids(config_json):
r"""
Get all rule IDs in the configuration file.
config_json: Configuration file JSON
"""
rule_ids = []
for rule in config_json.get('rules', {}):
rule_ids.append(rule['id'])
return rule_ids
def get_device_ids(config_json):
r"""
Get all device IDs in the configuration file.
config_json: Configuration file JSON
"""
device_ids = []
for chassis in config_json.get('chassis', {}):
for device in chassis.get('devices', {}):
device_ids.append(device['id'])
return device_ids
def check_number_of_elements_in_masks(config_json):
r"""
Check if the number of bit masks in the 'masks' property matches the number
of byte values in the 'values' property.
config_json: Configuration file JSON
"""
i2c_write_bytes = get_values(config_json, 'i2c_write_bytes')
i2c_compare_bytes = get_values(config_json, 'i2c_compare_bytes')
for object in i2c_write_bytes:
if 'masks' in object:
if len(object.get('masks', [])) != len(object.get('values', [])):
sys.stderr.write("Error: Invalid i2c_write_bytes action.\n"+\
"The masks array must have the same size as the values array. "+\
"masks: "+str(object.get('masks', []))+\
", values: "+str(object.get('values', []))+'.\n')
handle_validation_error()
for object in i2c_compare_bytes:
if 'masks' in object:
if len(object.get('masks', [])) != len(object.get('values', [])):
sys.stderr.write("Error: Invalid i2c_compare_bytes action.\n"+\
"The masks array must have the same size as the values array. "+\
"masks: "+str(object.get('masks', []))+\
", values: "+str(object.get('values', []))+'.\n')
handle_validation_error()
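# A minimal illustration of the constraint enforced above: the hypothetical
# i2c_write_bytes action below would fail validation because its masks and
# values arrays have different lengths.
_example_mismatched_write = {
    "register": "0x01",
    "values": ["0xFF", "0x0F"],
    "masks": ["0xFF"],
}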
def check_rule_id_exists(config_json):
r"""
Check if a rule_id property specifies a rule ID that does not exist.
config_json: Configuration file JSON
"""
rule_ids = get_values(config_json, 'rule_id')
valid_rule_ids = get_rule_ids(config_json)
for rule_id in rule_ids:
if rule_id not in valid_rule_ids:
sys.stderr.write("Error: Rule ID does not exist.\n"+\
"Found rule_id value that specifies invalid rule ID "+\
rule_id+'\n')
handle_validation_error()
def check_device_id_exists(config_json):
r"""
Check if a device_id property specifies a device ID that does not exist.
config_json: Configuration file JSON
"""
device_ids = get_values(config_json, 'device_id')
valid_device_ids = get_device_ids(config_json)
for device_id in device_ids:
if device_id not in valid_device_ids:
sys.stderr.write("Error: Device ID does not exist.\n"+\
"Found device_id value that specifies invalid device ID "+\
device_id+'\n')
handle_validation_error()
def check_set_device_value_exists(config_json):
r"""
Check if a set_device action specifies a device ID that does not exist.
config_json: Configuration file JSON
"""
device_ids = get_values(config_json, 'set_device')
valid_device_ids = get_device_ids(config_json)
for device_id in device_ids:
if device_id not in valid_device_ids:
sys.stderr.write("Error: Device ID does not exist.\n"+\
"Found set_device action that specifies invalid device ID "+\
device_id+'\n')
handle_validation_error()
def check_run_rule_value_exists(config_json):
r"""
Check if any run_rule actions specify a rule ID that does not exist.
config_json: Configuration file JSON
"""
rule_ids = get_values(config_json, 'run_rule')
valid_rule_ids = get_rule_ids(config_json)
for rule_id in rule_ids:
if rule_id not in valid_rule_ids:
sys.stderr.write("Error: Rule ID does not exist.\n"+\
"Found run_rule action that specifies invalid rule ID "+\
rule_id+'\n')
handle_validation_error()
def check_infinite_loops_in_rule(config_json, rule_json, call_stack=None):
    r"""
    Check if a 'run_rule' action in the specified rule causes an
    infinite loop.
    config_json: Configuration file JSON.
    rule_json: A rule in the JSON config file.
    call_stack: Current call stack of rules.
    """
    if call_stack is None:
        call_stack = []
    call_stack.append(rule_json['id'])
for action in rule_json.get('actions', {}):
if 'run_rule' in action:
run_rule_id = action['run_rule']
if run_rule_id in call_stack:
call_stack.append(run_rule_id)
sys.stderr.write(\
"Infinite loop caused by run_rule actions.\n"+\
str(call_stack)+'\n')
handle_validation_error()
else:
for rule in config_json.get('rules', {}):
if rule['id'] == run_rule_id:
check_infinite_loops_in_rule(\
config_json, rule, call_stack)
call_stack.pop()
def check_infinite_loops(config_json):
r"""
Check if rule in config file is called recursively, causing an
infinite loop.
config_json: Configuration file JSON
"""
for rule in config_json.get('rules', {}):
check_infinite_loops_in_rule(config_json, rule)
def check_duplicate_object_id(config_json):
r"""
Check that there aren't any JSON objects with the same 'id' property value.
config_json: Configuration file JSON
"""
json_ids = get_values(config_json, 'id')
unique_ids = set()
for id in json_ids:
if id in unique_ids:
sys.stderr.write("Error: Duplicate ID.\n"+\
"Found multiple objects with the ID "+id+'\n')
handle_validation_error()
else:
unique_ids.add(id)
def check_duplicate_rule_id(config_json):
r"""
Check that there aren't any "rule" elements with the same 'id' field.
config_json: Configuration file JSON
"""
rule_ids = []
for rule in config_json.get('rules', {}):
rule_id = rule['id']
if rule_id in rule_ids:
sys.stderr.write("Error: Duplicate rule ID.\n"+\
"Found multiple rules with the ID "+rule_id+'\n')
handle_validation_error()
else:
rule_ids.append(rule_id)
def check_duplicate_chassis_number(config_json):
r"""
Check that there aren't any "chassis" elements with the same 'number' field.
config_json: Configuration file JSON
"""
numbers = []
for chassis in config_json.get('chassis', {}):
number = chassis['number']
if number in numbers:
sys.stderr.write("Error: Duplicate chassis number.\n"+\
"Found multiple chassis with the number "+str(number)+'\n')
handle_validation_error()
else:
numbers.append(number)
def check_duplicate_device_id(config_json):
r"""
Check that there aren't any "devices" with the same 'id' field.
config_json: Configuration file JSON
"""
device_ids = []
for chassis in config_json.get('chassis', {}):
for device in chassis.get('devices', {}):
device_id = device['id']
if device_id in device_ids:
sys.stderr.write("Error: Duplicate device ID.\n"+\
"Found multiple devices with the ID "+device_id+'\n')
handle_validation_error()
else:
device_ids.append(device_id)
def check_duplicate_rail_id(config_json):
r"""
Check that there aren't any "rails" with the same 'id' field.
config_json: Configuration file JSON
"""
rail_ids = []
for chassis in config_json.get('chassis', {}):
for device in chassis.get('devices', {}):
for rail in device.get('rails', {}):
rail_id = rail['id']
if rail_id in rail_ids:
sys.stderr.write("Error: Duplicate rail ID.\n"+\
"Found multiple rails with the ID "+rail_id+'\n')
handle_validation_error()
else:
rail_ids.append(rail_id)
def check_for_duplicates(config_json):
r"""
    Check for duplicate IDs: rule IDs, chassis numbers, device IDs, rail IDs
    and object IDs.
    config_json: Configuration file JSON
"""
check_duplicate_rule_id(config_json)
check_duplicate_chassis_number(config_json)
check_duplicate_device_id(config_json)
check_duplicate_rail_id(config_json)
check_duplicate_object_id(config_json)
def validate_schema(config, schema):
r"""
Validates the specified config file using the specified
schema file.
config: Path of the file containing the config JSON
schema: Path of the file containing the schema JSON
"""
with open(config) as config_handle:
config_json = json.load(config_handle)
with open(schema) as schema_handle:
schema_json = json.load(schema_handle)
try:
jsonschema.validate(config_json, schema_json)
except jsonschema.ValidationError as e:
print(e)
handle_validation_error()
return config_json
def validate_JSON_format(file):
    r"""
    Check whether the specified file contains syntactically valid JSON.
    file: Path of the file to check.
    """
    with open(file) as json_data:
        try:
            json.load(json_data)
        except ValueError:
            return False
    return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='phosphor-regulators configuration file validator')
parser.add_argument('-s', '--schema-file', dest='schema_file',
help='The phosphor-regulators schema file')
parser.add_argument('-c', '--configuration-file', dest='configuration_file',
help='The phosphor-regulators configuration file')
args = parser.parse_args()
if not args.schema_file:
parser.print_help()
sys.exit("Error: Schema file is required.")
if not os.path.exists(args.schema_file):
parser.print_help()
sys.exit("Error: Schema file does not exist.")
if not os.access(args.schema_file, os.R_OK):
parser.print_help()
sys.exit("Error: Schema file is not readable.")
if not validate_JSON_format(args.schema_file):
parser.print_help()
sys.exit("Error: Schema file is not in the JSON format.")
if not args.configuration_file:
parser.print_help()
sys.exit("Error: Configuration file is required.")
if not os.path.exists(args.configuration_file):
parser.print_help()
sys.exit("Error: Configuration file does not exist.")
if not os.access(args.configuration_file, os.R_OK):
parser.print_help()
sys.exit("Error: Configuration file is not readable.")
if not validate_JSON_format(args.configuration_file):
parser.print_help()
sys.exit("Error: Configuration file is not in the JSON format.")
config_json = validate_schema(args.configuration_file, args.schema_file)
check_for_duplicates(config_json)
check_infinite_loops(config_json)
check_run_rule_value_exists(config_json)
check_set_device_value_exists(config_json)
check_rule_id_exists(config_json)
check_device_id_exists(config_json)
check_number_of_elements_in_masks(config_json)
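# Example invocation (a sketch; the file names below are hypothetical):
#   python3 validate_config.py -s config_schema.json -c regulators_config.json
# On success the script exits silently; any failed check prints an error and
# exits via handle_validation_error().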
|
import requests
from bs4 import BeautifulSoup
class API:
def __init__(self, auth):
self.auth = auth
self.api = 'https://api.playr.gg/api/enter'
self.headers = {
'Accept': "application/json, text/plain, */*",
'Accept-Encoding': "gzip, deflate, br",
'Accept-Language': "en-GB, en;q=0.5",
            'Authorization': self.auth,  # an Authorization token is required; without it the API responds with 'Missing JWT Token'
'Host': "api.playr.gg",
'Origin': 'https://playr.gg',
'sec-fetch-dest': "empty",
'sec-fetch-mode': "cors",
'sec-fetch-site': "same-site",
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0"
}
self.params = {
"dry_run": False,
"entry_method": "playr_secret_code"
}
    def send_post(self, params=None):
        """
        Send a POST request to the API.
        :param params: extra query parameters merged into the default params
        :return: the response body as text
        """
        if params:
            self.params.update(params)
r = requests.post(self.api, params=self.params, headers=self.headers) # sending the post request
self.params = { # resetting the params
"dry_run": False,
"entry_method": "playr_secret_code"
}
return r.text # returning the response
@staticmethod
def get_auth():
try:
r = requests.get('https://pastebin.com/UMWjEWdg').text
        except requests.RequestException:
return 'None'
soup = BeautifulSoup(r, 'lxml')
return soup.find(class_='de1').text
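# Illustrative usage sketch (the JWT token below is a placeholder; any extra
# entry parameters beyond the defaults depend on the specific giveaway):
# api = API(auth='Bearer <jwt-token>')
# print(api.send_post())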
|
from django.urls import reverse
from rest_framework import status
from main.tests.api import helpers
class TestPermissions(helpers.BaseUserTestCase):
"""
Test Permissions
Get: authenticated
Update: admin
Create: admin
Delete: admin
"""
def test_get(self):
urls = [
reverse('api:program-list'),
reverse('api:program-detail', kwargs={'pk': self.program_1.pk})
]
access = {
"forbidden": [self.anonymous_client],
"allowed": [
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.data_engineer_1_client,
self.data_engineer_2_client,
self.admin_client
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.get(url).status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.get(url).status_code,
status.HTTP_200_OK
)
def test_create(self):
"""
Only admin
:return:
"""
urls = [reverse('api:program-list')]
data = {
"name": "A new program for Unit test",
"code": "T1234",
"data_engineers": [self.data_engineer_1_user.pk]
}
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.data_engineer_1_client,
self.data_engineer_2_client
],
"allowed": [
self.admin_client,
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.post(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
                # Name must be unique
data['name'] += '1'
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
def test_put(self):
"""
Only admin
:return:
"""
urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})]
data = {
"name": "A new program for Unit test",
"code": "T1234",
"data_engineers": [self.data_engineer_1_user.pk]
}
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.data_engineer_1_client,
self.data_engineer_2_client
],
"allowed": [
self.admin_client,
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.put(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
                # Name must be unique
data['name'] += '1'
self.assertEqual(
client.put(url, data, format='json').status_code,
status.HTTP_200_OK
)
def test_patch(self):
"""
Only admin
:return:
"""
urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})]
data = {
"code": "XXXX",
}
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.data_engineer_1_client,
self.data_engineer_2_client
],
"allowed": [
self.admin_client,
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.patch(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.patch(url, data, format='json').status_code,
status.HTTP_200_OK
)
def test_delete(self):
"""
Admin only
:return:
"""
urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})]
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.data_engineer_1_client,
self.data_engineer_2_client
],
"allowed": [
self.admin_client,
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.delete(url, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.delete(url, format='json').status_code,
status.HTTP_204_NO_CONTENT
)
|
from rest_framework import serializers
from .models import Movies
class MoviesSerializer(serializers.ModelSerializer):
class Meta:
model = Movies
fields = [
            'id', 'user_main', 'title', 'director', 'acts', 'created_at'
]
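# Illustrative usage sketch (assumes some Movies rows exist):
# serializer = MoviesSerializer(Movies.objects.all(), many=True)
# serializer.data  # -> list of dicts with the fields declared above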
|
import os
INSTALLED_APPS = [
'django.contrib.staticfiles',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages', 'django.contrib.sessions',
'django.contrib.admin',
'octopus',
'test_app',
'django.contrib.sites'
]
SECRET_KEY = '1'
DEBUG = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
)
APPEND_SLASH = True
root_dir = os.path.dirname(os.path.realpath(__file__))
STATIC_ROOT = os.path.join(root_dir, 'static')
# STATICFILES_DIRS = [STATIC_ROOT]
print(STATIC_ROOT)
TEMPLATE_DIRECTORIES = (os.path.join(root_dir, 'test_app/templates'),)
MIDDLEWARE_CLASSES = ('django.middleware.csrf.CsrfViewMiddleware',)
ROOT_URLCONF = "test_app.urls"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.db',
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATE_DIRECTORIES,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnabla.logger import logger
import nnabla.function as F
def print_network_traceback(funcs):
logger.critical('Network traceback:')
for i, func in enumerate(funcs):
logger.critical('{}{}'.format(
'->' if i == len(funcs) - 1 else ' ', func.name))
class Network:
def setup_function(self, func):
try:
func.function_instance.setup(
func.variable_inputs, func.variable_outputs)
except:
logger.critical('An error occurred while setup of function {} (nn.{}) in network {}'.format(
func.name, func.function_instance.name, self.name))
logger.critical('Input variables:')
for v in func.inputs:
logger.critical(' {} (shape: {}, design_shape: {})'.format(
v.name, str(v.variable_instance.shape), str(v.shape)))
logger.critical('Output variables:')
for v in func.outputs:
logger.critical(' {} (shape: {}, design_shape: {})'.format(
v.name, str(v.variable_instance.shape), str(v.shape)))
raise
# logger.debug('Setup: {} {}'.format(func.name, func.function_instance.name))
def get_forward_sequence(self, loss_variables):
forward_sequence = []
for func in self.functions.values():
func.forward_complete = False
for loss in loss_variables:
self.__forward_recursive(forward_sequence, variable=loss)
return forward_sequence
def __forward_recursive(self, forward_sequence, variable=None, function=None):
if not function and variable not in self.variable_inputs:
return
for func in [function] if function else self.variable_inputs[variable]:
if func.forward_complete:
continue
for input_function in func.input_functions:
self.__forward_recursive(
forward_sequence, function=input_function)
forward_sequence.append(func)
func.forward_complete = True
def forward(self, forward_sequence):
for func in forward_sequence:
try:
self.forward_function(func)
except:
index = forward_sequence.index(func)
print_network_traceback(
forward_sequence[max(0, index - 4):index + 1])
raise
def forward_function(self, func):
try:
# Uncomment when debugging expand_recurrent
# print(func.name)
# print(func.function_instance)
# for n, inp in enumerate(func.variable_inputs):
# print(' IN:', n, inp.shape, inp.d.flatten()[0])
func.function_instance.forward(
func.variable_inputs, func.variable_outputs)
# Uncomment when debugging expand_recurrent
# for n, out in enumerate(func.variable_outputs):
# print(' OUT:', n, out.shape, out.d.flatten()[0])
except:
logger.critical('An error occurred while executing forward of function {} (nn.{}) in network {}'.format(
func.name, func.function_instance.name, self.name))
raise
def get_backward_sequence(self, loss_variables, parameter_variables_and_locallr):
class BackwardSequence:
loss_variables = []
variables = []
grad_variables = []
unused_variables = []
parameters = []
sequence = []
backward_sequence = BackwardSequence()
backward_sequence.loss_variables = [
v.variable_instance for v in loss_variables]
for p, lr in parameter_variables_and_locallr.items():
if lr > 0.0:
backward_sequence.parameters.append(p.variable_instance)
for func in self.functions.values():
func.backward_complete = False
for p, local_lr in parameter_variables_and_locallr.items():
if local_lr > 0.0:
self.__backward_recursive(
backward_sequence, loss_variables, variable=p)
for seq in backward_sequence.sequence:
backward_sequence.variables.extend(seq.func.variable_outputs)
for v in self.variables.values():
vi = v.variable_instance
if vi not in backward_sequence.variables and vi not in backward_sequence.parameters:
backward_sequence.unused_variables.append(vi)
return backward_sequence
def __backward_recursive(self, backward_sequence, loss_variables, variable=None, function=None):
# logger.debug('bwcall: {}'.format(function.name if function else ''))
if not function and variable not in self.variable_outputs:
# terminal variable
return variable in self.loss_variables
diff_exists = False
for func in [function] if function else self.variable_outputs[variable]:
if func.backward_complete:
diff_exists = True
continue
func.backward_complete = True
for output_function in func.output_functions:
if func.output_functions:
diff = self.__backward_recursive(
backward_sequence, loss_variables, function=output_function)
diff_exists = diff_exists or diff
else:
# terminal function
for v in loss_variables:
diff_exists = diff_exists or (v in func.outputs)
if diff_exists:
if backward_sequence is not None:
class BackwardSequenceItem:
func = None
accum_grad = []
seq = BackwardSequenceItem()
seq.func = func
for i, v in enumerate(func.variable_inputs):
accum = (
v in backward_sequence.grad_variables or v in backward_sequence.parameters) and not func.function_instance.inplace_grad(i)
seq.accum_grad.append(accum)
if not v in backward_sequence.grad_variables:
backward_sequence.grad_variables.append(v)
backward_sequence.sequence.append(seq)
return diff_exists
def prepare_backward(self, backward_sequence, parameter_zero_grad=True):
for v in backward_sequence.unused_variables:
v.need_grad = False
for p in backward_sequence.parameters:
p.need_grad = True
if parameter_zero_grad:
p.grad.zero()
for v in backward_sequence.variables:
v.need_grad = True
for l in backward_sequence.loss_variables:
l.grad.fill(1.0 / l.size)
def backward(self, backward_sequence, parameter_zero_grad=True):
self.prepare_backward(backward_sequence, parameter_zero_grad)
for seq in backward_sequence.sequence:
try:
self.backward_function(seq)
except:
index = backward_sequence.sequence.index(seq)
print_network_traceback(
[seq.func for seq in backward_sequence.sequence[max(0, index - 4):index + 1]])
raise
def backward_function(self, seq):
try:
seq.func.function_instance.backward(
seq.func.variable_inputs, seq.func.variable_outputs, seq.accum_grad)
except:
logger.critical('An error occurred while executing backward of function {} (nn.{}) in network {}'.format(
seq.func.name, seq.func.function_instance.name, self.name))
raise
# logger.debug('Backward: {} {}'.format(func.name, func.function_instance.name))
def setup(self, optimize=False):
if optimize:
for func in list(self.functions.values()):
# remove identity layer
if func.function_instance.name[0:8] == "Identity":
assert(len(func.inputs) == 1)
assert(len(func.outputs) == 1)
# if the identity function is not terminal (keep terminal
# identity function)
if func.outputs[0] in self.variable_outputs:
next_functions = self.variable_outputs[func.outputs[0]]
self.variable_outputs[func.inputs[0]].remove(func)
self.variable_outputs[
func.inputs[0]].extend(next_functions)
for next_function in next_functions:
next_function.inputs = [func.inputs[0] if v == func.outputs[
0] else v for v in next_function.inputs]
del self.functions[func.name]
del self.variables[func.outputs[0].name]
# create variable instances
for variable in self.variables.values():
if variable.variable_instance.shape != variable.shape:
if hasattr(variable.variable_instance, 'reset_shape'):
variable.variable_instance.reset_shape(
variable.shape, force=True)
else:
variable.variable_instance.reshape(
variable.shape, force=True)
# setup functions
for i, func in enumerate(self.functions.values()):
func.variable_inputs = [v.variable_instance for v in func.inputs]
func.variable_outputs = [v.variable_instance for v in func.outputs]
try:
self.setup_function(func)
except:
print_network_traceback(list(self.functions.values())[
max(0, i - 4):i + 1])
raise
# set link structure to each layer
from itertools import chain
for func in self.functions.values():
func.input_functions = list(chain.from_iterable(
[self.variable_inputs[v] for v in func.inputs if v in self.variable_inputs]))
func.output_functions = list(chain.from_iterable(
[self.variable_outputs[v] for v in func.outputs if v in self.variable_outputs]))
logger.debug(func.name)
logger.debug(' in: {}'.format(
[f.name for f in func.input_functions]))
logger.debug(' out: {}'.format(
[f.name for f in func.output_functions]))
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: flatbuf
import flatbuffers
class Interval(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsInterval(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Interval()
x.Init(buf, n + offset)
return x
# Interval
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Interval
def Unit(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 0
def IntervalStart(builder): builder.StartObject(1)
def IntervalAddUnit(builder, unit): builder.PrependInt16Slot(0, unit, 0)
def IntervalEnd(builder): return builder.EndObject()
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
from .models import Profile
class UserRegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
    # first_name and last_name are generated from the User model via Meta.fields
    # below; their placeholder widgets are set in Meta.widgets.
class Meta:
model = User
fields = ['username','email','first_name','last_name','password1','password2',]
widgets = {
'username': forms.fields.TextInput(attrs={'placeholder': 'username'}),
'email': forms.fields.TextInput(attrs={'placeholder': 'example@foodieshoot.com'}),
'first_name': forms.fields.TextInput(attrs={'placeholder': 'First name'}),
'last_name': forms.fields.TextInput(attrs={'placeholder': 'Last name'}),
}
def clean_email(self):
email = self.cleaned_data.get('email')
username = self.cleaned_data.get('username')
if email and User.objects.filter(email=email).exclude(username=username).exists():
raise forms.ValidationError(u'Email addresses must be unique.')
return email
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
def __init__(self, *args, **kwargs):
super(UserUpdateForm, self).__init__(*args, **kwargs)
for fieldname in ['username','email',]:
self.fields[fieldname].help_text = None
class Meta:
model = User
fields = ['username', 'email',]
def clean_email(self):
email = self.cleaned_data.get('email')
username = self.cleaned_data.get('username')
if email and User.objects.filter(email=email).exclude(username=username).exists():
raise forms.ValidationError(u'Email addresses must be unique.')
return email
class ProfileUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
for fieldname in ['image',]:
self.fields[fieldname].help_text = None
class Meta:
model = Profile
fields = ['image']
|
import sys
import os
import yaml #PyYAML must be installed
import languageSwitcher
import CPlusPlusLanguageSwitcher
import CLanguageSwitcher
import JavaLanguageSwitcher
import PythonLanguageSwitcher
from UnsupportedLanguageException import *
sys.path.append("../util")
from Util import supportedLanguages
class LanguageSwitcherFactory:
extMap = {}
@staticmethod
def loadLanguageMap(langFile = "../../Resources/languages.yml"):
with open(langFile, 'r') as f:
LanguageSwitcherFactory.extMap = yaml.safe_load(f)
#Create a new language switcher of the correct type.
@staticmethod
def createLS(language):
if(LanguageSwitcherFactory.extMap == {}):
LanguageSwitcherFactory.loadLanguageMap("../../Resources/languages.yml")
return LanguageSwitcherFactory.determineLanguage(language)
#String -> String
#Given either a language name or a file extension for a language, return a normalized language string
#to use
@staticmethod
def determineLanguage(language): #Replace these with tokens?
language = language.strip()
#Check for names
if(language.lower() == "c++" or language.lower() in LanguageSwitcherFactory.extMap["C++"]["extensions"]):
return CPlusPlusLanguageSwitcher.CPlusPlusLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C++"]["extensions"]))
elif(language.lower() == "c" or language.lower() in LanguageSwitcherFactory.extMap["C"]["extensions"]):
return CLanguageSwitcher.CLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C"]["extensions"]))
elif(language.lower() == "java" or language.lower() in LanguageSwitcherFactory.extMap["Java"]["extensions"]):
return JavaLanguageSwitcher.JavaLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Java"]["extensions"]))
elif(language.lower() == "python" or language.lower() in LanguageSwitcherFactory.extMap["Python"]["extensions"]):
return PythonLanguageSwitcher.PythonLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Python"]["extensions"]))
else:
print((LanguageSwitcherFactory.extMap["C"]["extensions"]))
raise UnsupportedLanguageException(language + " not yet supported.")
@staticmethod
def getExtensions(languages):
'''
Given some languages, return the set of extensions associated with them. If no languages
are given or none in the set are recognized, return the extensions for all recognized languages.
If only a portion are recognized, return the set of extensions for just these languages.
'''
extensions = set()
for l in languages:
try:
extensions.update(LanguageSwitcherFactory.createLS(l).getExtensions())
except UnsupportedLanguageException: #skip unrecognized languages
pass
if (len(extensions) == 0):
            return LanguageSwitcherFactory.getExtensions(supportedLanguages)
else:
return extensions
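# Illustrative usage sketch (language names must be listed in Resources/languages.yml):
# switcher = LanguageSwitcherFactory.createLS("python")
# extensions = LanguageSwitcherFactory.getExtensions(["java", "c++"])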
|
from typing import Union, Dict, List
from src.contexts.shared.domain.errors.DomainError import DomainError
class CryptoKeyInvalidValueError(DomainError):
ERROR_ID = '8fd818c5-10dc-4639-82ac-d4b37394517d'
def __init__(self, msg: str = None):
if msg is None:
msg = 'Invalid value for CryptoKey found.'
self.message = msg
def to_primitives(self) -> Union[Dict, List]:
return {
'message': self.message,
'id': self.ERROR_ID,
}
def get_id(self) -> str:
return self.ERROR_ID
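# Illustrative usage sketch:
# err = CryptoKeyInvalidValueError()
# err.to_primitives()  # -> {'message': 'Invalid value for CryptoKey found.', 'id': err.ERROR_ID}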
|
# -*- coding: utf-8 -*-
__author__ = 'Ed Patrick Tan'
__email__ = 'pat.keeps.looking.up@gmail.com'
__version__ = '0.1.0'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
__version__ = '1.0.1'
setup(
name='google_music_manager_auth',
python_requires=">=3",
version=__version__,
packages=find_packages(),
author="Jay MOULIN",
author_email="jaymoulin@gmail.com",
description="Google MusicManager package to manage your music library to Google Music - Auth module",
long_description=open('README.rst').read(),
install_requires=["gmusicapi"],
include_package_data=True,
url='http://github.com/jaymoulin/google-music-manager/',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Communications :: File Sharing",
"Topic :: Artistic Software",
"Topic :: Internet :: File Transfer Protocol (FTP)",
"Topic :: Home Automation",
"Topic :: Internet",
"Topic :: Multimedia :: Sound/Audio",
],
entry_points={
'console_scripts': [
'google-music-auth = google_music_manager_auth.auth:main',
],
},
license="MIT",
)
|
#
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter
from collections import defaultdict
from collections import OrderedDict
from functools import partial
from itertools import chain
import io
from operator import itemgetter
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
import traceback
import attr
from license_expression import Licensing
from commoncode.fileutils import copyfile
from commoncode.fileutils import file_base_name
from commoncode.fileutils import file_name
from commoncode.fileutils import resource_iter
from commoncode import saneyaml
from licensedcode import MIN_MATCH_HIGH_LENGTH
from licensedcode import MIN_MATCH_LENGTH
from licensedcode import SMALL_RULE
from licensedcode.tokenize import query_tokenizer
from textcode.analysis import numbered_text_lines
"""
Reference License and license Rule structures persisted as a combo of a YAML
data file and one or more text files containing license or notice texts.
"""
# Set to True to print more detailed representations of objects when tracing
TRACE_REPR = False
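# Illustrative on-disk layout under the data directories defined below (the
# 'mit' key is only an example):
#   data/licenses/mit.yml      <- license data in YAML
#   data/licenses/mit.LICENSE  <- license text
#   data/rules/mit_12.yml      <- rule data in YAML
#   data/rules/mit_12.RULE     <- rule text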
# these are globals but always side-by-side with the code so do not move them around
data_dir = join(abspath(dirname(__file__)), 'data')
licenses_data_dir = join(data_dir, 'licenses')
rules_data_dir = join(data_dir, 'rules')
FOSS_CATEGORIES = set([
'Copyleft',
'Copyleft Limited',
'Patent License',
'Permissive',
'Public Domain',
])
OTHER_CATEGORIES = set([
'Commercial',
'Proprietary Free',
'Free Restricted',
'Source-available',
'Unstated License',
])
CATEGORIES = FOSS_CATEGORIES | OTHER_CATEGORIES
@attr.s(slots=True)
class License(object):
"""
A license consists of these files, where <key> is the license key:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
A License object is identified by a unique `key` and its data stored in the
`src_dir` directory. Key is a lower-case unique ascii string.
"""
__attrib = partial(attr.ib, repr=False)
# unique key: lower case ASCII characters, digits, underscore and dots.
key = attr.ib(default=None, repr=True)
src_dir = __attrib(default=licenses_data_dir)
# if this is a deprecated license, add also notes explaining why
is_deprecated = __attrib(default=False)
# if this license text is not in English, set this field to a two letter
# ISO 639-1 language code https://en.wikipedia.org/wiki/ISO_639-1
# NOTE: this is not yet supported.
# NOTE: each translation of a license text MUST have a different license key
language = __attrib(default='en')
# commonly used short name, often abbreviated.
short_name = __attrib(default=None)
# full name.
name = __attrib(default=None)
# Permissive, Copyleft, etc
category = __attrib(default=None)
owner = __attrib(default=None)
homepage_url = __attrib(default=None)
notes = __attrib(default=None)
# if this is a license exception, the license key this exception applies to
is_exception = __attrib(default=False)
# SPDX key for SPDX licenses
spdx_license_key = __attrib(default=None)
# list of other keys, such as deprecated ones
other_spdx_license_keys = __attrib(default=attr.Factory(list))
# OSI License Key
osi_license_key = __attrib(default=None)
# Various URLs for info
text_urls = __attrib(default=attr.Factory(list))
osi_url = __attrib(default=None)
faq_url = __attrib(default=None)
other_urls = __attrib(default=attr.Factory(list))
# various alternate keys for this license
key_aliases = __attrib(default=attr.Factory(list))
minimum_coverage = __attrib(default=0)
standard_notice = __attrib(default=None)
    # lists of copyrights, emails and URLs that can be ignored when detected
# in this license as they are part of the license or rule text itself
ignorable_copyrights = __attrib(default=attr.Factory(list))
ignorable_authors = __attrib(default=attr.Factory(list))
ignorable_holders = __attrib(default=attr.Factory(list))
ignorable_urls = __attrib(default=attr.Factory(list))
ignorable_emails = __attrib(default=attr.Factory(list))
# data file paths and known extensions
data_file = __attrib(default=None)
text_file = __attrib(default=None)
def __attrs_post_init__(self, *args, **kwargs):
if self.src_dir:
self.set_file_paths()
if exists(self.data_file):
self.load()
def set_file_paths(self):
self.data_file = join(self.src_dir, self.key + '.yml')
self.text_file = join(self.src_dir, self.key + '.LICENSE')
def relocate(self, target_dir, new_key=None):
"""
        Return a copy of this license object relocated to a new `src_dir`.
The data and license text files are persisted in the new `src_dir`.
"""
if not target_dir or target_dir == self.src_dir:
raise ValueError(
'Cannot relocate {} License to empty directory or same directory.'.format(self.key))
if new_key:
key = new_key
else:
key = self.key
newl = License(key, target_dir)
# copy fields
excluded_fields = ('key', 'src_dir', 'data_file', 'text_file',)
all_fields = attr.fields(self.__class__)
attrs = [f.name for f in all_fields if f.name not in excluded_fields]
for name in attrs:
setattr(newl, name, getattr(self, name))
# save it all to files
if self.text:
copyfile(self.text_file, newl.text_file)
newl.dump()
return newl
def update(self, mapping):
for k, v in mapping.items():
setattr(self, k, v)
def __copy__(self):
oldl = self.to_dict()
newl = License(key=self.key)
newl.update(oldl)
return newl
@property
def text(self):
"""
License text, re-loaded on demand.
"""
return self._read_text(self.text_file)
def to_dict(self):
"""
Return an OrderedDict of license data (excluding texts).
Fields with empty values are not included.
"""
# do not dump false, empties and paths
def dict_fields(attr, value):
if not value:
return False
if attr.name in ('data_file', 'text_file', 'src_dir',):
return False
# default to English
if attr.name == 'language' and value == 'en':
return False
if attr.name == 'minimum_coverage' and value == 100:
return False
return True
data = attr.asdict(self, filter=dict_fields, dict_factory=OrderedDict)
cv = data.get('minimum_coverage')
if cv and isinstance(cv, float) and int(cv) == cv:
cv = int(cv)
data['minimum_coverage'] = cv
return data
def dump(self):
"""
Dump a representation of this license as two files:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
"""
def write(location, byte_string):
            # we write as binary because rule and license texts and data are UTF-8-encoded bytes
with io.open(location, 'wb') as of:
of.write(byte_string)
as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8')
write(self.data_file, as_yaml)
if self.text:
write(self.text_file, self.text.encode('utf-8'))
def load(self):
"""
        Populate license data from a YAML file stored in self.src_dir.
Does not load text files.
Unknown fields are ignored and not bound to the License object.
"""
try:
with io.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read())
numeric_keys = ('minimum_coverage', 'relevance')
for k, v in data.items():
if k in numeric_keys:
v = int(v)
if k == 'key':
assert self.key == v, 'Inconsistent YAML key and file names for %r' % self.key
setattr(self, k, v)
except Exception as e:
# this is a rare case: fail loudly
print()
print('#############################')
print('INVALID LICENSE YAML FILE:', 'file://' + self.data_file)
print('#############################')
print(e)
print('#############################')
raise
def _read_text(self, location):
if not exists(location):
text = ''
else:
with io.open(location, encoding='utf-8') as f:
text = f.read()
return text
def spdx_keys(self):
"""
Yield SPDX keys for this license.
"""
if self.spdx_license_key:
yield self.spdx_license_key
for key in self.other_spdx_license_keys:
yield key
@staticmethod
def validate(licenses, verbose=False, no_dupe_urls=False):
"""
Check that licenses are valid. `licenses` is a mapping of key ->
License. Return dictionaries of infos, errors and warnings mapping a
license key to validation issue messages. Print messages if verbose is
True.
NOTE: we DO NOT run this validation as part of the loading or
construction of License objects. Instead this is invoked ONLY as part of
the test suite.
"""
infos = defaultdict(list)
warnings = defaultdict(list)
errors = defaultdict(list)
# used for global dedupe of texts
by_spdx_key = defaultdict(list)
by_text = defaultdict(list)
by_short_name = defaultdict(list)
by_name = defaultdict(list)
for key, lic in licenses.items():
warn = warnings[key].append
info = infos[key].append
error = errors[key].append
by_name[lic.name].append(lic)
by_short_name[lic.short_name].append(lic)
if not lic.short_name:
error('No short name')
if not lic.name:
error('No name')
if not lic.category:
error('No category')
if lic.category and lic.category not in CATEGORIES:
cats = '\n'.join(sorted(CATEGORIES))
error('Unknown license category: {}.\nUse one of these valid categories:\n{}'.format(lic.category, cats))
if not lic.owner:
error('No owner')
# URLS dedupe and consistency
if no_dupe_urls:
if lic.text_urls and not all(lic.text_urls):
warn('Some empty text_urls values')
if lic.other_urls and not all(lic.other_urls):
warn('Some empty other_urls values')
# redundant URLs used multiple times
if lic.homepage_url:
if lic.homepage_url in lic.text_urls:
warn('Homepage URL also in text_urls')
if lic.homepage_url in lic.other_urls:
warn('Homepage URL also in other_urls')
if lic.homepage_url == lic.faq_url:
warn('Homepage URL same as faq_url')
if lic.homepage_url == lic.osi_url:
warn('Homepage URL same as osi_url')
if lic.osi_url or lic.faq_url:
if lic.osi_url == lic.faq_url:
warn('osi_url same as faq_url')
                all_urls = lic.text_urls + lic.other_urls
                for url in lic.osi_url, lic.faq_url, lic.homepage_url:
                    if url:
                        all_urls.append(url)
                if len(all_urls) != len(set(all_urls)):
                    warn('Some duplicated URLs')
# local text consistency
text = lic.text
license_qtokens = tuple(query_tokenizer(text))
if not license_qtokens:
info('No license text')
else:
# for global dedupe
by_text[license_qtokens].append(key + ': TEXT')
# SPDX consistency
if lic.spdx_license_key:
by_spdx_key[lic.spdx_license_key].append(key)
for oslk in lic.other_spdx_license_keys:
by_spdx_key[oslk].append(key)
# global SPDX consistency
multiple_spdx_keys_used = {k: v for k, v in by_spdx_key.items() if len(v) > 1}
if multiple_spdx_keys_used:
for k, lkeys in multiple_spdx_keys_used.items():
errors['GLOBAL'].append('SPDX key: ' + k + ' used in multiple licenses: ' + ', '.join(sorted(lkeys)))
# global text dedupe
multiple_texts = {k: v for k, v in by_text.items() if len(v) > 1}
if multiple_texts:
for k, msgs in multiple_texts.items():
errors['GLOBAL'].append('Duplicate texts in multiple licenses:' + ', '.join(sorted(msgs)))
# global short_name dedupe
for short_name, licenses in by_short_name.items():
if len(licenses) == 1:
continue
errors['GLOBAL'].append('Duplicate short name:' + short_name + ' in licenses:' + ', '.join(l.key for l in licenses))
# global name dedupe
for name, licenses in by_name.items():
if len(licenses) == 1:
continue
errors['GLOBAL'].append('Duplicate name:' + name + ' in licenses:' + ', '.join(l.key for l in licenses))
errors = {k: v for k, v in errors.items() if v}
warnings = {k: v for k, v in warnings.items() if v}
infos = {k: v for k, v in infos.items() if v}
if verbose:
print('Licenses validation errors:')
for key, msgs in sorted(errors.items()):
print('ERRORS for:', key, ':', '\n'.join(msgs))
print('Licenses validation warnings:')
for key, msgs in sorted(warnings.items()):
print('WARNINGS for:', key, ':', '\n'.join(msgs))
print('Licenses validation infos:')
for key, msgs in sorted(infos.items()):
print('INFOS for:', key, ':', '\n'.join(msgs))
return errors, warnings, infos
def ignore_editor_tmp_files(location):
return location.endswith('.swp')
def load_licenses(licenses_data_dir=licenses_data_dir, with_deprecated=False):
"""
Return a mapping of key -> license objects, loaded from license files.
    Raise an Exception if there are dangling orphaned files.
"""
licenses = {}
used_files = set()
all_files = set(resource_iter(licenses_data_dir, ignored=ignore_editor_tmp_files, with_dirs=False))
for data_file in sorted(all_files):
if data_file.endswith('.yml'):
key = file_base_name(data_file)
lic = License(key, licenses_data_dir)
used_files.add(data_file)
if exists(lic.text_file):
used_files.add(lic.text_file)
if not with_deprecated and lic.is_deprecated:
continue
licenses[key] = lic
dangling = all_files.difference(used_files)
if dangling:
msg = 'Some License data or text files are orphaned in "{}".\n'.format(licenses_data_dir)
msg += '\n'.join('file://{}'.format(f) for f in sorted(dangling))
raise Exception(msg)
return licenses
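# Illustrative usage sketch (the 'mit' key is hypothetical and must exist on disk):
# licenses_by_key = load_licenses()
# mit_license = licenses_by_key.get('mit')
# print(mit_license.spdx_license_key if mit_license else 'not found')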
def get_rules(licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir):
"""
Yield Rule objects loaded from license files found in `licenses_data_dir`
    and rule files found in `rules_data_dir`. Raise an Exception if a rule is
inconsistent or incorrect.
"""
from licensedcode.cache import get_licenses_db
licenses = get_licenses_db(licenses_data_dir=licenses_data_dir)
rules = list(load_rules(rules_data_dir=rules_data_dir))
check_rules_integrity(rules, licenses)
licenses_as_rules = build_rules_from_licenses(licenses)
return chain(licenses_as_rules, rules)
class MissingLicenses(Exception):
pass
class MissingFlags(Exception):
pass
def check_rules_integrity(rules, licenses_by_key):
"""
Given a lists of `rules`, check that all the rule license keys reference a
known license from a mapping of `licenses_by_key `(key->license). Raise a
MissingLicense exception with a message containing the list of rule files
without a corresponding license.
"""
invalid_rules = defaultdict(set)
rules_without_flags = set()
for rule in rules:
unknown_keys = [key for key in rule.license_keys()
if key not in licenses_by_key]
if unknown_keys:
invalid_rules[rule.data_file].update(unknown_keys)
if not rule.has_flags and not (rule.is_negative or rule.is_false_positive):
rules_without_flags.add(rule.data_file)
if invalid_rules:
invalid_rules = (
' '.join(keys) + '\n' +
'file://' + data_file + '\n' +
'file://' + data_file.replace('.yml', '.RULE') + '\n'
for data_file, keys in invalid_rules.items() if keys)
msg = 'Rules referencing missing licenses:\n' + '\n'.join(sorted(invalid_rules))
raise MissingLicenses(msg)
if rules_without_flags:
invalid_rules = (
'file://' + data_file + '\n' +
'file://' + data_file.replace('.yml', '.RULE') + '\n'
for data_file in sorted(rules_without_flags))
msg = 'Rules without is_license_xxx flags:\n' + '\n'.join(sorted(invalid_rules))
raise MissingFlags(msg)
def build_rules_from_licenses(licenses):
"""
Return an iterable of rules built from each license text from a `licenses`
iterable of license objects.
"""
for license_key, license_obj in licenses.items():
text_file = join(license_obj.src_dir, license_obj.text_file)
if exists(text_file):
minimum_coverage = license_obj.minimum_coverage or 0
yield Rule(
text_file=text_file,
license_expression=license_key,
has_stored_relevance=False,
relevance=100,
has_stored_minimum_coverage=bool(minimum_coverage),
minimum_coverage=minimum_coverage,
is_license=True,
is_license_text=True,
ignorable_copyrights=license_obj.ignorable_copyrights,
ignorable_holders=license_obj.ignorable_holders,
ignorable_authors=license_obj.ignorable_authors,
ignorable_urls=license_obj.ignorable_urls,
ignorable_emails=license_obj.ignorable_emails,
)
def get_all_spdx_keys(licenses):
"""
Return an iterable of SPDX license keys collected from a `licenses` iterable
of license objects.
"""
for lic in licenses.values():
for spdx_key in lic.spdx_keys():
yield spdx_key
def get_essential_spdx_tokens():
"""
Yield essential SPDX tokens.
"""
yield 'spdx'
yield 'license'
yield 'licence'
yield 'identifier'
yield 'licenseref'
def get_all_spdx_key_tokens(licenses):
"""
Yield token strings collected from a `licenses` iterable of license objects'
SPDX license keys.
"""
for tok in get_essential_spdx_tokens():
yield tok
for spdx_key in get_all_spdx_keys(licenses):
for token in query_tokenizer(spdx_key):
yield token
def load_rules(rules_data_dir=rules_data_dir):
"""
Return an iterable of rules loaded from rule files.
"""
# TODO: OPTIMIZE: create a graph of rules to account for containment and
# similarity clusters?
seen_files = set()
processed_files = set()
lower_case_files = set()
case_problems = set()
space_problems = []
model_errors = []
for data_file in resource_iter(rules_data_dir, with_dirs=False):
if data_file.endswith('.yml'):
base_name = file_base_name(data_file)
if ' ' in base_name:
space_problems.append(data_file)
rule_file = join(rules_data_dir, base_name + '.RULE')
try:
rule = Rule(data_file=data_file, text_file=rule_file)
yield rule
except Exception as re:
model_errors.append(str(re))
            # accumulate sets to ensure we do not have illegal names or extra
# orphaned files
data_lower = data_file.lower()
if data_lower in lower_case_files:
case_problems.add(data_lower)
else:
lower_case_files.add(data_lower)
rule_lower = rule_file.lower()
if rule_lower in lower_case_files:
case_problems.add(rule_lower)
else:
lower_case_files.add(rule_lower)
processed_files.update([data_file, rule_file])
if not data_file.endswith('~'):
seen_files.add(data_file)
unknown_files = seen_files - processed_files
if unknown_files or case_problems or model_errors or space_problems:
msg = ''
if model_errors:
errors = '\n'.join(model_errors)
msg += '\nInvalid rule YAML in directory: %(rules_data_dir)r\n%(errors)s' % locals()
if unknown_files:
files = '\n'.join(sorted('file://' + f for f in unknown_files))
msg += '\nOrphaned files in rule directory: %(rules_data_dir)r\n%(files)s' % locals()
if case_problems:
files = '\n'.join(sorted('file://' + f for f in case_problems))
            msg += '\nRule files with non-unique name ignoring case in rule directory: %(rules_data_dir)r\n%(files)s' % locals()
if space_problems:
files = '\n'.join(sorted('"file://' + f + '"' for f in space_problems))
            msg += '\nRule file names cannot contain spaces: %(rules_data_dir)r\n%(files)s' % locals()
raise Exception(msg)
@attr.s(slots=True)
class Rule(object):
"""
A detection rule object is a text to use for detection and corresponding
detected licenses and metadata.
"""
licensing = Licensing()
###########
# FIXME: !!! TWO RULES MAY DIFFER BECAUSE THEY ARE UPDATED BY INDEXING
###########
# optional rule id int typically assigned at indexing time
rid = attr.ib(default=None, repr=TRACE_REPR)
# unique identifier
identifier = attr.ib(default=None)
# License expression string
license_expression = attr.ib(default=None)
# License expression object, created at build time
license_expression_object = attr.ib(default=None, repr=False)
# an indication of what this rule importance is (e.g. how important is its
# text when detected as a licensing clue) as one of several flags:
# for a license full text: this provides the highest level of confidence wrt
# detection
is_license_text = attr.ib(default=False, repr=False)
# for a license notice: this provides a strong confidence wrt detection
is_license_notice = attr.ib(default=False, repr=False)
# reference for a mere short license reference such as its bare name or a URL
# this provides a weak confidence wrt detection
is_license_reference = attr.ib(default=False, repr=False)
# tag for a structured licensing tag such as a package manifest metadata or
# an SPDX license identifier or similar package manifest tag
# this provides a strong confidence wrt detection
is_license_tag = attr.ib(default=False, repr=False)
# is this rule text a false positive when matched? it will filtered out at
# the end if matched
is_false_positive = attr.ib(default=False, repr=False)
# is this rule text a negative rule? it will be removed from the matchable
    # text at the start if matched
is_negative = attr.ib(default=False, repr=False)
# is this rule text only to be matched with a minimum coverage e.g. a
# minimum proportion of tokens as a float between 0 and 100 where 100 means
    # all tokens must be matched and a smaller value means a smaller proportion
# of matched tokens is acceptable. this is computed unless this is provided
# here.
minimum_coverage = attr.ib(default=0)
has_stored_minimum_coverage = attr.ib(default=False, repr=False)
# same as minimum_coverage but divided/100
_minimum_containment = attr.ib(default=0, repr=False)
# Can this rule be matched if there are unknown words in its matched range?
# The default is to allow known and unknown words. Unknown words are words
# that do not exist in the text of any indexed license or license detection
# rule.
only_known_words = attr.ib(default=False)
# what is the relevance of a match to this rule text? a float between 0 and
    # 100 where 100 means highly relevant and 0 means not relevant at all.
# For instance a match to the "gpl" or the "cpol" words have a fairly low
# relevance as they are a weak indication of an actual license and could be
    # a false positive. In some cases, this may even be used to discard obvious
# false positive matches automatically.
relevance = attr.ib(default=100)
has_stored_relevance = attr.ib(default=False, repr=False)
    # The rule contains a reference to some file name that contains the text
referenced_filenames = attr.ib(default=attr.Factory(list), repr=False)
# optional, free text
notes = attr.ib(default=None, repr=False)
# set to True if the rule is built from a .LICENSE full text
is_license = attr.ib(default=False, repr=False)
    # lists of copyrights, emails and URLs that can be ignored when detected
# in this license as they are part of the license or rule text itself
ignorable_copyrights = attr.ib(default=attr.Factory(list), repr=False)
ignorable_holders = attr.ib(default=attr.Factory(list), repr=False)
ignorable_authors = attr.ib(default=attr.Factory(list), repr=False)
ignorable_urls = attr.ib(default=attr.Factory(list), repr=False)
ignorable_emails = attr.ib(default=attr.Factory(list), repr=False)
###########################################################################
# path to the YAML data file for this rule
data_file = attr.ib(default=None, repr=False)
# path to the rule text file
text_file = attr.ib(default=None, repr=False)
# text of this rule for special cases where the rule is not backed by a file:
# for SPDX license expression dynamic rules or testing
stored_text = attr.ib(default=None, repr=False)
# These attributes are computed upon text loading or setting the thresholds
###########################################################################
# lengths in tokens
length = attr.ib(default=0)
min_matched_length = attr.ib(default=0, repr=TRACE_REPR)
high_length = attr.ib(default=0, repr=TRACE_REPR)
min_high_matched_length = attr.ib(default=0, repr=TRACE_REPR)
# lengths in unique token.
length_unique = attr.ib(default=0, repr=TRACE_REPR)
min_matched_length_unique = attr.ib(default=0, repr=TRACE_REPR)
high_length_unique = attr.ib(default=0, repr=TRACE_REPR)
min_high_matched_length_unique = attr.ib(default=0, repr=TRACE_REPR)
is_small = attr.ib(default=False, repr=TRACE_REPR)
has_computed_thresholds = attr.ib(default=False, repr=False)
def get_length(self, unique=False):
return self.length_unique if unique else self.length
def get_min_matched_length(self, unique=False):
return (self.min_matched_length_unique if unique
else self.min_matched_length)
def get_high_length(self, unique=False):
return self.high_length_unique if unique else self.high_length
def get_min_high_matched_length(self, unique=False):
return (self.min_high_matched_length_unique if unique
else self.min_high_matched_length)
def __attrs_post_init__(self, *args, **kwargs):
if not self.text_file:
# for SPDX or tests only
            if not self.stored_text:
raise Exception('Invalid rule without its corresponding text file: {}'.format(self))
self.identifier = '_tst_' + str(len(self.stored_text))
else:
self.identifier = file_name(self.text_file)
if self.data_file:
try:
self.load()
except Exception as e:
data_file = self.data_file
trace = traceback.format_exc()
message = 'While loading: file://{data_file}\n{trace}'.format(**locals())
raise Exception(message)
if self.relevance and self.relevance != 100:
self.has_stored_relevance = True
if self.minimum_coverage:
self.has_stored_minimum_coverage = True
if self.license_expression:
try:
expression = self.licensing.parse(self.license_expression)
except:
raise Exception(
'Unable to parse License rule expression: '
+repr(self.license_expression) + ' for: file://' + self.data_file +
'\n' + traceback.format_exc()
)
if expression is None:
raise Exception(
'Unable to parse License rule expression: '
+repr(self.license_expression) + ' for: file://' + self.data_file)
self.license_expression = expression.render()
self.license_expression_object = expression
def tokens(self):
"""
Return an iterable of token strings for this rule. Length, relevance and
minimum_coverage may be recomputed as a side effect.
"""
length = 0
text = self.text()
text = text.strip()
# FIXME: this is weird:
# We tag this rule as being a bare URL if it starts with a scheme and is
# on one line: this is used to determine a matching approach
# FIXME: this does not lower the text first??
if text.startswith(('http://', 'https://', 'ftp://')) and '\n' not in text[:1000].lower():
self.minimum_coverage = 100
for token in query_tokenizer(self.text()):
length += 1
yield token
self.length = length
self.compute_relevance()
def text(self):
"""
Return the rule text loaded from its file.
"""
if self.text_file and exists(self.text_file):
# IMPORTANT: use the same process as query text loading for symmetry
numbered_lines = numbered_text_lines(self.text_file, demarkup=False, plain_text=True)
return ''.join(l for _, l in numbered_lines)
# used for non-file backed rules
elif self.stored_text:
return self.stored_text
else:
raise Exception('Inconsistent rule text for: ' +
self.identifier + '\nfile://' + self.text_file)
def license_keys(self, unique=True):
"""
Return a list of license keys for this rule.
"""
if not self.license_expression:
return []
return self.licensing.license_keys(self.license_expression_object, unique=unique)
def same_licensing(self, other):
"""
Return True if the other rule has the same licensing as this rule.
"""
if self.license_expression and other.license_expression:
return self.licensing.is_equivalent(
self.license_expression_object, other.license_expression_object)
def licensing_contains(self, other):
"""
Return True if this rule licensing contains the other rule licensing.
"""
if self.license_expression and other.license_expression:
return self.licensing.contains(
self.license_expression_object, other.license_expression_object)
def compute_thresholds(self, small_rule=SMALL_RULE):
"""
Compute and set thresholds considering either the occurrence of all
tokens or the occurrence of unique tokens only.
"""
minimum_coverage, self.min_matched_length, self.min_high_matched_length = (
compute_thresholds_occurences(
self.minimum_coverage,
self.length,
self.high_length))
if not self.has_stored_minimum_coverage:
self.minimum_coverage = minimum_coverage
self._minimum_containment = self.minimum_coverage / 100
self.min_matched_length_unique, self.min_high_matched_length_unique = (
compute_thresholds_unique(
self.minimum_coverage,
self.length,
self.length_unique, self.high_length_unique))
self.is_small = self.length < small_rule
def to_dict(self):
"""
Return an ordered mapping of self, excluding texts. Used for
serialization. Empty values are not included.
"""
data = OrderedDict()
if self.license_expression:
data['license_expression'] = self.license_expression
flags = (
'is_false_positive',
'is_negative',
'is_license_text',
'is_license_notice',
'is_license_reference',
'is_license_tag',
'only_known_words',
)
for flag in flags:
tag_value = getattr(self, flag, False)
if tag_value:
data[flag] = tag_value
if self.has_stored_relevance and self.relevance:
rl = self.relevance
if isinstance(rl, float) and int(rl) == rl:
rl = int(rl)
data['relevance'] = rl
if self.has_stored_minimum_coverage and self.minimum_coverage > 0:
cv = self.minimum_coverage
if isinstance(cv, float) and int(cv) == cv:
cv = int(cv)
data['minimum_coverage'] = cv
if self.referenced_filenames:
data['referenced_filenames'] = self.referenced_filenames
if self.notes:
data['notes'] = self.notes
if self.ignorable_copyrights:
data['ignorable_copyrights'] = self.ignorable_copyrights
if self.ignorable_holders:
data['ignorable_holders'] = self.ignorable_holders
if self.ignorable_authors:
data['ignorable_authors'] = self.ignorable_authors
if self.ignorable_urls:
data['ignorable_urls'] = self.ignorable_urls
if self.ignorable_emails:
data['ignorable_emails'] = self.ignorable_emails
return data
def dump(self):
"""
Dump a representation of this rule as two files:
- a .yml for the rule data in YAML (self.data_file)
- a .RULE: the rule text as a UTF-8 file (self.text_file)
Does nothing if this rule was created from a License (e.g.
`is_license` is True)
"""
if self.is_license:
return
def write(location, byte_string):
# we write as binary because rules and licenses texts and data are UTF-8-encoded bytes
with io.open(location, 'wb') as of:
of.write(byte_string)
if self.data_file:
as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8')
write(self.data_file, as_yaml)
write(self.text_file, self.text().encode('utf-8'))
def load(self):
"""
Load self from a .RULE YAML file stored in self.data_file.
Does not load the rule text file.
Unknown fields are ignored and not bound to the Rule object.
"""
try:
with io.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read())
except Exception as e:
print('#############################')
print('INVALID LICENSE RULE FILE:', 'file://' + self.data_file)
print('#############################')
print(e)
print('#############################')
# this is a rare case, but yes we abruptly stop.
raise e
known_attributes = set(attr.fields_dict(self.__class__))
data_file_attributes = set(data)
unknown_attributes = data_file_attributes.difference(known_attributes)
if unknown_attributes:
unknown_attributes = ', '.join(sorted(unknown_attributes))
msg = 'License rule {} data file has unknown attributes: {}'
raise Exception(msg.format(self, unknown_attributes))
self.license_expression = data.get('license_expression')
self.is_negative = data.get('is_negative', False)
self.is_false_positive = data.get('is_false_positive', False)
if not self.license_expression and not (self.is_negative or self.is_false_positive):
msg = 'License rule {} is missing a license_expression.'
raise Exception(msg.format(self))
relevance = float(data.get('relevance', 0))
if relevance:
if relevance <= 0 or relevance > 100:
msg = ('License rule {} data file has an invalid relevance. '
'Should be above 0 and at most 100: {}')
raise Exception(msg.format(self, repr(relevance)))
# Keep track of whether we have a stored relevance or not.
self.relevance = relevance
self.has_stored_relevance = True
self.minimum_coverage = float(data.get('minimum_coverage', 0))
self._minimum_containment = self.minimum_coverage / 100
if not (0 <= self.minimum_coverage <= 100):
msg = (
'License rule {} data file has an invalid minimum_coverage. '
'Should be between 0 and 100: {}')
raise Exception(msg.format(self, self.minimum_coverage))
self.is_license_text = data.get('is_license_text', False)
self.is_license_notice = data.get('is_license_notice', False)
self.is_license_tag = data.get('is_license_tag', False)
self.is_license_reference = data.get('is_license_reference', False)
self.only_known_words = data.get('only_known_words', False)
self.referenced_filenames = data.get('referenced_filenames', []) or []
if not isinstance(self.referenced_filenames, list):
msg = (
'License rule {} data file has an invalid referenced_filenames. '
'Should be a list: {}')
raise Exception(msg.format(self, self.referenced_filenames))
# these are purely informational and not used at run time
notes = data.get('notes')
if notes:
self.notes = notes.strip()
if not self.notes and (self.is_negative or self.is_false_positive):
msg = 'Special License rule {} is missing explanatory notes.'
raise Exception(msg.format(self))
self.ignorable_copyrights = data.get('ignorable_copyrights', [])
self.ignorable_holders = data.get('ignorable_holders', [])
self.ignorable_authors = data.get('ignorable_authors', [])
self.ignorable_urls = data.get('ignorable_urls', [])
self.ignorable_emails = data.get('ignorable_emails', [])
return self
def compute_relevance(self):
"""
Compute and set the `relevance` attribute for this rule. The
relevance is a float between 0 and 100 where 100 means highly
relevant and 0 means not relevant at all.
For instance, a match to the single word "gpl" or "cpol" is only a weak
indication of an actual license and could be a false positive, so it should
be assigned a low relevance. In contrast, a match to most or all of the
apache-2.0 license text is highly relevant. The Rule relevance is used as
the basis to compute a match score.
The relevance is either pre-defined in the rule YAML data file with the
"relevance" attribute or computed here based on the rule length using
this approach:
- a false positive or a negative rule has a relevance of 100.
- a rule of length equal to or larger than a threshold has a relevance of 100.
- a rule of length smaller than the threshold has a relevance of
length * 100 / threshold, rounded down.
The current threshold is 18 words.
"""
if isinstance(self, SpdxRule):
self.relevance = 100
return
if self.has_stored_relevance:
return
# case for false positive
if self.is_false_positive:
self.relevance = 100
return
# case for negative rules with no license (and are not an FP)
# they do not have licenses and their matches are never returned
if self.is_negative:
self.relevance = 100
return
threshold = 18.0
relevance_of_one_word = round((1 / threshold) * 100, 2)
length = self.length
if length >= threshold:
# general case
self.relevance = 100
else:
computed = int(length * relevance_of_one_word)
self.relevance = min([100, computed])
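# Worked example (added purely as an illustration of the heuristic coded
# above; the numbers follow directly from it): with the 18-word threshold,
# one word is worth round(100 / 18, 2) == 5.56 relevance points, so a
# hypothetical 9-token rule gets int(9 * 5.56) == 50, while any rule of 18
# or more tokens gets the full 100.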
@property
def has_flags(self):
"""
Return True if this Rule has at least one flag set.
"""
return (self.is_license_text or self.is_license_notice
or self.is_license_reference or self.is_license_tag)
def compute_thresholds_occurences(minimum_coverage, length, high_length,
_MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH,
_MIN_MATCH_LENGTH=MIN_MATCH_LENGTH):
"""
Compute and return thresholds considering the occurrence of all tokens.
"""
if minimum_coverage == 100:
min_matched_length = length
min_high_matched_length = high_length
return minimum_coverage, min_matched_length, min_high_matched_length
if length < 3:
min_high_matched_length = high_length
min_matched_length = length
minimum_coverage = 100
elif length < 10:
min_matched_length = length
min_high_matched_length = high_length
minimum_coverage = 80
elif length < 30:
min_matched_length = length // 2
min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH)
minimum_coverage = 50
elif length < 200:
min_matched_length = _MIN_MATCH_LENGTH
min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH)
# minimum_coverage = max(15, int(length//10))
else: # if length >= 200:
min_matched_length = length // 10
min_high_matched_length = high_length // 10
# minimum_coverage = int(length//10)
return minimum_coverage, min_matched_length, min_high_matched_length
def compute_thresholds_unique(minimum_coverage, length, length_unique, high_length_unique,
_MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH,
_MIN_MATCH_LENGTH=MIN_MATCH_LENGTH):
"""
Compute and return thresholds considering the occurrence of only unique tokens.
"""
if minimum_coverage == 100:
min_matched_length_unique = length_unique
min_high_matched_length_unique = high_length_unique
return min_matched_length_unique, min_high_matched_length_unique
if length > 200:
min_matched_length_unique = length // 10
min_high_matched_length_unique = high_length_unique // 10
elif length < 5:
min_matched_length_unique = length_unique
min_high_matched_length_unique = high_length_unique
elif length < 10:
if length_unique < 2:
min_matched_length_unique = length_unique
else:
min_matched_length_unique = length_unique - 1
min_high_matched_length_unique = high_length_unique
elif length < 20:
min_matched_length_unique = high_length_unique
min_high_matched_length_unique = high_length_unique
else:
min_matched_length_unique = _MIN_MATCH_LENGTH
highu = (int(high_length_unique // 2)) or high_length_unique
min_high_matched_length_unique = min(highu, _MIN_MATCH_HIGH_LENGTH)
return min_matched_length_unique, min_high_matched_length_unique
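# Hedged illustration (added; not part of the original module): a tiny helper
# showing how the occurrence-based thresholds above scale with rule length.
# The chosen numbers are hypothetical.
def _demo_occurrence_thresholds():
    # A 25-token rule with 8 "high" tokens falls in the 10 <= length < 30
    # branch above: half of the tokens must match, the high-token floor is
    # capped by the MIN_MATCH_HIGH_LENGTH default, and minimum_coverage
    # becomes 50.
    return compute_thresholds_occurences(
        minimum_coverage=0, length=25, high_length=8)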
@attr.s(slots=True, repr=False)
class SpdxRule(Rule):
"""
A specialized rule object that is used for the special case of SPDX license
expressions.
Since there is an essentially unbounded number of possible SPDX expressions
and these are not backed by a traditional rule text file, we use this class
to handle the specifics of these rules, which are built at matching time: one
rule is created for each detected SPDX license expression.
"""
def __attrs_post_init__(self, *args, **kwargs):
self.identifier = 'spdx-license-identifier: ' + self.license_expression
expression = None
try:
expression = self.licensing.parse(self.license_expression)
except Exception:
raise Exception(
'Unable to parse License rule expression: ' +
repr(self.license_expression) + ' for: SPDX rule:' +
self.stored_text +
'\n' + traceback.format_exc())
if expression is None:
raise Exception(
'Unable to parse License rule expression: '
+ repr(self.license_expression) + ' for:' + repr(self.data_file))
self.license_expression = expression.render()
self.license_expression_object = expression
self.is_license_tag = True
self.is_small = False
self.relevance = 100
self.has_stored_relevance = True
def load(self):
raise NotImplementedError
def dump(self):
raise NotImplementedError
def _print_rule_stats():
"""
Print rules statistics.
"""
from licensedcode.cache import get_index
idx = get_index()
rules = idx.rules_by_rid
sizes = Counter(r.length for r in rules)
print('Top 15 lengths: ', sizes.most_common(15))
print('15 smallest lengths: ', sorted(sizes.items(),
key=itemgetter(0))[:15])
high_sizes = Counter(r.high_length for r in rules)
print('Top 15 high lengths: ', high_sizes.most_common(15))
print('15 smallest high lengths: ', sorted(high_sizes.items(),
key=itemgetter(0))[:15])
def update_ignorables(licensish, verbose=False, dump=True):
"""
Collect, update and save the ignorable_* attributes of a `licensish` Rule or
License object.
"""
location = licensish.text_file
if verbose:
print('Processing:', 'file://' + location)
if not exists(location):
return licensish
# collect and set ignorable copyrights, holders and authors
from cluecode.copyrights import detect_copyrights
copyrights = set()
holders = set()
authors = set()
for dtype, value, _start, _end in detect_copyrights(location):
if dtype == 'copyrights':
copyrights.add(value)
elif dtype == 'holders':
holders.add(value)
elif dtype == 'authors':
authors.add(value)
licensish.ignorable_copyrights = sorted(copyrights)
licensish.ignorable_holders = sorted(holders)
licensish.ignorable_authors = sorted(authors)
# collect and set ignorable emails and URLs
from cluecode.finder import find_urls
from cluecode.finder import find_emails
urls = set(u for (u, _ln) in find_urls(location) if u)
licensish.ignorable_urls = sorted(urls)
emails = set(u for (u, _ln) in find_emails(location) if u)
licensish.ignorable_emails = sorted(emails)
if dump:
licensish.dump()
return licensish
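# Hedged, self-contained sketch (added for illustration; not part of the
# original module): reproduces the relevance heuristic documented in
# Rule.compute_relevance for a few hypothetical rule lengths.
if __name__ == '__main__':
    threshold = 18.0
    relevance_of_one_word = round((1 / threshold) * 100, 2)
    for length in (1, 9, 18, 100):
        if length >= threshold:
            relevance = 100
        else:
            relevance = min(100, int(length * relevance_of_one_word))
        print('length {} -> relevance {}'.format(length, relevance))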
|
# -*- coding: utf-8 -*-
import pandas as pd
class Model():
"""Abstract model class.
This is the top-level class and should not be used directly.
Instead this class is inherited by other more specialised model classes.
"""
def __init__(self):
""
self._inputs=Inputs()
def run(self):
"""Runs the model.
This is an abstract method and should be overloaded by subclasses.
:returns: The model outputs.
:rtype: Outputs
"""
outputs=Outputs()
# timestamps passed from inputs to outputs
outputs._timestamps=self._inputs._timestamps
# CALCULATIONS HERE
return outputs
@property
def inputs(self):
"""The model inputs. Access this object to change the model inputs.
Read-only property.
:rtype: Inputs
"""
return self._inputs
class Inputs():
"""Abstract model inputs class.
This is the top-level class and should not be used directly.
Instead this class is inherited by other more specialised model inputs classes.
"""
def __init__(self):
""
self._timestamps=None
def set_timestamps(self,
start=None,
end=None,
*args,
**kwargs):
"""Convenience method to set the `timestamps` property.
:param start: The start timestamp (optional)
:type start: tuple
:param end: The end timestamp (optional)
:type end: tuple
The remaining input arguments here are passed to the `pandas.date_range` method.
See the pandas documentation for details.
Typical inputs might be:
* start=(2021,1,1,0,0) (i.e. 1st January 2021)
* freq='H' (for hourly intervals)
* periods=24 (to generate 1 day of hourly intervals)
:rtype: pandas.DatetimeIndex
"""
if start is not None:
start=pd.Timestamp(*start)
if end is not None:
end=pd.Timestamp(*end)
self._timestamps=pd.date_range(start=start,
end=end,
*args,
**kwargs)
return self._timestamps
@property
def timestamps(self):
"""The input timestamps.
Model predictions will be made for each timestamp.
Read / write property.
:rtype: pandas.DatetimeIndex
"""
return self._timestamps
@timestamps.setter
def timestamps(self,value):
""
self._timestamps=value
class Outputs():
"""Abstract model outputs class.
This is the top-level class and should not be used directly.
Instead this class is inherited by other more specialised model outputs classes.
"""
def __init__(self):
"""
"""
self._timestamps=None # ->pd.DatetimeIndex
self._data={} # key -> data name; value-> np.array etc.
def __repr__(self):
""
return ("%s" % self.__class__.__name__
+ "("
+ "timestamps=%s" % self.timestamps
+ ", "
+ "data=%s" % self.data
+ ")"
)
@property
def timestamps(self):
"""The outputs timestamps.
Read-only property.
:rtype: pandas.DatetimeIndex
"""
return self._timestamps
@property
def data(self):
"""The model predictions.
Read-only property.
:returns: A dictionary of the model results.
Key-value pairs are: keys -> the name of the quantity or variable;
values -> a list of the model predictions (this list aligns with the
output timestamps).
:rtype: dict
"""
return self._data
@property
def df(self):
"""A Pandas dataframe of the timestamps and data.
Read-only property.
:returns: A dataframe with: index -> timestamps;
columns -> 'data' keys; values -> `data` values.
:rtype: pandas.DataFrame
"""
return pd.DataFrame(index=self.timestamps,
data=self.data)
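# Hedged usage sketch (added; not part of the original module): how a
# subclass might overload Model.run and use Inputs.set_timestamps. The
# 'ConstantModel' name and the constant prediction value are made up purely
# for illustration.
if __name__ == '__main__':
    class ConstantModel(Model):
        """A model that predicts the value 1.0 for every timestamp."""
        def run(self):
            outputs = Outputs()
            # timestamps passed from inputs to outputs, as in Model.run
            outputs._timestamps = self._inputs._timestamps
            outputs._data['prediction'] = [1.0] * len(self._inputs._timestamps)
            return outputs
    m = ConstantModel()
    # one day of hourly intervals starting 1st January 2021
    m.inputs.set_timestamps(start=(2021, 1, 1, 0, 0), freq='H', periods=24)
    print(m.run().df.head())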
|
"""
Component to interface with an alarm control panel.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel/
"""
import logging
import os
from homeassistant.components import verisure
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, SERVICE_ALARM_TRIGGER,
SERVICE_ALARM_DISARM, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_AWAY)
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
DOMAIN = 'alarm_control_panel'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {
verisure.DISCOVER_ALARMS: 'verisure'
}
SERVICE_TO_METHOD = {
SERVICE_ALARM_DISARM: 'alarm_disarm',
SERVICE_ALARM_ARM_HOME: 'alarm_arm_home',
SERVICE_ALARM_ARM_AWAY: 'alarm_arm_away',
SERVICE_ALARM_TRIGGER: 'alarm_trigger'
}
ATTR_TO_PROPERTY = [
ATTR_CODE,
ATTR_CODE_FORMAT
]
def setup(hass, config):
"""Track states and offer events for sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL,
DISCOVERY_PLATFORMS)
component.setup(config)
def alarm_service_handler(service):
"""Map services to methods on Alarm."""
target_alarms = component.extract_from_service(service)
if ATTR_CODE not in service.data:
code = None
else:
code = service.data[ATTR_CODE]
method = SERVICE_TO_METHOD[service.service]
for alarm in target_alarms:
getattr(alarm, method)(code)
if alarm.should_poll:
alarm.update_ha_state(True)
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
for service in SERVICE_TO_METHOD:
hass.services.register(DOMAIN, service, alarm_service_handler,
descriptions.get(service))
return True
def alarm_disarm(hass, code=None, entity_id=None):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_DISARM, data)
def alarm_arm_home(hass, code=None, entity_id=None):
"""Send the alarm the command for arm home."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_HOME, data)
def alarm_arm_away(hass, code=None, entity_id=None):
"""Send the alarm the command for arm away."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_AWAY, data)
def alarm_trigger(hass, code=None, entity_id=None):
"""Send the alarm the command for trigger."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_TRIGGER, data)
# pylint: disable=no-self-use
class AlarmControlPanel(Entity):
"""An abstract class for alarm control devices."""
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
def alarm_disarm(self, code=None):
"""Send disarm command."""
raise NotImplementedError()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
raise NotImplementedError()
def alarm_arm_away(self, code=None):
"""Send arm away command."""
raise NotImplementedError()
def alarm_trigger(self, code=None):
"""Send alarm trigger command."""
raise NotImplementedError()
@property
def state_attributes(self):
"""Return the state attributes."""
state_attr = {
ATTR_CODE_FORMAT: self.code_format,
}
return state_attr
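# Hedged sketch (added; not part of the original component): a minimal
# concrete entity built on the abstract class above. The class name and the
# in-memory state strings are made up for illustration only.
class DemoAlarmControlPanel(AlarmControlPanel):
    """An alarm panel that only keeps its state in memory."""
    def __init__(self):
        self._state = 'disarmed'
    @property
    def state(self):
        """Return the current in-memory state."""
        return self._state
    def alarm_disarm(self, code=None):
        self._state = 'disarmed'
    def alarm_arm_home(self, code=None):
        self._state = 'armed_home'
    def alarm_arm_away(self, code=None):
        self._state = 'armed_away'
    def alarm_trigger(self, code=None):
        self._state = 'triggered'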
|
"""
Sample code for LLOCK, SLOCK, and LSLOCK:
application of these methods to an advection model (periodic boundary condition).
"""
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append("..")
from pyassim import KalmanFilter, LocalLOCK, SpatiallyUniformLOCK, LSLOCK,\
PeriodicAdvection, EulerScheme
def main():
result_dir = "figures/advection"
if not os.path.exists(result_dir):
os.mkdir(result_dir)
seed = 121
np.random.seed(seed)
# parameters
N = 20
x0 = np.exp(-(np.arange(N)-N//2)**2/20)
dt = 0.01
dx = 1
c = 1
sys_sd = 0.001
obs_sd = 0.1
timestep = 10000
ds = 100
# generate data
model = PeriodicAdvection(dx, c, dt, scheme="LW")
scheme = EulerScheme(dt, timestep, model, seed=seed)
true, obs = scheme.noise_added_simulation(x0, sys_sd, obs_sd)
# setup matrices
# adjacency matrix
A = np.eye(N)
A[np.arange(N-1), np.arange(1,N)] = 2
A[np.arange(1,N), np.arange(N-1)] = 3
A[0,-1] = 3
A[-1,0] = 2
# A[np.arange(N-2), np.arange(2,N)] = True
# A[np.arange(2,N), np.arange(N-2)] = True
# A[0,-2] = A[-2,0] = A[1,-1] = A[-1,1] = True
# initial transition matrix
F = np.eye(N)
H = np.eye(N)
# covariance
Q = obs_sd**2 * np.eye(N)
R = obs_sd**2 * np.eye(N)
V0 = obs_sd**2 * np.eye(N)
# execution
kf = KalmanFilter(obs[::ds], x0, V0, F, H, Q, R, em_vars=["transition_matrices"])
kf.em(n_iter=10)
kf.forward()
llock = LocalLOCK(obs[::ds], x0, V0, F, H, Q, R, A.astype(bool), method="elementwise",
estimation_length=20, estimation_interval=5, eta=1.0,
cutoff=10, estimation_mode="forward")
llock.forward()
slock = SpatiallyUniformLOCK(obs[::ds], x0, V0, F, H, Q, R, np.zeros(N), A,
estimation_length=1, estimation_interval=1, eta=1.,
cutoff=10., estimation_mode="forward")
slock.forward()
lslock = LSLOCK(obs[::ds], x0, V0, F, H, Q, R, A, method="gridwise",
estimation_length=10, estimation_interval=5, eta=1.,
cutoff=10., estimation_mode="forward")
lslock.forward()
# draw results
dim=0
plt.figure(figsize=(8,5))
plt.scatter(np.arange(timestep//ds), obs[::ds,dim], label="obs", c="k")
plt.plot(true[::ds,dim], label="true", c="cyan", ls="--")
plt.plot(kf.get_filtered_value(dim), label="kf w/ EM")
plt.plot(llock.get_filtered_value(dim), label="llock")
plt.plot(slock.get_filtered_value(dim), label="slock")
plt.plot(lslock.get_filtered_value(dim), label="lslock")
plt.legend()
plt.savefig(os.path.join(result_dir, "dim{}_estimated.pdf".format(dim)), bbox_inches="tight")
fig, ax = plt.subplots(2,2,figsize=(10,10))
vmin, vmax = obs.min(), obs.max()
sns.heatmap(true[::ds], cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,0])
sns.heatmap(llock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,1])
sns.heatmap(slock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,0])
sns.heatmap(lslock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,1])
ax[0,0].set_title("True")
ax[0,1].set_title("LLOCK")
ax[1,0].set_title("SLOCK")
ax[1,1].set_title("LSLOCK")
for i in range(2):
for j in range(2):
ax[i,j].set_xlabel("space")
ax[i,j].set_ylabel("timestep")
fig.savefig(os.path.join(result_dir, "estimated.pdf"))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Metric:
def __init__(self):
pass
def setSource(self,source):
self.source = source
def getSource(self):
return self.source
def setName(self, name):
self.name = name
def getName(self):
return self.name
def setValue(self, value):
self.value = value
def getValue(self):
return self.value
def __str__(self):
return "{} {} {}".format(self.name,self.value,self.source)
|
#! /usr/bin/env python
from Tkinter import *
from types import *
import math, random, time, sys, os
from optparse import OptionParser
# states that a request/disk go through
STATE_NULL = 0
STATE_SEEK = 1
STATE_XFER = 2
STATE_DONE = 3
# request states
REQ_NOT_STARTED = 0
REQ_DO_READ = 1
REQ_DO_WRITE = 2
# used by parity requests
REQ_PARITY_READ_PHASE_DONE = 4
REQ_PARITY_WRITE_PHASE_BEGIN = 5
# all requests end in DONE state
REQ_DONE = 10
# whether req is read or write
OP_READ = 1
OP_WRITE = 2
class Request:
def __init__(self, logical_address, op_type):
self.logical_address = logical_address
assert(op_type == OP_WRITE or op_type == OP_READ)
self.op_type = op_type
self.disk_to_index_map = {}
self.full_stripe_write = False
self.full_stripe_write_parity = False
self.start_time = -1
return
def MarkFullStripeWrite(self, parity=False):
self.full_stripe_write = True
self.full_stripe_write_parity = parity
return
def FullStripeWriteStatus(self):
return (self.full_stripe_write, self.full_stripe_write_parity)
def GetType(self):
return self.op_type
def GetLogicalAddress(self):
return self.logical_address
def GetStatus(self, index):
return self.status[index]
def GetStatusByDisk(self, disk):
index = self.disk_to_index_map[disk]
return self.status[index]
def SetStatus(self, index, status):
# print 'STATUS', self.phys_disk_list[index], self.PrintableStatus(status)
self.status[index] = status
def SetPhysicalAddress(self, disk_list, offset):
self.phys_disk_list = disk_list
cnt = 0
for disk in self.phys_disk_list:
self.disk_to_index_map[disk] = cnt
cnt += 1
self.phys_offset = offset
self.status = []
for disk in self.phys_disk_list:
self.status.append(REQ_NOT_STARTED)
return
def PrintableStatus(self, status):
if status == REQ_NOT_STARTED:
return 'REQ_NOT_STARTED'
if status == REQ_DO_WRITE:
return 'REQ_DO_WRITE'
if status == REQ_DO_READ:
return 'REQ_DO_READ'
if status == REQ_DONE:
return 'REQ_DONE'
if status == REQ_PARITY_READ_PHASE_DONE:
return 'REQ_PARITY_READ_PHASE_DONE'
if status == REQ_PARITY_WRITE_PHASE_BEGIN:
return 'REQ_PARITY_WRITE_PHASE_BEGIN'
print 'BAD STATUS', status
exit(1)
return
def MarkStart(self, timer):
if self.start_time == -1:
self.start_time = timer
return
def RequestLevel0Done(self, disk, timer):
index = self.disk_to_index_map[disk]
if self.status[index] == REQ_DO_READ or self.status[index] == REQ_DO_WRITE:
self.status[index] = REQ_DONE
return (True, timer - self.start_time)
def RequestLevel1Done(self, disk, timer):
index = self.disk_to_index_map[disk]
if self.status[index] == REQ_DO_READ:
self.status[index] = REQ_DONE
return (True, timer - self.start_time)
# this is for WRITES (only done when BOTH writes are done)
assert(self.status[index] == REQ_DO_WRITE)
self.status[index] = REQ_DONE
if self.status[1-index] == REQ_DONE:
return (True, timer - self.start_time)
return (False, -1)
# this is for RAID4 right now
def RequestLevel4Done(self, disk, timer):
index = self.disk_to_index_map[disk]
# print 'Done', self.PrintableStatus(self.status[index])
if self.op_type == OP_READ:
return (True, timer - self.start_time)
# this is for WRITES (which have two phases)
if self.status[index] == REQ_DO_READ:
self.status[index] = REQ_PARITY_READ_PHASE_DONE
elif self.status[index] == REQ_DO_WRITE:
self.status[index] = REQ_DONE
if self.status[index] == REQ_PARITY_READ_PHASE_DONE and self.status[1-index] == REQ_PARITY_READ_PHASE_DONE:
self.status[0] = REQ_PARITY_WRITE_PHASE_BEGIN
self.status[1] = REQ_PARITY_WRITE_PHASE_BEGIN
if self.status[index] == REQ_DONE and self.status[1-index] == REQ_DONE:
return (True, timer - self.start_time)
return (False, -1)
def GetPhysicalOffset(self):
return self.phys_offset
def GetPhysicalDiskList(self):
return self.phys_disk_list
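# Hedged illustration (added; not part of the original simulator): the RAID-4
# address mapping used by the Raid class below. With disk_count disks, one is
# a dedicated parity disk and logical block L maps to data disk
# L % (disk_count - 1) at offset L // (disk_count - 1). For example, with 4
# disks, logical block 7 lands on disk 1 at offset 2, and its parity block
# lives on disk 3 at that same offset.
def _demo_raid4_mapping(logical, disk_count=4):
    # returns (data_disk, offset, parity_disk) for the RAID-4 layout
    return (logical % (disk_count - 1), logical // (disk_count - 1), disk_count - 1)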
class Raid:
def __init__(self, mapping, addr_desc, addr, disk_count, seek_speed, seed, balance, read_fraction, window, animate_delay):
self.mapping = mapping
self.disk_count = disk_count
self.seek_speed = seek_speed
self.addr_desc = addr_desc
self.balance = balance
self.addr = addr
self.read_fraction = read_fraction
self.window = window
self.animate_delay = animate_delay
random.seed(seed)
self.root = Tk()
self.canvas = Canvas(self.root, width=560, height=530)
self.canvas.pack()
# make the disks
disk_width = 100
self.head_width = 10
self.head_height = 20
# now distribute blocks - assume striping first
self.block_offset = {}
# maps for scheduling
self.offset_to_ypos = {}
# maps for coloring blocks of the "disk"
self.disk_and_offset_to_rect_id = {}
self.color_map = {}
if self.mapping == 0:
# CREATE STRIPED CONFIGURATION
self.block_count = 80
for i in range(self.block_count):
disk = i % self.disk_count
offset = i / self.disk_count
rect_x = 40 + ((20 + disk_width) * disk)
rect_y = (20 * offset) + 100
self.color_map[(disk, offset)] = 'gray'
rect_id = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='gray', outline='black')
text_id = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='%s' % i, anchor='c')
self.block_offset[i] = rect_y
self.offset_to_ypos[offset] = rect_y
self.disk_and_offset_to_rect_id[(disk, offset)] = rect_id
elif self.mapping == 1:
# CREATE MIRRORED CONFIGURATION
self.block_count = 40
effective_disks = self.disk_count / 2
assert(self.disk_count % 2 == 0)
for i in range(self.block_count):
INDEX = i % effective_disks
disk_1 = INDEX * 2
disk_2 = disk_1 + 1
offset = i / effective_disks
rect_y = (20 * offset) + 100
rect_x = 40 + ((20 + disk_width) * disk_1)
self.color_map[(disk_1, offset)] = 'gray'
rect_id_1 = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='gray', outline='black')
text_id_1 = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='%s' % i, anchor='c')
rect_x = 40 + ((20 + disk_width) * disk_2)
self.color_map[(disk_2, offset)] = 'gray'
rect_id_2 = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='gray', outline='black')
text_id_2 = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='%s' % i, anchor='c')
self.block_offset[i] = rect_y
self.offset_to_ypos[offset] = rect_y
self.disk_and_offset_to_rect_id[(disk_1, offset)] = rect_id_1
self.disk_and_offset_to_rect_id[(disk_2, offset)] = rect_id_2
elif self.mapping == 4:
# CREATE SIMPLE PARITY CONFIGURATION
self.block_count_full = 80
self.block_count = 60
for i in range(self.block_count):
disk = i % (self.disk_count-1)
offset = i / (self.disk_count-1)
rect_x = 40 + ((20 + disk_width) * disk)
rect_y = (20 * offset) + 100
self.color_map[(disk, offset)] = 'lightgray'
rect_id = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='lightgray', outline='black')
text_id = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='%s' % i, anchor='c')
self.block_offset[i] = rect_y
self.offset_to_ypos[offset] = rect_y
self.disk_and_offset_to_rect_id[(disk, offset)] = rect_id
# now make parity blocks
for i in range(self.block_count_full/self.disk_count):
disk = 3
offset = i
rect_x = 40 + ((20 + disk_width) * disk)
rect_y = (20 * offset) + 100
self.color_map[(disk, offset)] = 'darkgray'
rect_id = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='darkgray', outline='black')
text_id = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='P%s' % i, anchor='c')
self.block_offset['p' + str(i)] = rect_y
self.offset_to_ypos[offset] = rect_y
self.disk_and_offset_to_rect_id[(disk, offset)] = rect_id
elif self.mapping == 5:
# CREATE RAID-5 config
self.block_count_full = 80
self.block_count = 60
for i in range(self.block_count):
offset = i / (self.disk_count-1)
if offset % 4 == 0:
disk = i % (self.disk_count-1)
elif offset % 4 == 1:
disk = i % (self.disk_count-1)
if disk >= 2:
disk += 1
elif offset % 4 == 2:
disk = i % (self.disk_count-1)
if disk >= 1:
disk += 1
elif offset % 4 == 3:
disk = i % (self.disk_count-1)
disk += 1
rect_x = 40 + ((20 + disk_width) * disk)
rect_y = (20 * offset) + 100
self.color_map[(disk, offset)] = 'gray'
rect_id = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='gray', outline='black')
text_id = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='%s' % i, anchor='c')
self.block_offset[i] = rect_y
self.offset_to_ypos[offset] = rect_y
self.disk_and_offset_to_rect_id[(disk, offset)] = rect_id
# now make parity blocks
for i in range(self.block_count_full/self.disk_count):
offset = i
if offset % 4 == 0:
disk = 3
elif offset % 4 == 1:
disk = 2
elif offset % 4 == 2:
disk = 1
elif offset % 4 == 3:
disk = 0
rect_x = 40 + ((20 + disk_width) * disk)
rect_y = (20 * offset) + 100
self.color_map[(disk, offset)] = 'darkgray'
rect_id = self.canvas.create_rectangle(rect_x, rect_y, rect_x+disk_width, rect_y+20, fill='darkgray', outline='black')
text_id = self.canvas.create_text(rect_x + disk_width - disk_width/2.0, rect_y+10, text='P%s' % i, anchor='c')
self.block_offset['p' + str(i)] = rect_y
self.offset_to_ypos[offset] = rect_y
self.disk_and_offset_to_rect_id[(disk, offset)] = rect_id
else:
print 'mapping', self.mapping, 'not supported'
exit(1)
# now draw "disk heads"
self.head_ids = {}
self.head_position = {}
self.disk_state = {}
for disk in range(self.disk_count):
rect_x = 40 - self.head_width + ((20 + disk_width) * disk)
rect_y = 100
head_id = self.canvas.create_rectangle(rect_x, rect_y,
rect_x+self.head_width, rect_y+self.head_height,
fill='black', outline='black')
self.head_ids[disk] = head_id
self.head_position[disk] = {'x1':rect_x, 'y1':rect_y, 'x2':rect_x+self.head_width, 'y2':rect_y+self.head_height}
self.disk_state[disk] = STATE_NULL
# seek targets
self.last_target = {}
self.current_target = {}
self.current_optype = {}
self.seek_delta = {}
for disk in range(self.disk_count):
self.last_target[disk] = -1
self.current_target[disk] = -1
self.current_optype[disk] = -1
self.seek_delta[disk] = 0
self.transfer_count = {}
self.rotate_count = {}
for disk in range(self.disk_count):
self.transfer_count[disk] = -1
self.rotate_count[disk] = -1
# initial requests
self.request_queue = {}
self.request_count = 0
effective_disk_count = 4
if self.mapping == 4:
effective_disk_count = 3
if self.addr == '':
# use 'addr_desc' (num to generate, max, min) to generate these
tmp = self.addr_desc.split(',')
num = int(tmp[0])
req_max = int(tmp[1])
if req_max == -1:
req_max = self.block_count
req_min = int(tmp[2])
if self.balance:
disk_min = num / effective_disk_count
if req_min >= req_max:
print 'bad addr_desc: min should be lower than max', req_min, req_max
exit(1)
target_disk = 0
for i in range(num):
while True:
req = int(random.random() * req_max)
if req % effective_disk_count != target_disk:
continue
target_disk += 1
if target_disk == effective_disk_count:
target_disk = 0
# print target_disk
if req >= req_min:
if random.random() < read_fraction:
self.request_queue[i] = Request(req, OP_READ)
else:
self.request_queue[i] = Request(req, OP_WRITE)
break
else:
# HAND-PASSED IN addresses
# argument: comma-separated list of numbers
tmp = self.addr.split(',')
for i in range(len(tmp)):
if tmp[i][0] == 'r':
self.request_queue[i] = Request(int(tmp[i].replace('r','')), OP_READ)
elif tmp[i][0] == 'w':
self.request_queue[i] = Request(int(tmp[i].replace('w','')), OP_WRITE)
else:
print 'Must specify reads vs writes, e.g., r10 or w6'
exit(1)
self.request_count_needed = len(self.request_queue)
# fill in extra info about requests
if self.mapping == 0:
# STRIPING
for i in range(len(self.request_queue)):
request = self.request_queue[i]
logical = request.GetLogicalAddress()
assert(logical < self.block_count)
disk = logical % self.disk_count
offset = logical / self.disk_count
request.SetPhysicalAddress([disk], offset)
elif self.mapping == 1:
# MIRRORING
for i in range(len(self.request_queue)):
request = self.request_queue[i]
if request.GetType() == OP_WRITE:
self.request_count_needed += 1
effective_disks = self.disk_count / 2
logical = request.GetLogicalAddress()
assert(logical < self.block_count)
disk_1 = 2 * (logical % effective_disks)
disk_2 = disk_1 + 1
offset = logical / effective_disks
request.SetPhysicalAddress([disk_1, disk_2], offset)
elif self.mapping == 4:
# RAID-4 (PARITY DISK)
for i in range(len(self.request_queue)):
request = self.request_queue[i]
if request.GetType() == OP_WRITE:
self.request_count_needed += 3
logical = request.GetLogicalAddress()
assert(logical < self.block_count)
disk = logical % (self.disk_count-1)
offset = logical / (self.disk_count-1)
request.SetPhysicalAddress([disk, 3], offset)
# XXX This really only works for SOME demos
# (it is not a general purpose feature)
for i in range(0,len(self.request_queue),3):
if i+2 >= len(self.request_queue):
continue
logical = self.request_queue[i].GetLogicalAddress()
if self.request_queue[i+1].GetLogicalAddress() == logical + 1:
if self.request_queue[i+2].GetLogicalAddress() == logical + 2:
# full stripe detected: now mark and handle differently when scheduling
for j in range(i, i+2):
self.request_queue[j].MarkFullStripeWrite()
self.request_queue[i+2].MarkFullStripeWrite(True)
self.request_count_needed -= 8
elif self.mapping == 5:
# RAID-5 (ROTATED PARITY)
for i in range(len(self.request_queue)):
request = self.request_queue[i]
if request.GetType() == OP_WRITE:
self.request_count_needed += 3
logical = request.GetLogicalAddress()
assert(logical < self.block_count)
disk = logical % (self.disk_count-1)
offset = logical / (self.disk_count-1)
if offset % 4 == 0:
parity_disk = 3
elif offset % 4 == 1:
parity_disk = 2
if disk >= 2:
disk += 1
elif offset % 4 == 2:
parity_disk = 1
if disk >= 1:
disk += 1
elif offset % 4 == 3:
parity_disk = 0
disk += 1
# print 'LOGICAL', logical, 'offset', offset, 'disk', disk, 'paritydisk', parity_disk
request.SetPhysicalAddress([disk, parity_disk], offset)
# draw request queue
self.request_queue_box_ids = []
self.request_queue_text_ids = []
self.request_queue_count_ids = []
self.request_queue_counts = []
x_start = 40
x = x_start
y = 32
sz = 10
font = ('Helvetica', sz+4)
font_small = ('Helvetica', 8)
for index in range(len(self.request_queue)):
if x > 500:
x = x_start
y += (2*sz) + 2
request = self.request_queue[index]
logical = request.GetLogicalAddress()
self.request_queue_box_ids.append(self.canvas.create_rectangle(x-sz,y-sz,x+sz,y+sz,fill='white',outline=''))
self.request_queue_text_ids.append(self.canvas.create_text(x, y, text=str(logical), anchor='c', font=font))
self.request_queue_count_ids.append(self.canvas.create_text(x+8, y+8, text=str(0), anchor='c', font=font_small))
self.request_queue_counts.append(0)
x += (2*sz)
# BINDINGS
self.root.bind('s', self.Start)
self.root.bind('p', self.Pause)
self.root.bind('q', self.Exit)
# draw current limits of queue
self.windowID = -1
self.DrawWindow()
# TIME INFO and other stats
self.timeID = self.canvas.create_text(10, 10, text='Time: 0.00', anchor='w')
self.timer = 0
self.logical_requests = 0
self.latency_total = 0
# read/write counts
self.count_reads = {}
self.count_writes = {}
self.count_reads_id = {}
self.count_writes_id = {}
x = disk_width - 10
font = ('Helvetica', 14)
for i in range(self.disk_count):
self.count_reads[i] = 0
self.count_writes[i] = 0
self.canvas.create_rectangle(x-50,510,x,530, fill='orange', outline='')
self.canvas.create_rectangle(x+50,510,x,530, fill='yellow', outline='')
self.count_reads_id[i] = self.canvas.create_text(x-20, 520, text='R:0', anchor='c', font=font)
self.count_writes_id[i] = self.canvas.create_text(x+20, 520, text='W:0', anchor='c', font=font)
x += disk_width + 20
# set up animation loop
self.do_animate = True
self.is_done = False
return
# call this to start simulation
def Go(self):
self.root.mainloop()
return
#
# BUTTONS
#
def Start(self, event):
self.GetNextIOs()
self.Animate()
return
def Pause(self, event):
if self.do_animate == False:
self.do_animate = True
else:
self.do_animate = False
return
def Exit(self, event):
sys.exit(0)
return
#
# ROUTINES
#
def UpdateWriteCounter(self, disk, how_much):
self.count_writes[disk] += how_much
self.canvas.itemconfig(self.count_writes_id[disk], text='W:%d' % self.count_writes[disk])
return
def UpdateReadCounter(self, disk, how_much):
self.count_reads[disk] += how_much
self.canvas.itemconfig(self.count_reads_id[disk], text='R:%d' % self.count_reads[disk])
return
def UpdateTime(self):
self.canvas.itemconfig(self.timeID, text='Time: ' + str(self.timer))
return
def DrawWindow(self):
return
def BlockSetColor(self, disk, offset, color):
block_id = self.disk_and_offset_to_rect_id[(disk, offset)]
self.canvas.itemconfig(block_id, fill=color)
return
def QueueSetColor(self, index, fill_color):
box_id = self.request_queue_box_ids[index]
self.canvas.itemconfig(box_id, fill=fill_color)
self.request_queue_counts[index] += 1
count_id = self.request_queue_count_ids[index]
self.canvas.itemconfig(count_id, text='%d' % self.request_queue_counts[index])
return
def SetSeekDirection(self, disk, dest_block):
if self.GetHeadPosition(disk) < self.block_offset[dest_block]:
self.seek_delta[disk] = self.seek_speed
else:
self.seek_delta[disk] = -self.seek_speed
return
def StartRead(self, disk, offset, logical_address, request, queue_index):
self.current_optype[disk] = OP_READ
self.StartRequest(disk, offset, logical_address, request, queue_index, 'orange')
return
def StartWrite(self, disk, offset, logical_address, request, queue_index):
self.current_optype[disk] = OP_WRITE
self.StartRequest(disk, offset, logical_address, request, queue_index, 'yellow')
return
def StartRequest(self, disk, offset, logical_address, request, queue_index, fill_color):
self.QueueSetColor(queue_index, fill_color)
self.disk_state[disk] = STATE_SEEK
self.BlockSetColor(disk, offset, fill_color)
self.SetSeekDirection(disk, logical_address)
self.last_target[disk] = self.current_target[disk]
self.current_target[disk] = request
return
def DoStripeScheduling(self, disk, index):
request = self.request_queue[index]
logical = request.GetLogicalAddress()
if request.GetStatus(0) == REQ_NOT_STARTED and logical % self.disk_count == disk:
offset = request.GetPhysicalOffset()
request.MarkStart(self.timer)
if request.GetType() == OP_READ:
request.SetStatus(0, REQ_DO_READ)
self.StartRead(disk, offset, logical, request, index)
else:
request.SetStatus(0, REQ_DO_WRITE)
self.StartWrite(disk, offset, logical, request, index)
return
return
def DoMirrorScheduling(self, disk, index):
request = self.request_queue[index]
logical = request.GetLogicalAddress()
disks = request.GetPhysicalDiskList()
if disks[0] == disk:
disk_index = 0
elif disks[1] == disk:
disk_index = 1
else:
return
if request.GetStatus(disk_index) == REQ_NOT_STARTED and (disk == disks[0] or disk == disks[1]):
offset = request.GetPhysicalOffset()
request.MarkStart(self.timer)
if request.GetType() == OP_READ:
request.SetStatus(disk_index, REQ_DO_READ)
request.SetStatus(1 - disk_index, REQ_DONE)
self.StartRead(disk, offset, logical, request, index)
else:
request.SetStatus(disk_index, REQ_DO_WRITE)
self.StartWrite(disk, offset, logical, request, index)
return
return
def DoRaid4Scheduling(self, disk, index):
request = self.request_queue[index]
logical = request.GetLogicalAddress()
# reads: easy case, just like striped read
if request.GetType() == OP_READ and request.GetStatus(0) == REQ_NOT_STARTED and logical % (self.disk_count-1) == disk:
request.MarkStart(self.timer)
request.SetStatus(0, REQ_DO_READ)
offset = request.GetPhysicalOffset()
self.StartRead(disk, offset, logical, request, index)
return
# now focus on writes: which turn into two reads, two writes
if request.GetType() != OP_WRITE:
return
disks = request.GetPhysicalDiskList()
if disks[0] != disk and disks[1] != disk:
return
if disks[0] == disk:
disk_index = 0
elif disks[1] == disk:
disk_index = 1
# check for possible FULL STRIPE WRITE
(full_stripe_write, do_parity) = request.FullStripeWriteStatus()
if full_stripe_write:
offset = request.GetPhysicalOffset()
if do_parity == False and request.GetStatus(disk_index) == REQ_NOT_STARTED:
# print 'doing FULL STRIPE WRITE (parity)'
# in this case, turn off both reads and write to parity disk
request.MarkStart(self.timer)
request.SetStatus(disk_index, REQ_DO_WRITE)
request.SetStatus(1-disk_index, REQ_DONE)
self.StartWrite(disk, offset, logical, request, index)
return
if do_parity == True and request.GetStatus(disk_index) == REQ_NOT_STARTED:
# in this case, turn off reads but ensure both writes happen
request.MarkStart(self.timer)
request.SetStatus(disk_index, REQ_DO_WRITE)
# request.SetStatus(1, REQ_DO_WRITE)
# print 'doing FULL STRIPE WRITE (non-parity)'
self.StartWrite(disk, offset, logical, request, index)
return
# normal case: SUBTRACTIVE PARITY handling
# handle a LOGICAL WRITE that has not yet started
# it starts with a READ
if request.GetStatus(disk_index) == REQ_NOT_STARTED:
request.MarkStart(self.timer)
request.SetStatus(disk_index, REQ_DO_READ)
offset = request.GetPhysicalOffset()
self.StartRead(disk, offset, logical, request, index)
return
# handle a LOGICAL write that is mid way
# it is ended with a WRITE
if request.GetStatus(disk_index) == REQ_PARITY_WRITE_PHASE_BEGIN:
request.SetStatus(disk_index, REQ_DO_WRITE)
offset = request.GetPhysicalOffset()
self.StartWrite(disk, offset, logical, request, index)
return
return
def DoRaid5Scheduling(self, disk, index):
request = self.request_queue[index]
logical = request.GetLogicalAddress()
# reads: easy case, just like striped read
if request.GetType() == OP_READ and request.GetStatus(0) == REQ_NOT_STARTED and request.GetPhysicalDiskList()[0] == disk:
request.MarkStart(self.timer)
request.SetStatus(0, REQ_DO_READ)
offset = request.GetPhysicalOffset()
# print 'start', disk, offset
self.StartRead(disk, offset, logical, request, index)
return
# now focus on writes: which turn into two reads, two writes
if request.GetType() != OP_WRITE:
return
disks = request.GetPhysicalDiskList()
if disks[0] != disk and disks[1] != disk:
return
if disks[0] == disk:
disk_index = 0
elif disks[1] == disk:
disk_index = 1
# normal case: SUBTRACTIVE PARITY handling
# handle a LOGICAL WRITE that has not yet started
# it starts with a READ
if request.GetStatus(disk_index) == REQ_NOT_STARTED:
request.MarkStart(self.timer)
request.SetStatus(disk_index, REQ_DO_READ)
offset = request.GetPhysicalOffset()
# print 'start read', logical, disk, offset
self.StartRead(disk, offset, logical, request, index)
return
# handle a LOGICAL write that is mid way
# it is ended with a WRITE
if request.GetStatus(disk_index) == REQ_PARITY_WRITE_PHASE_BEGIN:
request.SetStatus(disk_index, REQ_DO_WRITE)
offset = request.GetPhysicalOffset()
# print 'start write', logical, disk, offset
self.StartWrite(disk, offset, logical, request, index)
return
return
def GetNextIOs(self):
# check if done: if so, print stats and end animation
if self.request_count == self.request_count_needed:
self.UpdateTime()
self.PrintStats()
self.do_animate = False
self.is_done = True
return
# scheduler
for disk in range(self.disk_count):
count = 0
for index in self.request_queue:
if self.window != -1 and count >= self.window:
continue
count += 1
if self.mapping == 0:
if self.disk_state[disk] == STATE_NULL:
self.DoStripeScheduling(disk, index)
elif self.mapping == 1:
if self.disk_state[disk] == STATE_NULL:
self.DoMirrorScheduling(disk, index)
elif self.mapping == 4:
if self.disk_state[disk] == STATE_NULL:
self.DoRaid4Scheduling(disk, index)
elif self.mapping == 5:
if self.disk_state[disk] == STATE_NULL:
self.DoRaid5Scheduling(disk, index)
return
def GetHeadPosition(self, disk):
return self.head_position[disk]['y1']
def MoveHead(self, disk):
self.head_position[disk]['y1'] += self.seek_delta[disk]
self.head_position[disk]['y2'] += self.seek_delta[disk]
self.canvas.coords(self.head_ids[disk],
self.head_position[disk]['x1'], self.head_position[disk]['y1'],
self.head_position[disk]['x2'], self.head_position[disk]['y2'])
return
def DoneWithSeek(self, disk):
request = self.current_target[disk]
if self.GetHeadPosition(disk) == self.offset_to_ypos[request.GetPhysicalOffset()]:
return True
return False
def StartTransfer(self, disk):
offset_current = self.current_target[disk].GetPhysicalOffset()
if self.last_target[disk] == -1:
offset_last = -1
else:
# print self.last_target[disk]
offset_last = self.last_target[disk].GetPhysicalOffset()
if offset_current == offset_last + 1:
self.transfer_count[disk] = 1
else:
self.transfer_count[disk] = 10
return
def DoneWithTransfer(self, disk):
return self.transfer_count[disk] == 0
# called when a single IO is finished
# note: request (as in mirrored or parity write) contains multiple IOs
def MarkDone(self, disk):
request = self.current_target[disk]
low_level_op_type = self.current_optype[disk]
if low_level_op_type == OP_WRITE:
self.UpdateWriteCounter(disk, 1)
elif low_level_op_type == OP_READ:
self.UpdateReadCounter(disk, 1)
# this is to move IOs through different phases
if self.mapping == 4 or self.mapping == 5:
(request_done, latency) = request.RequestLevel4Done(disk, self.timer)
elif self.mapping == 1:
(request_done, latency) = request.RequestLevel1Done(disk, self.timer)
elif self.mapping == 0:
(request_done, latency) = request.RequestLevel0Done(disk, self.timer)
if request_done:
self.logical_requests += 1
self.latency_total += latency
# print 'LATENCY', latency
if self.window > 0:
self.window += 1
return
def Animate(self):
if self.do_animate == False:
self.root.after(self.animate_delay, self.Animate)
return
# timer
self.timer += 1
self.UpdateTime()
# move the blocks
# now check if something should be happening
for disk in range(self.disk_count):
if self.disk_state[disk] == STATE_SEEK:
if self.DoneWithSeek(disk):
self.disk_state[disk] = STATE_XFER
block_id = self.disk_and_offset_to_rect_id[(disk, self.current_target[disk].GetPhysicalOffset())]
self.StartTransfer(disk)
else:
self.MoveHead(disk)
if self.disk_state[disk] == STATE_XFER:
self.transfer_count[disk] -= 1
if self.DoneWithTransfer(disk):
offset = self.current_target[disk].GetPhysicalOffset()
self.MarkDone(disk)
self.request_count += 1
self.disk_state[disk] = STATE_NULL
self.BlockSetColor(disk, self.current_target[disk].GetPhysicalOffset(), self.color_map[(disk, offset)])
self.GetNextIOs()
# make sure to keep the animation going!
self.root.after(self.animate_delay, self.Animate)
return
def DoRequestStats(self):
return
def PrintStats(self):
print 'Total Time: ', self.timer
print ' Requests: ', self.logical_requests
print ' Avg Latency: %.2f' % (float(self.latency_total) / float(self.logical_requests))
return
# END: class Disk
#
# MAIN SIMULATOR
#
parser = OptionParser()
parser.add_option('-s', '--seed', default='0', help='Random seed', action='store', type='int', dest='seed')
parser.add_option('-m', '--mapping', default='0', help='0-striping, 1-mirroring, 4-raid4, 5-raid5', action='store', type='int', dest='mapping')
parser.add_option('-a', '--addr', default='', help='Request list (comma-separated) [-1 -> use addrDesc]', action='store', type='string', dest='addr')
parser.add_option('-r', '--read_fraction', default='0.5', help='Fraction of requests that are reads', action='store', type='string', dest='read_fraction')
parser.add_option('-A', '--addr_desc', default='5,-1,0', help='Num requests, max request (-1->all), min request', action='store', type='string', dest='addr_desc')
parser.add_option('-B', '--balanced', default=True, help='If generating random requests, balance across disks', action='store_true', dest='balance')
parser.add_option('-S', '--seek_speed', default='4', help='Speed of seek (1,2,4,5,10,20)', action='store', type='int', dest='seek_speed')
parser.add_option('-p', '--policy', default='FIFO', help='Scheduling policy (FIFO, SSTF, SATF, BSATF)', action='store', type='string', dest='policy')
parser.add_option('-w', '--window', default=-1, help='Size of scheduling window (-1 -> all)', action='store', type='int', dest='window')
parser.add_option('-D', '--delay', default=20, help='Animation delay; bigger is slower', action='store', type='int', dest='animate_delay')
parser.add_option('-G', '--graphics', default=True, help='Turn on graphics', action='store_true', dest='graphics')
parser.add_option('-c', '--compute', default=False, help='Compute the answers', action='store_true', dest='compute')
parser.add_option('-P', '--print_options', default=False, help='Print the options', action='store_true', dest='print_options')
(options, args) = parser.parse_args()
if options.print_options:
print 'OPTIONS seed', options.seed
print 'OPTIONS addr', options.addr
print 'OPTIONS addr_desc', options.addr_desc
print 'OPTIONS seek_speed', options.seek_speed
print 'OPTIONS window', options.window
print 'OPTIONS policy', options.policy
print 'OPTIONS compute', options.compute
print 'OPTIONS read_fraction', options.read_fraction
print 'OPTIONS graphics', options.graphics
print 'OPTIONS animate_delay', options.animate_delay
print ''
if options.window == 0:
print 'Scheduling window (%d) must be positive or -1 (which means a full window)' % options.window
sys.exit(1)
# set up simulator info
d = Raid(mapping=options.mapping, addr_desc=options.addr_desc, addr=options.addr,
disk_count=4, seek_speed=options.seek_speed, seed=options.seed, balance=options.balance,
read_fraction=float(options.read_fraction), window=options.window, animate_delay=options.animate_delay)
# run simulation
d.Go()
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import seed_stream
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow.python.framework import tensor_util
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
tfd = tfp.distributions
mix = 0.3
bimix_gauss = tfd.Mixture(
cat=tfd.Categorical(probs=[mix, 1.-mix]),
components=[
tfd.Normal(loc=-1., scale=0.1),
tfd.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
use_static_graph=False,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
of `distributions`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
use_static_graph: Calls to `sample` will not rely on dynamic tensor
indexing, allowing for some static graph compilation optimizations, but
at the expense of sampling all underlying distributions in the mixture.
(Possibly useful when running on TPUs).
Default value: `False` (i.e., use dynamic indexing).
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = dict(locals())
# TODO(b/117098119): Remove tf.distribution references once they're gone.
if not isinstance(cat, categorical.Categorical) and not isinstance(
cat, tf.distributions.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
# TODO(b/117098119): Remove tf.distribution references once they're gone.
if not all(
isinstance(c, distribution.Distribution) or
isinstance(c, tf.distributions.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with tf.name_scope(name, values=[cat.logits]) as name:
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = tf.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [tf.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
tf.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
tf.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
self._use_static_graph = use_static_graph
if use_static_graph and static_num_components is None:
raise ValueError("Number of categories must be known statically when "
"`static_sample=True`.")
# We let the Mixture distribution access _graph_parents since it's arguably
# more like a base class.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = tf.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with tf.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return tf.add_n(partial_means)
def _stddev(self):
with tf.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = tf.stack(distribution_means, axis=-1)
stacked_devs = tf.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (
tf.stack(cat_probs, axis=-1) * tf.ones_like(stacked_means))
batched_dev = distribution_util.mixture_stddev(
tf.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
tf.reshape(stacked_means, [-1, len(self.components)]),
tf.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return tf.reshape(batched_dev, tf.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
with tf.control_dependencies(self._assertions):
x = tf.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = tf.stack(final_log_probs, 0)
log_sum_exp = tf.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with tf.control_dependencies(self._assertions):
x = tf.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = tf.stack(final_log_cdfs, axis=0)
mixture_log_cdf = tf.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
if self._use_static_graph:
# This sampling approach is almost the same as the approach used by
# `MixtureSameFamily`. The differences are due to having a list of
# `Distribution` objects rather than a single object, and maintaining
# random seed management that is consistent with the non-static code path.
samples = []
cat_samples = self.cat.sample(n, seed=seed)
stream = seed_stream.SeedStream(seed, salt="Mixture")
for c in range(self.num_components):
samples.append(self.components[c].sample(n, seed=stream()))
x = tf.stack(samples, -self._static_event_shape.ndims - 1) # [n, B, k, E]
npdt = x.dtype.as_numpy_dtype
mask = tf.one_hot(
indices=cat_samples, # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_util.pad_mixture_dimensions(
mask, self, self._cat,
self._static_event_shape.ndims) # [n, B, k, [1]*e]
return tf.reduce_sum(
x * mask, axis=-1 - self._static_event_shape.ndims) # [n, B, E]
with tf.control_dependencies(self._assertions):
n = tf.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.shape
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = tf.shape(cat_samples)
samples_size = tf.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = tf.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = tf.reshape(tf.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = tf.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = tf.reshape(
tf.tile(tf.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = tf.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
stream = seed_stream.SeedStream(seed, salt="Mixture")
for c in range(self.num_components):
n_class = tf.size(partitioned_samples_indices[c])
samples_class_c = self.components[c].sample(
n_class, seed=stream())
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * tf.range(n_class) + partitioned_batch_indices[c])
samples_class_c = tf.reshape(
samples_class_c, tf.concat([[n_class * batch_size], event_shape],
0))
samples_class_c = tf.gather(
samples_class_c,
lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = tf.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = tf.reshape(
lhs_flat_ret, tf.concat(
[samples_shape, self.event_shape_tensor()], 0))
ret.set_shape(
tf.TensorShape(static_samples_shape).concatenate(self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with tf.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return tf.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
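# ----------------------------------------------------------------------------
# A minimal usage sketch (not part of the library code above): it mirrors the
# class docstring example and exercises `log_prob`, `sample`, and
# `entropy_lower_bound`. It assumes a TF1-era TensorFlow Probability build in
# which this class is exposed as `tfp.distributions.Mixture`; the explicit
# session is illustrative only and the function is never called at import time.
def _demo_mixture():
    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions
    mix = tfd.Mixture(
        cat=tfd.Categorical(probs=[0.3, 0.7]),
        components=[
            tfd.Normal(loc=-1., scale=0.1),
            tfd.Normal(loc=+1., scale=0.5),
        ])
    log_p = mix.log_prob(0.)          # scalar log-density at x = 0
    draws = mix.sample(5, seed=42)    # five draws; component choice is internal
    h_lb = mix.entropy_lower_bound()  # \sum_i c_i H[q_i], see docstring above
    with tf.Session() as sess:
        print(sess.run([log_p, draws, h_lb]))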
|
import dash
import dash.testing.wait as wait
from dash_table import DataTable
import pandas as pd
import pytest
from selenium.webdriver.common.keys import Keys
df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/solar.csv")
base_props = dict(
id="table",
columns=[{"name": i, "id": i} for i in df.columns],
row_selectable="single",
row_deletable=True,
data=df.to_dict("records"),
editable=True,
fixed_rows={"headers": True, "data": 1},
style_cell=dict(width=150),
style_table=dict(width=500),
)
def get_margin(test):
return test.driver.execute_script(
"return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-1')).marginLeft);"
)
def get_scroll(test):
return test.driver.execute_script(
"return document.querySelector('#table .dt-table-container__row-1').scrollLeft;"
)
def scroll_by(test, value):
test.driver.execute_script(
"document.querySelector('#table .dt-table-container__row-1').scrollBy({}, 0);".format(
value
)
)
@pytest.mark.parametrize(
"fixed_rows",
[dict(fixed_rows=dict(headers=True)), dict(fixed_rows=dict(headers=True, data=1))],
)
@pytest.mark.parametrize(
"fixed_columns",
[
dict(),
dict(fixed_columns=dict(headers=True)),
dict(fixed_columns=dict(headers=True, data=1)),
],
)
@pytest.mark.parametrize(
"ops", [dict(), dict(row_selectable="single", row_deletable=True)]
)
def test_scrol001_fixed_alignment(test, fixed_rows, fixed_columns, ops):
props = {**base_props, **fixed_rows, **fixed_columns, **ops}
app = dash.Dash(__name__)
app.layout = DataTable(**props)
test.start_server(app)
target = test.table("table")
assert target.is_ready()
fixed_width = test.driver.execute_script(
"return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-0')).width) || 0;"
)
assert -get_margin(test) == fixed_width
scroll_by(test, 200)
wait.until(
lambda: -get_margin(test) == fixed_width + 200, 3,
)
scroll_by(test, -200)
wait.until(
lambda: -get_margin(test) == fixed_width, 3,
)
assert test.get_log_errors() == []
@pytest.mark.parametrize(
"fixed_rows",
[dict(fixed_rows=dict(headers=True)), dict(fixed_rows=dict(headers=True, data=1))],
)
@pytest.mark.parametrize(
"fixed_columns",
[
dict(),
dict(fixed_columns=dict(headers=True)),
dict(fixed_columns=dict(headers=True, data=1)),
],
)
@pytest.mark.parametrize(
"ops", [dict(), dict(row_selectable="single", row_deletable=True)]
)
def test_scrol002_edit_navigate(test, fixed_rows, fixed_columns, ops):
props = {**base_props, **fixed_rows, **fixed_columns, **ops}
app = dash.Dash(__name__)
app.layout = DataTable(**props)
test.start_server(app)
target = test.table("table")
assert target.is_ready()
fixed_width = test.driver.execute_script(
"return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-0')).width) || 0;"
)
scroll_by(test, 200)
# alignment is ok after editing a cell
target.cell(0, 3).click()
test.send_keys("abc" + Keys.ENTER)
wait.until(lambda: target.cell(1, 3).is_selected(), 3)
wait.until(lambda: -get_margin(test) == fixed_width + get_scroll(test), 3)
# alignment is ok after navigating
test.send_keys(Keys.ARROW_DOWN)
test.send_keys(Keys.ARROW_RIGHT)
wait.until(lambda: target.cell(2, 4).is_selected(), 3)
wait.until(
lambda: -get_margin(test) == fixed_width + get_scroll(test), 3,
)
assert test.get_log_errors() == []
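# A small consolidation sketch (not used by the tests above): both tests keep
# re-asserting the same invariant, namely that the negative left margin of the
# first non-fixed cell equals the fixed-column width plus the current
# horizontal scroll offset. A helper like this could express that check once;
# the `timeout` default is an assumption, not a value from the original tests.
def wait_until_aligned(test, fixed_width, timeout=3):
    wait.until(
        lambda: -get_margin(test) == fixed_width + get_scroll(test), timeout
    )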
|
QUERY_PROCESSORS_PYPATHS = [
"addok.helpers.text.check_query_length",
"addok_france.extract_address",
"addok_france.clean_query",
"addok_france.remove_leading_zeros",
]
SEARCH_RESULT_PROCESSORS_PYPATHS = [
"addok.helpers.results.match_housenumber",
"addok_france.make_labels",
"addok.helpers.results.score_by_importance",
"addok.helpers.results.score_by_autocomplete_distance",
"addok.helpers.results.score_by_str_distance",
"addok.helpers.results.score_by_geo_distance",
"addok.helpers.results.adjust_scores",
]
PROCESSORS_PYPATHS = [
"addok.helpers.text.tokenize",
"addok.helpers.text.normalize",
"addok_france.glue_ordinal",
"addok_france.fold_ordinal",
"addok_france.flag_housenumber",
"addok.helpers.text.synonymize",
"addok_fr.phonemicize",
]
SQLITE_DB_PATH = '/srv/addok/addok.db'
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .task_module_ids import TaskModuleIds
from .task_module_response_factory import TaskModuleResponseFactory
from .task_module_ui_constants import TaskModuleUIConstants
from .ui_settings import UISettings
__all__ = [
"TaskModuleIds",
"TaskModuleResponseFactory",
"TaskModuleUIConstants",
"UISettings",
]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class ApplicationResource(ProxyResource):
"""The application resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource ID.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param type_version:
:type type_version: str
:param parameters:
:type parameters:
list[~azure.mgmt.servicefabric.models.ApplicationParameter]
:param upgrade_policy:
:type upgrade_policy:
~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
:param minimum_nodes: The minimum number of nodes where Service Fabric
will reserve capacity for this application. Note that this does not mean
that the services of this application will be placed on all of those
nodes. If this property is set to zero, no capacity will be reserved. The
value of this property cannot be more than the value of the MaximumNodes
property.
:type minimum_nodes: long
:param maximum_nodes: The maximum number of nodes where Service Fabric
will reserve capacity for this application. Note that this does not mean
that the services of this application will be placed on all of those
nodes. By default, the value of this property is zero and it means that
the services can be placed on any node. Default value: 0 .
:type maximum_nodes: long
:param remove_application_capacity: Remove the current application capacity settings.
:type remove_application_capacity: bool
:param metrics:
:type metrics:
list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response
:vartype provisioning_state: str
:param type_name:
:type type_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'minimum_nodes': {'minimum': 0},
'maximum_nodes': {'minimum': 0},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type_version': {'key': 'properties.typeVersion', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ApplicationParameter]'},
'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'},
'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'},
'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'},
'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'type_name': {'key': 'properties.typeName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationResource, self).__init__(**kwargs)
self.type_version = kwargs.get('type_version', None)
self.parameters = kwargs.get('parameters', None)
self.upgrade_policy = kwargs.get('upgrade_policy', None)
self.minimum_nodes = kwargs.get('minimum_nodes', None)
self.maximum_nodes = kwargs.get('maximum_nodes', 0)
self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
self.metrics = kwargs.get('metrics', None)
self.provisioning_state = None
self.type_name = kwargs.get('type_name', None)
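# Illustrative-only sketch (not part of the generated model): shows how the
# keyword arguments accepted by __init__ line up with the defaults visible
# above. `location` is handled by the ProxyResource base per the attribute
# map; the location and type values below are placeholders.
def _example_application_resource():
    app = ApplicationResource(
        location='eastus',        # required per _validation
        type_name='MyAppType',    # hypothetical application type name
        type_version='1.0.0',     # hypothetical application type version
        minimum_nodes=1,
        maximum_nodes=3,
    )
    assert app.maximum_nodes == 3
    assert app.remove_application_capacity is None  # default when omitted
    assert app.provisioning_state is None           # populated by the server only
    return app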
|
# -*- coding: iso-8859-1 -*-
""" Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import unittest, sys, string, codecs, new
from test import test_support, string_tests
# Error handling (bad decoder return)
def search_function(encoding):
def decode1(input, errors="strict"):
return 42 # not a tuple
def encode1(input, errors="strict"):
return 42 # not a tuple
def encode2(input, errors="strict"):
return (42, 42) # no unicode
def decode2(input, errors="strict"):
return (42, 42) # no unicode
if encoding=="test.unicode1":
return (encode1, decode1, None, None)
elif encoding=="test.unicode2":
return (encode2, decode2, None, None)
else:
return None
codecs.register(search_function)
class UnicodeTest(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest,
):
type2test = unicode
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assert_(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(unicode):
def __repr__(self):
return 'usub(%r)' % unicode.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assert_(object is not realresult)
def test_literals(self):
self.assertEqual(u'\xff', u'\u00ff')
self.assertEqual(u'\uffff', u'\U0000ffff')
self.assertRaises(UnicodeError, eval, 'u\'\\Ufffffffe\'')
self.assertRaises(UnicodeError, eval, 'u\'\\Uffffffff\'')
self.assertRaises(UnicodeError, eval, 'u\'\\U%08x\'' % 0x110000)
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr(u'abc'), "u'abc'")
self.assertEqual(repr(u'ab\\c'), "u'ab\\\\c'")
self.assertEqual(repr(u'ab\\'), "u'ab\\\\'")
self.assertEqual(repr(u'\\c'), "u'\\\\c'")
self.assertEqual(repr(u'\\'), "u'\\\\'")
self.assertEqual(repr(u'\n'), "u'\\n'")
self.assertEqual(repr(u'\r'), "u'\\r'")
self.assertEqual(repr(u'\t'), "u'\\t'")
self.assertEqual(repr(u'\b'), "u'\\x08'")
self.assertEqual(repr(u"'\""), """u'\\'"'""")
self.assertEqual(repr(u"'\""), """u'\\'"'""")
self.assertEqual(repr(u"'"), '''u"'"''')
self.assertEqual(repr(u'"'), """u'"'""")
latin1repr = (
"u'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = repr(u''.join(map(unichr, xrange(256))))
self.assertEqual(testrepr, latin1repr)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', u'a')
self.checkequalnofix(0, 'aaa', 'count', u'b')
self.checkequalnofix(3, u'aaa', 'count', 'a')
self.checkequalnofix(0, u'aaa', 'count', 'b')
self.checkequalnofix(0, u'aaa', 'count', 'b')
self.checkequalnofix(1, u'aaa', 'count', 'a', -1)
self.checkequalnofix(3, u'aaa', 'count', 'a', -10)
self.checkequalnofix(2, u'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, u'aaa', 'count', 'a', 0, -10)
def test_find(self):
self.checkequalnofix(0, u'abcdefghiabc', 'find', u'abc')
self.checkequalnofix(9, u'abcdefghiabc', 'find', u'abc', 1)
self.checkequalnofix(-1, u'abcdefghiabc', 'find', u'def', 4)
self.assertRaises(TypeError, u'hello'.find)
self.assertRaises(TypeError, u'hello'.find, 42)
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', u'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', u'')
self.checkequalnofix(12, u'abcdefghiabc', 'rfind', '')
def test_index(self):
string_tests.CommonTest.test_index(self)
# check mixed argument types
for (t1, t2) in ((str, unicode), (unicode, str)):
self.checkequalnofix(0, t1('abcdefghiabc'), 'index', t2(''))
self.checkequalnofix(3, t1('abcdefghiabc'), 'index', t2('def'))
self.checkequalnofix(0, t1('abcdefghiabc'), 'index', t2('abc'))
self.checkequalnofix(9, t1('abcdefghiabc'), 'index', t2('abc'), 1)
self.assertRaises(ValueError, t1('abcdefghiabc').index, t2('hib'))
self.assertRaises(ValueError, t1('abcdefghiab').index, t2('abc'), 1)
self.assertRaises(ValueError, t1('abcdefghi').index, t2('ghi'), 8)
self.assertRaises(ValueError, t1('abcdefghi').index, t2('ghi'), -1)
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
# check mixed argument types
for (t1, t2) in ((str, unicode), (unicode, str)):
self.checkequalnofix(12, t1('abcdefghiabc'), 'rindex', t2(''))
self.checkequalnofix(3, t1('abcdefghiabc'), 'rindex', t2('def'))
self.checkequalnofix(9, t1('abcdefghiabc'), 'rindex', t2('abc'))
self.checkequalnofix(0, t1('abcdefghiabc'), 'rindex', t2('abc'), 0, -1)
self.assertRaises(ValueError, t1('abcdefghiabc').rindex, t2('hib'))
self.assertRaises(ValueError, t1('defghiabc').rindex, t2('def'), 1)
self.assertRaises(ValueError, t1('defghiabc').rindex, t2('abc'), 0, -1)
self.assertRaises(ValueError, t1('abcdefghi').rindex, t2('ghi'), 0, 8)
self.assertRaises(ValueError, t1('abcdefghi').rindex, t2('ghi'), 0, -1)
def test_translate(self):
self.checkequalnofix(u'bbbc', u'abababc', 'translate', {ord('a'):None})
self.checkequalnofix(u'iiic', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i')})
self.checkequalnofix(u'iiix', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i'), ord('c'):u'x'})
self.checkequalnofix(u'<i><i><i>c', u'abababc', 'translate', {ord('a'):None, ord('b'):u'<i>'})
self.checkequalnofix(u'c', u'abababc', 'translate', {ord('a'):None, ord('b'):u''})
self.checkequalnofix(u'xyyx', u'xzx', 'translate', {ord('z'):u'yy'})
self.assertRaises(TypeError, u'hello'.translate)
self.assertRaises(TypeError, u'abababc'.translate, {ord('a'):''})
def test_split(self):
string_tests.CommonTest.test_split(self)
# Mixed arguments
self.checkequalnofix([u'a', u'b', u'c', u'd'], u'a//b//c//d', 'split', '//')
self.checkequalnofix([u'a', u'b', u'c', u'd'], 'a//b//c//d', 'split', u'//')
self.checkequalnofix([u'endcase ', u''], u'endcase test', 'split', 'test')
def test_join(self):
string_tests.MixinStrUnicodeUserStringTest.test_join(self)
# mixed arguments
self.checkequalnofix(u'a b c d', u' ', 'join', ['a', 'b', u'c', u'd'])
self.checkequalnofix(u'abcd', u'', 'join', (u'a', u'b', u'c', u'd'))
self.checkequalnofix(u'w x y z', u' ', 'join', string_tests.Sequence('wxyz'))
self.checkequalnofix(u'a b c d', ' ', 'join', [u'a', u'b', u'c', u'd'])
self.checkequalnofix(u'a b c d', ' ', 'join', ['a', 'b', u'c', u'd'])
self.checkequalnofix(u'abcd', '', 'join', (u'a', u'b', u'c', u'd'))
self.checkequalnofix(u'w x y z', ' ', 'join', string_tests.Sequence(u'wxyz'))
def test_strip(self):
string_tests.CommonTest.test_strip(self)
self.assertRaises(UnicodeError, u"hello".strip, "\xff")
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix(u'one@two!three!', 'one!two!three!', 'replace', u'!', u'@', 1)
self.assertRaises(TypeError, 'replace'.replace, u"r", 42)
def test_comparison(self):
# Comparisons:
self.assertEqual(u'abc', 'abc')
self.assertEqual('abc', u'abc')
self.assertEqual(u'abc', u'abc')
self.assert_(u'abcd' > 'abc')
self.assert_('abcd' > u'abc')
self.assert_(u'abcd' > u'abc')
self.assert_(u'abc' < 'abcd')
self.assert_('abc' < u'abcd')
self.assert_(u'abc' < u'abcd')
if 0:
# Move these tests to a Unicode collation module test...
# Testing UTF-16 code point order comparisons...
# No surrogates, no fixup required.
self.assert_(u'\u0061' < u'\u20ac')
# Non surrogate below surrogate value, no fixup required
self.assert_(u'\u0061' < u'\ud800\udc02')
# Non surrogate above surrogate value, fixup required
def test_lecmp(s, s2):
self.assert_(s < s2)
def test_fixup(s):
s2 = u'\ud800\udc01'
test_lecmp(s, s2)
s2 = u'\ud900\udc01'
test_lecmp(s, s2)
s2 = u'\uda00\udc01'
test_lecmp(s, s2)
s2 = u'\udb00\udc01'
test_lecmp(s, s2)
s2 = u'\ud800\udd01'
test_lecmp(s, s2)
s2 = u'\ud900\udd01'
test_lecmp(s, s2)
s2 = u'\uda00\udd01'
test_lecmp(s, s2)
s2 = u'\udb00\udd01'
test_lecmp(s, s2)
s2 = u'\ud800\ude01'
test_lecmp(s, s2)
s2 = u'\ud900\ude01'
test_lecmp(s, s2)
s2 = u'\uda00\ude01'
test_lecmp(s, s2)
s2 = u'\udb00\ude01'
test_lecmp(s, s2)
s2 = u'\ud800\udfff'
test_lecmp(s, s2)
s2 = u'\ud900\udfff'
test_lecmp(s, s2)
s2 = u'\uda00\udfff'
test_lecmp(s, s2)
s2 = u'\udb00\udfff'
test_lecmp(s, s2)
test_fixup(u'\ue000')
test_fixup(u'\uff61')
# Surrogates on both sides, no fixup required
self.assert_(u'\ud800\udc02' < u'\ud84d\udc56')
def test_islower(self):
string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
self.checkequalnofix(False, u'\u1FFc', 'islower')
def test_isupper(self):
string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
if not sys.platform.startswith('java'):
self.checkequalnofix(False, u'\u1FFc', 'isupper')
def test_istitle(self):
string_tests.MixinStrUnicodeUserStringTest.test_title(self)
self.checkequalnofix(True, u'\u1FFc', 'istitle')
self.checkequalnofix(True, u'Greek \u1FFcitlecases ...', 'istitle')
def test_isspace(self):
string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
self.checkequalnofix(True, u'\u2000', 'isspace')
self.checkequalnofix(True, u'\u200a', 'isspace')
self.checkequalnofix(False, u'\u2014', 'isspace')
def test_isalpha(self):
string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
self.checkequalnofix(True, u'\u1FFc', 'isalpha')
def test_isdecimal(self):
self.checkequalnofix(False, u'', 'isdecimal')
self.checkequalnofix(False, u'a', 'isdecimal')
self.checkequalnofix(True, u'0', 'isdecimal')
self.checkequalnofix(False, u'\u2460', 'isdecimal') # CIRCLED DIGIT ONE
self.checkequalnofix(False, u'\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
self.checkequalnofix(True, u'\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
self.checkequalnofix(True, u'0123456789', 'isdecimal')
self.checkequalnofix(False, u'0123456789a', 'isdecimal')
self.checkraises(TypeError, 'abc', 'isdecimal', 42)
def test_isdigit(self):
string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
self.checkequalnofix(True, u'\u2460', 'isdigit')
self.checkequalnofix(False, u'\xbc', 'isdigit')
self.checkequalnofix(True, u'\u0660', 'isdigit')
def test_isnumeric(self):
self.checkequalnofix(False, u'', 'isnumeric')
self.checkequalnofix(False, u'a', 'isnumeric')
self.checkequalnofix(True, u'0', 'isnumeric')
self.checkequalnofix(True, u'\u2460', 'isnumeric')
self.checkequalnofix(True, u'\xbc', 'isnumeric')
self.checkequalnofix(True, u'\u0660', 'isnumeric')
self.checkequalnofix(True, u'0123456789', 'isnumeric')
self.checkequalnofix(False, u'0123456789a', 'isnumeric')
self.assertRaises(TypeError, u"abc".isnumeric, 42)
def test_contains(self):
# Testing Unicode contains method
self.assert_('a' in u'abdb')
self.assert_('a' in u'bdab')
self.assert_('a' in u'bdaba')
self.assert_('a' in u'bdba')
self.assert_('a' in u'bdba')
self.assert_(u'a' in u'bdba')
self.assert_(u'a' not in u'bdb')
self.assert_(u'a' not in 'bdb')
self.assert_(u'a' in 'bdba')
self.assert_(u'a' in ('a',1,None))
self.assert_(u'a' in (1,None,'a'))
self.assert_(u'a' in (1,None,u'a'))
self.assert_('a' in ('a',1,None))
self.assert_('a' in (1,None,'a'))
self.assert_('a' in (1,None,u'a'))
self.assert_('a' not in ('x',1,u'y'))
self.assert_('a' not in ('x',1,None))
self.assert_(u'abcd' not in u'abcxxxx')
self.assert_(u'ab' in u'abcd')
self.assert_('ab' in u'abc')
self.assert_(u'ab' in 'abc')
self.assert_(u'ab' in (1,None,u'ab'))
self.assert_(u'' in u'abc')
self.assert_('' in u'abc')
# If the following fails either
# the contains operator does not propagate UnicodeErrors or
# someone has changed the default encoding
self.assertRaises(UnicodeError, 'g\xe2teau'.__contains__, u'\xe2')
self.assert_(u'' in '')
self.assert_('' in u'')
self.assert_(u'' in u'')
self.assert_(u'' in 'abc')
self.assert_('' in u'abc')
self.assert_(u'' in u'abc')
self.assert_(u'\0' not in 'abc')
self.assert_('\0' not in u'abc')
self.assert_(u'\0' not in u'abc')
self.assert_(u'\0' in '\0abc')
self.assert_('\0' in u'\0abc')
self.assert_(u'\0' in u'\0abc')
self.assert_(u'\0' in 'abc\0')
self.assert_('\0' in u'abc\0')
self.assert_(u'\0' in u'abc\0')
self.assert_(u'a' in '\0abc')
self.assert_('a' in u'\0abc')
self.assert_(u'a' in u'\0abc')
self.assert_(u'asdf' in 'asdf')
self.assert_('asdf' in u'asdf')
self.assert_(u'asdf' in u'asdf')
self.assert_(u'asdf' not in 'asd')
self.assert_('asdf' not in u'asd')
self.assert_(u'asdf' not in u'asd')
self.assert_(u'asdf' not in '')
self.assert_('asdf' not in u'')
self.assert_(u'asdf' not in u'')
self.assertRaises(TypeError, u"abc".__contains__)
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
# Testing Unicode formatting strings...
self.assertEqual(u"%s, %s" % (u"abc", "abc"), u'abc, abc')
self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, 2, 3), u'abc, abc, 1, 2.000000, 3.00')
self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, -2, 3), u'abc, abc, 1, -2.000000, 3.00')
self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.5), u'abc, abc, -1, -2.000000, 3.50')
self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.57), u'abc, abc, -1, -2.000000, 3.57')
self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 1003.57), u'abc, abc, -1, -2.000000, 1003.57')
if not sys.platform.startswith('java'):
self.assertEqual(u"%r, %r" % (u"abc", "abc"), u"u'abc', 'abc'")
self.assertEqual(u"%(x)s, %(y)s" % {'x':u"abc", 'y':"def"}, u'abc, def')
self.assertEqual(u"%(x)s, %(\xfc)s" % {'x':u"abc", u'\xfc':"def"}, u'abc, def')
self.assertEqual(u'%c' % 0x1234, u'\u1234')
self.assertRaises(OverflowError, u"%c".__mod__, (sys.maxunicode+1,))
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':u"abc"}, u'...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {u'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {u'foo':u"abc"}, u'...abc...')
self.assertEqual('...%(foo)s...' % {u'foo':u"abc",'def':123}, u'...abc...')
self.assertEqual('...%(foo)s...' % {u'foo':u"abc",u'def':123}, u'...abc...')
self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...1...2...3...abc...')
self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...%...%s...1...2...3...abc...')
self.assertEqual('...%s...' % u"abc", u'...abc...')
self.assertEqual('%*s' % (5,u'abc',), u' abc')
self.assertEqual('%*s' % (-5,u'abc',), u'abc ')
self.assertEqual('%*.*s' % (5,2,u'abc',), u' ab')
self.assertEqual('%*.*s' % (5,3,u'abc',), u' abc')
self.assertEqual('%i %*.*s' % (10, 5,3,u'abc',), u'10 abc')
self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, u'abc',), u'103 abc')
self.assertEqual('%c' % u'a', u'a')
def test_format_float(self):
try:
import locale
orig_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'de_DE')
except (ImportError, locale.Error):
return # skip if we can't set locale
try:
# should not format with a comma, but always with C locale
self.assertEqual(u'1.0', u'%.1f' % 1.0)
finally:
locale.setlocale(locale.LC_ALL, orig_locale)
def test_constructor(self):
# unicode(obj) tests (this maps to PyObject_Unicode() at C level)
self.assertEqual(
unicode(u'unicode remains unicode'),
u'unicode remains unicode'
)
class UnicodeSubclass(unicode):
pass
self.assertEqual(
unicode(UnicodeSubclass('unicode subclass becomes unicode')),
u'unicode subclass becomes unicode'
)
self.assertEqual(
unicode('strings are converted to unicode'),
u'strings are converted to unicode'
)
class UnicodeCompat:
def __init__(self, x):
self.x = x
def __unicode__(self):
return self.x
self.assertEqual(
unicode(UnicodeCompat('__unicode__ compatible objects are recognized')),
u'__unicode__ compatible objects are recognized')
class StringCompat:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
self.assertEqual(
unicode(StringCompat('__str__ compatible objects are recognized')),
u'__str__ compatible objects are recognized'
)
# unicode(obj) is compatible to str():
o = StringCompat('unicode(obj) is compatible to str()')
self.assertEqual(unicode(o), u'unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
# %-formatting and .__unicode__()
self.assertEqual(u'%s' %
UnicodeCompat(u"u'%s' % obj uses obj.__unicode__()"),
u"u'%s' % obj uses obj.__unicode__()")
self.assertEqual(u'%s' %
UnicodeCompat(u"u'%s' % obj falls back to obj.__str__()"),
u"u'%s' % obj falls back to obj.__str__()")
for obj in (123, 123.45, 123L):
self.assertEqual(unicode(obj), unicode(str(obj)))
# unicode(obj, encoding, error) tests (this maps to
# PyUnicode_FromEncodedObject() at C level)
if not sys.platform.startswith('java'):
self.assertRaises(
TypeError,
unicode,
u'decoding unicode is not supported',
'utf-8',
'strict'
)
self.assertEqual(
unicode('strings are decoded to unicode', 'utf-8', 'strict'),
u'strings are decoded to unicode'
)
if not sys.platform.startswith('java'):
self.assertEqual(
unicode(
buffer('character buffers are decoded to unicode'),
'utf-8',
'strict'
),
u'character buffers are decoded to unicode'
)
self.assertRaises(TypeError, unicode, 42, 42, 42)
def test_codecs_utf7(self):
utfTests = [
(u'A\u2262\u0391.', 'A+ImIDkQ.'), # RFC2152 example
(u'Hi Mom -\u263a-!', 'Hi Mom -+Jjo--!'), # RFC2152 example
(u'\u65E5\u672C\u8A9E', '+ZeVnLIqe-'), # RFC2152 example
(u'Item 3 is \u00a31.', 'Item 3 is +AKM-1.'), # RFC2152 example
(u'+', '+-'),
(u'+-', '+--'),
(u'+?', '+-?'),
(u'\?', '+AFw?'),
(u'+?', '+-?'),
(ur'\\?', '+AFwAXA?'),
(ur'\\\?', '+AFwAXABc?'),
(ur'++--', '+-+---')
]
for (x, y) in utfTests:
self.assertEqual(x.encode('utf-7'), y)
# surrogates not supported
self.assertRaises(UnicodeError, unicode, '+3ADYAA-', 'utf-7')
self.assertEqual(unicode('+3ADYAA-', 'utf-7', 'replace'), u'\ufffd')
def test_codecs_utf8(self):
self.assertEqual(u''.encode('utf-8'), '')
self.assertEqual(u'\u20ac'.encode('utf-8'), '\xe2\x82\xac')
self.assertEqual(u'\ud800\udc02'.encode('utf-8'), '\xf0\x90\x80\x82')
self.assertEqual(u'\ud84d\udc56'.encode('utf-8'), '\xf0\xa3\x91\x96')
self.assertEqual(u'\ud800'.encode('utf-8'), '\xed\xa0\x80')
self.assertEqual(u'\udc00'.encode('utf-8'), '\xed\xb0\x80')
self.assertEqual(
(u'\ud800\udc02'*1000).encode('utf-8'),
'\xf0\x90\x80\x82'*1000
)
self.assertEqual(
u'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
u'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
u'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
u'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
u'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
u' Nunstuck git und'.encode('utf-8'),
'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
'\xe3\x80\x8cWenn ist das Nunstuck git und'
)
# UTF-8 specific decoding tests
self.assertEqual(unicode('\xf0\xa3\x91\x96', 'utf-8'), u'\U00023456' )
self.assertEqual(unicode('\xf0\x90\x80\x82', 'utf-8'), u'\U00010002' )
self.assertEqual(unicode('\xe2\x82\xac', 'utf-8'), u'\u20ac' )
# Other possible utf-8 test cases:
# * strict decoding testing for all of the
# UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual(u"www.python.org.".encode("idna"), "www.python.org.")
def test_codecs_errors(self):
# Error handling (encoding)
self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii')
self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii','strict')
self.assertEqual(u'Andr\202 x'.encode('ascii','ignore'), "Andr x")
self.assertEqual(u'Andr\202 x'.encode('ascii','replace'), "Andr? x")
# Error handling (decoding)
self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii')
self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii','strict')
self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x")
self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x')
# Error handling (unknown character names)
self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), u"xx")
# Error handling (truncated escape sequence)
self.assertRaises(UnicodeError, "\\".decode, "unicode-escape")
self.assertRaises(TypeError, "hello".decode, "test.unicode1")
self.assertRaises(TypeError, unicode, "hello", "test.unicode2")
self.assertRaises(TypeError, u"hello".encode, "test.unicode1")
self.assertRaises(TypeError, u"hello".encode, "test.unicode2")
# executes PyUnicode_Encode()
import imp
self.assertRaises(
ImportError,
imp.find_module,
"non-existing module",
[u"non-existing dir"]
)
# Error handling (wrong arguments)
self.assertRaises(TypeError, u"hello".encode, 42, 42, 42)
# Error handling (PyUnicode_EncodeDecimal())
self.assertRaises(UnicodeError, int, u"\u0200")
def test_codecs(self):
# Encoding
self.assertEqual(u'hello'.encode('ascii'), 'hello')
self.assertEqual(u'hello'.encode('utf-7'), 'hello')
self.assertEqual(u'hello'.encode('utf-8'), 'hello')
self.assertEqual(u'hello'.encode('utf8'), 'hello')
self.assertEqual(u'hello'.encode('utf-16-le'), 'h\000e\000l\000l\000o\000')
self.assertEqual(u'hello'.encode('utf-16-be'), '\000h\000e\000l\000l\000o')
self.assertEqual(u'hello'.encode('latin-1'), 'hello')
# Roundtrip safety for BMP (just the first 1024 chars)
u = u''.join(map(unichr, xrange(1024)))
for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
'raw_unicode_escape', 'unicode_escape', 'unicode_internal'):
self.assertEqual(unicode(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 256 chars)
u = u''.join(map(unichr, xrange(256)))
for encoding in ('latin-1',):
self.assertEqual(unicode(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 128 chars)
u = u''.join(map(unichr, xrange(128)))
for encoding in ('ascii',):
self.assertEqual(unicode(u.encode(encoding),encoding), u)
# Roundtrip safety for non-BMP (just a few chars)
u = u'\U00010001\U00020002\U00030003\U00040004\U00050005'
for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
#'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
self.assertEqual(unicode(u.encode(encoding),encoding), u)
# UTF-8 must be roundtrip safe for all UCS-2 code points
# This excludes surrogates: in the full range, there would be
# a surrogate pair (\udbff\udc00), which gets converted back
# to a non-BMP character (\U0010fc00)
u = u''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
for encoding in ('utf-8',):
self.assertEqual(unicode(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
# 0-127
s = ''.join(map(chr, xrange(128)))
for encoding in (
'cp037', 'cp1026',
'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
'mac_cyrillic', 'mac_latin2',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
'cp1006', 'iso8859_8',
### These have undefined mappings:
#'cp424',
### These fail the round-trip:
#'cp875'
):
self.assertEqual(unicode(s, encoding).encode(encoding), s)
# 128-255
s = ''.join(map(chr, xrange(128, 256)))
for encoding in (
'cp037', 'cp1026',
'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_4', 'iso8859_5',
'iso8859_9', 'koi8_r', 'latin_1',
'mac_cyrillic', 'mac_latin2',
### These have undefined mappings:
#'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
#'cp1256', 'cp1257', 'cp1258',
#'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
#'iso8859_3', 'iso8859_6', 'iso8859_7',
#'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
### These fail the round-trip:
#'cp1006', 'cp875', 'iso8859_8',
):
self.assertEqual(unicode(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual((u"abc" u"def"), u"abcdef")
self.assertEqual(("abc" u"def"), u"abcdef")
self.assertEqual((u"abc" "def"), u"abcdef")
self.assertEqual((u"abc" u"def" "ghi"), u"abcdefghi")
self.assertEqual(("abc" "def" u"ghi"), u"abcdefghi")
def test_printing(self):
class BitBucket:
def write(self, text):
pass
out = BitBucket()
print >>out, u'abc'
print >>out, u'abc', u'def'
print >>out, u'abc', 'def'
print >>out, 'abc', u'def'
print >>out, u'abc\n'
print >>out, u'abc\n',
print >>out, u'abc\n',
print >>out, u'def\n'
print >>out, u'def\n'
def test_ucs4(self):
if sys.maxunicode == 0xFFFF:
return
x = u'\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return u'\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_main():
test_support.run_unittest(UnicodeTest)
if __name__ == "__main__":
test_main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: benchmark-dataflow.py
import argparse
import cv2
from tensorpack import *
from tensorpack.dataflow.imgaug import *
from tensorpack.dataflow.parallel import PlasmaGetData, PlasmaPutData # noqa
from tensorpack.utils.serialize import loads
import augmentors
def test_orig(dir, name, augs, batch):
ds = dataset.ILSVRC12(dir, name, shuffle=True)
ds = AugmentImageComponent(ds, augs)
ds = BatchData(ds, batch)
# ds = PlasmaPutData(ds)
ds = MultiProcessRunnerZMQ(ds, 50, hwm=80)
# ds = PlasmaGetData(ds)
return ds
def test_lmdb_train(db, augs, batch):
ds = LMDBData(db, shuffle=False)
ds = LocallyShuffleData(ds, 50000)
ds = MultiProcessRunner(ds, 5000, 1)
ds = LMDBDataPoint(ds)
def f(x):
return cv2.imdecode(x, cv2.IMREAD_COLOR)
ds = MapDataComponent(ds, f, 0)
ds = AugmentImageComponent(ds, augs)
ds = BatchData(ds, batch, use_list=True)
# ds = PlasmaPutData(ds)
ds = MultiProcessRunnerZMQ(ds, 40, hwm=80)
# ds = PlasmaGetData(ds)
return ds
def test_lmdb_inference(db, augs, batch):
ds = LMDBData(db, shuffle=False)
# ds = LocallyShuffleData(ds, 50000)
augs = AugmentorList(augs)
def mapper(data):
im, label = loads(data[1])
im = cv2.imdecode(im, cv2.IMREAD_COLOR)
im = augs.augment(im)
return im, label
ds = MultiProcessMapData(ds, 40, mapper,
buffer_size=200)
# ds = MultiThreadMapData(ds, 40, mapper, buffer_size=2000)
ds = BatchData(ds, batch)
ds = MultiProcessRunnerZMQ(ds, 1)
return ds
def test_inference(dir, name, augs, batch=128):
ds = dataset.ILSVRC12Files(dir, name, shuffle=False, dir_structure='train')
aug = imgaug.AugmentorList(augs)
def mapf(dp):
fname, cls = dp
im = cv2.imread(fname, cv2.IMREAD_COLOR)
im = aug.augment(im)
return im, cls
ds = MultiThreadMapData(ds, 30, mapf, buffer_size=2000, strict=True)
ds = BatchData(ds, batch)
ds = MultiProcessRunnerZMQ(ds, 1)
return ds
if __name__ == '__main__':
available_augmentors = [
k[:-len("_augmentor")]
for k in augmentors.__all__ if k.endswith('_augmentor')]
parser = argparse.ArgumentParser()
parser.add_argument('data', help='file or directory of dataset')
parser.add_argument('--batch', type=int, default=64)
parser.add_argument('--name', choices=['train', 'val'], default='train')
parser.add_argument('--aug', choices=available_augmentors, required=True)
args = parser.parse_args()
augs = getattr(augmentors, args.aug + '_augmentor')()
if args.data.endswith('lmdb'):
if args.name == 'train':
ds = test_lmdb_train(args.data, augs, args.batch)
else:
ds = test_lmdb_inference(args.data, augs, args.batch)
else:
if args.name == 'train':
ds = test_orig(args.data, args.name, augs, args.batch)
else:
ds = test_inference(args.data, args.name, augs, args.batch)
TestDataSpeed(ds, 500000, warmup=100).start()
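# Example invocations (the dataset paths and the augmentor name are
# hypothetical; valid --aug choices are whatever the local `augmentors`
# module exposes as "<name>_augmentor"):
#
#   ./benchmark-dataflow.py /path/to/ILSVRC12 --name train --aug fbresnet --batch 64
#   ./benchmark-dataflow.py /path/to/ilsvrc12-val.lmdb --name val --aug fbresnet --batch 128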
|
#!/usr/bin/env python
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.strategy.dev_1_get_order_book import GetOrderBookStrategy
from hummingbot.strategy.dev_1_get_order_book.dev_1_get_order_book_config_map import dev_1_get_order_book_config_map
def start(self):
try:
exchange: str = dev_1_get_order_book_config_map.get("exchange").value.lower()
trading_pair: str = dev_1_get_order_book_config_map.get("trading_pair").value
self._initialize_markets([(exchange, [trading_pair])])
exchange: ExchangeBase = self.markets[exchange]
self.strategy = GetOrderBookStrategy(exchange=exchange,
trading_pair=trading_pair,
)
except Exception as e:
self._notify(str(e))
self.logger().error("Unknown error during initialization.", exc_info=True)
|
import logging
import sys
import time
import datetime
import unittest
import spot_db
from spot_msk import SpotMsk
import json, requests
import logging, logging.config, yaml
logging.config.dictConfig(yaml.safe_load(open('logging.conf')))
logfl = logging.getLogger('file')
logconsole = logging.getLogger('console')
logfl.debug("Debug FILE")
logconsole.debug("Debug CONSOLE")
class TestAccess(unittest.TestCase):
def echo_elapsed_time(self):
elapsed = time.time() - self._started_at
elapsed_step = time.time() - self._step_started_at
self._total_steps_cnt += 1.0
self._total_steps_elapsed += elapsed_step
avg_elapsed = self._total_steps_elapsed / self._total_steps_cnt
logging.info("total_elapsed=" + str(round(elapsed, 2)) + " step_elapsed=" + str(round(elapsed_step, 2)) + " avg_elapsed=" + str(round(avg_elapsed, 2)))
def echo(self,r):
logging.info("response=" + str(r))
logging.info("response.headers=" + str(r.headers))
logging.info("response.text=" + str(r.text))
self.echo_elapsed_time()
@classmethod
def setUpClass(self):
self._started_at = time.time()
self._total_steps_cnt = 0
self._total_steps_elapsed = 0
self.msk = SpotMsk()
logging.info('executing setUpClass')
def test_00_msk_parking(self):
self.msk.get_datasets()
def test_01_msk_622(self):
self.msk.traverse_dataset(622)
def test_01_parking_datasets(self):
dss = self.msk.get_datasets()
cnt = 0
for ds in sorted(dss):
cnt += self.msk.traverse_dataset(ds)
logging.info('total datasets '+str(cnt))
@classmethod
def tearDownClass(self):
logging.info('executing tearDownClass')
self._step_started_at = time.time()
elapsed = time.time() - self._started_at
elapsed_step = time.time() - self._step_started_at
self._total_steps_cnt += 1.0
self._total_steps_elapsed += elapsed_step
avg_elapsed = self._total_steps_elapsed / self._total_steps_cnt
logging.info("total_elapsed=" + str(round(elapsed, 2)) + " step_elapsed=" + str(round(elapsed_step, 2)) + " avg_elapsed=" + str(round(avg_elapsed, 2)))
logging.info('executed tearDownClass')
if __name__ == '__main__':
unittest.main()
|
import robin_stocks as r
import pandas as pd
import numpy as np
import ta as ta
from pandas.plotting import register_matplotlib_converters
from ta import *
from misc import *
from tradingstats import *
#Log in to Robinhood
login = r.login('YOUR_EMAIL','YOUR_PASSWORD')
# Safe division helper: returns 0 when the denominator is 0
def safe_division(n, d):
return n / d if d else 0
def get_watchlist_symbols():
"""
Returns: the symbol for each stock in your watchlist as a list of strings
"""
my_list_names = []
symbols = []
for name in r.get_all_watchlists(info='name'):
my_list_names.append(name)
for name in my_list_names:
list = r.get_watchlist_by_name(name)
for item in list:
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_position_creation_date(symbol, holdings_data):
"""Returns the time at which we bought a certain stock in our portfolio
Args:
symbol(str): Symbol of the stock that we are trying to figure out when it was bought
holdings_data(dict): dict returned by r.get_open_stock_positions()
Returns:
A string containing the date and time the stock was bought, or "Not found" otherwise
"""
instrument = r.get_instruments_by_symbols(symbol)
url = instrument[0].get('url')
for dict in holdings_data:
if(dict.get('instrument') == url):
return dict.get('created_at')
return "Not found"
def get_modified_holdings():
""" Retrieves the same dictionary as r.build_holdings, but includes data about
when the stock was purchased, which is useful for the read_trade_history() method
in tradingstats.py
Returns:
the same dict from r.build_holdings, but with an extra key-value pair for each
position you have, which is 'bought_at': (the time the stock was purchased)
"""
holdings = r.build_holdings()
holdings_data = r.get_open_stock_positions()
for symbol, dict in holdings.items():
bought_at = get_position_creation_date(symbol, holdings_data)
bought_at = str(pd.to_datetime(bought_at))
holdings[symbol].update({'bought_at': bought_at})
return holdings
def get_last_crossing(df, days, symbol="", direction=""):
"""Searches for a crossing between two indicators for a given stock
Args:
df(pandas.core.frame.DataFrame): Pandas dataframe with columns containing the stock's prices, both indicators, and the dates
days(int): Specifies the maximum number of days that the cross can occur by
symbol(str): Symbol of the stock we're querying. Optional, used for printing purposes
direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwards cross. Optional, used for printing purposes
Returns:
1 if the short-term indicator crosses above the long-term one
0 if there is no cross between the indicators
-1 if the short-term indicator crosses below the long-term one
"""
prices = df.loc[:,"Price"]
shortTerm = df.loc[:,"Indicator1"]
LongTerm = df.loc[:,"Indicator2"]
dates = df.loc[:,"Dates"]
lastIndex = prices.size - 1
index = lastIndex
found = index
recentDiff = (shortTerm.at[index] - LongTerm.at[index]) >= 0
if((direction == "above" and not recentDiff) or (direction == "below" and recentDiff)):
return 0
index -= 1
while(index >= 0 and found == lastIndex and not np.isnan(shortTerm.at[index]) and not np.isnan(LongTerm.at[index]) \
and ((pd.Timestamp("now", tz='UTC') - dates.at[index]) <= pd.Timedelta(str(days) + " days"))):
if(recentDiff):
if((shortTerm.at[index] - LongTerm.at[index]) < 0):
found = index
else:
if((shortTerm.at[index] - LongTerm.at[index]) > 0):
found = index
index -= 1
if(found != lastIndex):
if((direction == "above" and recentDiff) or (direction == "below" and not recentDiff)):
print(symbol + ": Short SMA crossed" + (" ABOVE " if recentDiff else " BELOW ") + "Long SMA at " + str(dates.at[found]) \
+", which was " + str(pd.Timestamp("now", tz='UTC') - dates.at[found]) + " ago", ", price at cross: " + str(prices.at[found]) \
+ ", current price: " + str(prices.at[lastIndex]))
return (1 if recentDiff else -1)
else:
return 0
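# A minimal, hypothetical illustration of the DataFrame layout that
# get_last_crossing() expects (synthetic values, not real market data); it is
# wrapped in a function so it does not run when this script executes.
def _demo_get_last_crossing():
    dates = pd.Series(pd.date_range(end=pd.Timestamp("now", tz='UTC'), periods=5, freq="D"))
    df = pd.concat([
        pd.Series([10.0, 10.5, 11.0, 11.5, 12.0]).rename("Price"),
        pd.Series([10.0, 10.2, 10.6, 11.1, 11.7]).rename("Indicator1"),  # short-term SMA
        pd.Series([10.5, 10.5, 10.5, 10.6, 10.8]).rename("Indicator2"),  # long-term SMA
        dates.rename("Dates"),
    ], axis=1)
    # The short-term indicator crosses above the long-term one inside the window,
    # so this should return 1.
    return get_last_crossing(df, days=10, symbol="DEMO", direction="above")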
def five_year_check(stockTicker):
"""Figure out if a stock has risen or been created within the last five years.
Args:
stockTicker(str): Symbol of the stock we're querying
Returns:
True if the stock's current price is higher than it was five years ago, or the stock IPO'd within the last five years
False otherwise
"""
instrument = r.get_instruments_by_symbols(stockTicker)
list_date = instrument[0].get("list_date")
if ((pd.Timestamp("now") - pd.to_datetime(list_date)) < pd.Timedelta(days=5 * 365)):  # roughly five years; pd.Timedelta does not accept a year unit
return True
fiveyear = r.get_historicals(stockTicker,span='5year',bounds='regular')
closingPrices = []
for item in fiveyear:
closingPrices.append(float(item['close_price']))
recent_price = closingPrices[len(closingPrices) - 1]
oldest_price = closingPrices[0]
return (recent_price > oldest_price)
def golden_cross(stockTicker, n1, n2, days, direction=""):
"""Determine if a golden/death cross has occured for a specified stock in the last X trading days
Args:
stockTicker(str): Symbol of the stock we're querying
n1(int): Specifies the short-term indicator as an X-day moving average.
n2(int): Specifies the long-term indicator as an X-day moving average.
(n1 should be smaller than n2 to produce meaningful results, e.g n1=50, n2=200)
days(int): Specifies the maximum number of days that the cross can occur by
direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwards cross. Optional, used for printing purposes
Returns:
1 if the short-term indicator crosses above the long-term one
0 if there is no cross between the indicators
-1 if the short-term indicator crosses below the long-term one
False if direction == "above" and five_year_check(stockTicker) returns False, meaning that we're considering whether to
buy the stock but it hasn't risen overall in the last five years, suggesting it contains fundamental issues
"""
if(direction == "above" and not five_year_check(stockTicker)):
return False
history = r.get_historicals(stockTicker,span='year',bounds='regular')
closingPrices = []
dates = []
for item in history:
closingPrices.append(float(item['close_price']))
dates.append(item['begins_at'])
price = pd.Series(closingPrices)
dates = pd.Series(dates)
dates = pd.to_datetime(dates)
sma1 = ta.volatility.bollinger_mavg(price, n=int(n1), fillna=False)
sma2 = ta.volatility.bollinger_mavg(price, n=int(n2), fillna=False)
series = [price.rename("Price"), sma1.rename("Indicator1"), sma2.rename("Indicator2"), dates.rename("Dates")]
df = pd.concat(series, axis=1)
cross = get_last_crossing(df, days, symbol=stockTicker, direction=direction)
# if(cross):
# show_plot(price, sma1, sma2, dates, symbol=stockTicker, label1=str(n1)+" day SMA", label2=str(n2)+" day SMA")
return cross
def sell_holdings(symbol, holdings_data):
""" Place an order to sell all holdings of a stock.
Args:
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from get_modified_holdings() method
"""
shares_owned = int(float(holdings_data[symbol].get("quantity")))
r.order_sell_market(symbol, shares_owned)
print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######")
def buy_holdings(potential_buys, profile_data, holdings_data):
""" Places orders to buy holdings of stocks. This method will try to order
an appropriate amount of shares such that your holdings of the stock will
roughly match the average for the rest of your portfolio. If the share
price is too high considering the rest of your holdings and the amount of
buying power in your account, it will not order any shares.
Args:
potential_buys(list): List of strings, the strings are the symbols of stocks we want to buy
profile_data(dict): dict obtained from r.build_user_profile(), used for the account's cash and equity
holdings_data(dict): dict obtained from r.build_holdings() or get_modified_holdings() method
"""
cash = float(profile_data.get('cash'))
portfolio_value = float(profile_data.get('equity')) - cash
ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys))
prices = r.get_latest_price(potential_buys)
for i in range(0, len(potential_buys)):
stock_price = float(prices[i])
if(ideal_position_size < stock_price < ideal_position_size*1.5):
num_shares = int(ideal_position_size*1.5/stock_price)
elif (stock_price < ideal_position_size):
num_shares = int(ideal_position_size/stock_price)
else:
print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######")
break
print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######")
r.order_buy_market(potential_buys[i], num_shares)
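# A worked example of the position-sizing formula above (illustrative numbers,
# not from a real account): with portfolio_value=1000, cash=500, 5 current
# holdings and 2 potential buys,
#   ideal_position_size = (1000/5 + 500/2) / (2*2) = (200 + 250) / 4 = 112.5
# so a $100 stock gets int(112.5/100) = 1 share, a $150 stock (between 112.5
# and 168.75) gets int(112.5*1.5/150) = 1 share, and a $200 stock triggers the
# "not enough buying power" branch.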
def scan_stocks():
""" The main method. Sells stocks in your portfolio if their 50 day moving average crosses
below the 200 day, and buys stocks in your watchlist if the opposite happens.
###############################################################################################
WARNING: Comment out the sell_holdings and buy_holdings lines if you don't actually want to execute the trade.
###############################################################################################
If you sell a stock, this updates tradehistory.txt with information about the position,
how much you've earned/lost, etc.
"""
print("----- Starting scan... -----\n")
register_matplotlib_converters()
watchlist_symbols = get_watchlist_symbols()
portfolio_symbols = get_portfolio_symbols()
holdings_data = get_modified_holdings()
potential_buys = []
sells = []
print("Current Portfolio: " + str(portfolio_symbols) + "\n")
print("Current Watchlist: " + str(watchlist_symbols) + "\n")
print("----- Scanning portfolio for stocks to sell -----\n")
for symbol in portfolio_symbols:
cross = golden_cross(symbol, n1=50, n2=200, days=30, direction="below")
if(cross == -1):
sell_holdings(symbol, holdings_data)
sells.append(symbol)
profile_data = r.build_user_profile()
print("\n----- Scanning watchlist for stocks to buy -----\n")
for symbol in watchlist_symbols:
if(symbol not in portfolio_symbols):
cross = golden_cross(symbol, n1=50, n2=200, days=10, direction="above")
if(cross == 1):
potential_buys.append(symbol)
if(len(potential_buys) > 0):
buy_holdings(potential_buys, profile_data, holdings_data)
if(len(sells) > 0):
update_trade_history(sells, holdings_data, "tradehistory.txt")
print("----- Scan over -----\n")
#execute the scan
scan_stocks()
|
#!/usr/bin/env python3
'''
diffinfo.py
Copyright 2012-2017 Codinuum Software Lab <http://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import re
import os
import logging
import pathsetup
from fragment import Fragment
import gzip
logger = logging.getLogger()
read_delete_insert_info_size_threshold = 4
excl_L_pat = re.compile(r'\) \[')
excl_R_pat = re.compile(r'(?P<list>.*)\]')
num_pat = re.compile(r'(?P<num>[0-9]+);?')
def get_excluded(s):
result = []
l = excl_L_pat.finditer(s)
start = -1
for m in l:
start = m.end()
if start > 0:
m = excl_R_pat.search(s, start)
if m:
s = m.group('list')
result = [int(x) for x in num_pat.findall(s)]
return result
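# A minimal, hypothetical sanity check for get_excluded(); the input format here
# is only inferred from the regexes above, not taken from real diff output.
def _demo_get_excluded():
    assert get_excluded("(8) kind(12L) [3;4;5](elems)") == [3, 4, 5]
    assert get_excluded("no excluded list here") == []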
named_node_pat_s = r"\((?P<size>[0-9]+)\) \(([0-9]+):(?P<gnid>[0-9]+)\)c:(?P<kind>.*) name='(?P<name>.*)'(?P<rest>.*)\((?P<loc>[0-9]+L.*)\)(?P<exc>.*)\((?P<elems>.*)\)$"
pat_s = r"\((?P<size>[0-9]+)\) \(([0-9]+):(?P<gnid>[0-9]+)\)c:(?P<kind>.*)\((?P<loc>[0-9]+L.*)\)(?P<exc>.*)\((?P<elems>.*)\)$"
named_node_insert_pat = re.compile('INSERT' + named_node_pat_s)
insert_pat = re.compile('INSERT' + pat_s)
named_node_delete_pat = re.compile('DELETE' + named_node_pat_s)
delete_pat = re.compile('DELETE' + pat_s)
def read_delete_insert_info(fname):
logger.info('reading "{}"'.format(fname))
deletes = []
inserts = []
try:
f = open(fname)
for line in f:
line = line.rstrip()
m = named_node_delete_pat.search(line)
if m:
size = int(m.group('size'))
name = m.group('name')
if name and size > read_delete_insert_info_size_threshold:
excluded = get_excluded(m.group('exc'))
elems = Fragment(m.group('elems'))
rest = m.group('rest')
loc = m.group('loc')
kind = m.group('kind') + '|' + rest
gnid = int(m.group('gnid'))
r = {'loc':loc,'size':size,'kind':kind,'name':name,'gnid':gnid,'excluded':excluded,'elems':elems}
deletes.append(r)
else:
m = delete_pat.search(line)
if m:
size = int(m.group('size'))
if size > read_delete_insert_info_size_threshold:
kind = m.group('kind')
loc = m.group('loc')
gnid = int(m.group('gnid'))
excluded = get_excluded(m.group('exc'))
elems = Fragment(m.group('elems'))
r = {'loc':loc,'size':size,'kind':kind,'name':None,'gnid':gnid,'excluded':excluded,'elems':elems}
deletes.append(r)
m = named_node_insert_pat.search(line)
if m:
size = int(m.group('size'))
name = m.group('name')
if name and size > read_delete_insert_info_size_threshold:
excluded = get_excluded(m.group('exc'))
elems = Fragment(m.group('elems'))
rest = m.group('rest')
loc = m.group('loc')
kind = m.group('kind') + '|' + rest
gnid = int(m.group('gnid'))
r = {'loc':loc,'size':size,'kind':kind,'name':name,'gnid':gnid,'excluded':excluded,'elems':elems}
inserts.append(r)
else:
m = insert_pat.search(line)
if m:
size = int(m.group('size'))
if size > read_delete_insert_info_size_threshold:
kind = m.group('kind')
loc = m.group('loc')
gnid = int(m.group('gnid'))
excluded = get_excluded(m.group('exc'))
elems = Fragment(m.group('elems'))
r = {'loc':loc,'size':size,'kind':kind,'name':None,'gnid':gnid,'excluded':excluded,'elems':elems}
inserts.append(r)
f.close()
except IOError as e:
logger.warning(str(e))
return (deletes, inserts)
map_pat = re.compile(r'(?P<kind>R|E)\[#([0-9]+)U:#(?P<gi1>[0-9]+)G\](?P<lab1>.*)\[(?P<loc1>.*)\] -- \[#([0-9]+)U:#(?P<gi2>[0-9]+)G\](?P<lab2>.*)\[(?P<loc2>.*)\]')
def read_map_info(info, swapped=False):
map_file_not_found = True
gi_map = []
relabeled_gis = []
empty_map = True
opener = open
if os.path.exists(info):
pass
else: # maybe compressed
info = info + '.gz'
opener = gzip.open
try:
f = opener(info, 'rt')  # text mode, so gzip-compressed files also yield str lines
map_file_not_found = False
for line in f:
m = map_pat.search(line)
if m:
empty_map = False
gi1 = int(m.group('gi1'))
gi2 = int(m.group('gi2'))
kind = m.group('kind')
lab1 = (m.group('lab1'))
lab2 = (m.group('lab2'))
loc1 = (m.group('loc1'))
loc2 = (m.group('loc2'))
if swapped:
gi_map.append((gi2, gi1))
if kind == 'R':
relabeled_gis.append(gi2)
else:
gi_map.append((gi1, gi2))
if kind == 'R':
relabeled_gis.append(gi1)
f.close()
except Exception as e:
logger.warning(str(e))
if map_file_not_found:
gi_map = None
relabeled_gis = None
if empty_map:
logger.warning('empty map: "{}"'.format(info))
return (gi_map, relabeled_gis)
lmap_pat = re.compile(r'(R|E)\[(?P<loc1>[0-9]+L.*)\].* -- .*\[(?P<loc2>[0-9]+L.*)\]')
def read_lmap_info(info, swapped=False):
result = []
try:
f = open(info)
for line in f:
m = lmap_pat.search(line)
if m:
loc1 = m.group('loc1')
loc2 = m.group('loc2')
if swapped:
result.append((loc2, loc1))
else:
result.append((loc1, loc2))
f.close()
except Exception as e:
logger.warning(str(e))
return result
def test(mapfile):
(gi_map, relabeled_gis) = read_map_info(mapfile)
print('gindex map read: size={}'.format(len(gi_map)))
print('{} relabeled gindexes found'.format(len(relabeled_gis)))
if __name__ == '__main__':
test('map.gz')
|
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
data = pd.read_csv("../results/master.csv")
data = pd.read_csv("../data/FoldX_predictions.csv")
x = list(data["ddG"])
y = list(data["FoldX_dGG"])
#clean # XXX:
import itertools
#lists = sorted(zip(*[x, y]))
#x, y = list(zip(*lists))
#x = x[:10]
#y = y[:10]
for i in range(len(x)):
x[i] = float(x[i])
print(y)
print(x)
x = np.array(x)
y = np.array(y)
data = {"x":x,"y":y}
plt.scatter("x","y", data=data, label=None)
plt.plot(x,y,"o")
plt.ylabel("Predicted ddG")
plt.xlabel("Experimental ddG")
x = np.array(x)
#plt.xticks(np.arange(x.min(), x.max(), 0.5))
corr = np.corrcoef(x, y)[0,1]
plt.text(-2.5, 7, 'Pearson correlation \ncoefficient: '+str(round(corr,3)))  # np.corrcoef computes Pearson, not Spearman
print(corr)
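# If a Spearman (rank) correlation was actually intended, a minimal sketch using
# scipy (an extra dependency assumed to be available; it is not imported above):
def spearman_corr(a, b):
    from scipy.stats import spearmanr  # local import keeps the assumption explicit
    rho, _pvalue = spearmanr(a, b)
    return rho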
m, b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b, label="Best Fit")
plt.text(3.3, -1.3, 'slope = '+str(round(m,2)))
plt.text(3.3, -1.7, 'y-intercept = '+str(round(b,2)))
x_hori = list(np.arange(-10,10, 0.5))
y_hori = list(np.arange(-10,10, 0.5))
plt.plot(x_hori, y_hori, linestyle="dashed", label="Ideal")
plt.ylim(-3,8)
plt.xlim(-3,6)
plt.legend(loc="upper right")
plt.title("New dataset (ProTherm+HotMusic)")
plt.show()
print(x)
print(y)
# NOTE: this first version of check_accuracy is superseded by the
# check_accuracy(threshold, x, y) definition below; it also expects `data` to
# still be a DataFrame with a "FoldX_predictions" column, while `data` has been
# reassigned to a plain dict above, so it would fail if called.
def check_accuracy(threshold):
true_positive = 0
false_positive = 0
true_negative = 0
false_negative = 0
for i in range(data.shape[0]):
if data.loc[i, "ddG"] >= threshold: # actual positive
if data.loc[i, "FoldX_predictions"] >= threshold:
true_positive = true_positive + 1
elif data.loc[i, "FoldX_predictions"] <= threshold:
false_negative = false_negative + 1
else:
exit()
else: # actual negative
if data.loc[i, "FoldX_predictions"] <= threshold:
true_negative = true_negative + 1
elif data.loc[i, "FoldX_predictions"] >= threshold:
false_positive = false_positive + 1
else:
exit()
return [true_positive, false_positive, true_negative, false_negative]
def check_accuracy(threshold, x, y):
"""Return [true_positive, false_positive, true_negative, false_negative] counts for predictions y against experimental values x at the given ddG threshold."""
true_positive = 0
false_positive = 0
true_negative = 0
false_negative = 0
for i in range(len(x)):
if float(x[i]) >= threshold: # actual positive
if y[i] >= threshold:
true_positive = true_positive + 1
elif y[i] <= threshold:
false_negative = false_negative + 1 # predicted negative but actually positive
else:
exit()
else: # actual negative
if y[i] <= threshold:
true_negative = true_negative + 1
elif y[i] >= threshold:
false_positive = false_positive + 1 # predicted positive but actually negative
else:
exit()
return [true_positive, false_positive, true_negative, false_negative]
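# A minimal, hypothetical sanity check for check_accuracy() with synthetic values
# (kept inside a function so it does not run as part of this script):
def _demo_check_accuracy():
    xs = [2.0, 2.0, 2.0, -1.0, -1.0]   # "experimental" ddG values
    ys = [2.5, 1.0, -0.5, -1.5, 0.5]   # "predicted" ddG values
    # At threshold 0: two true positives, one false negative, one true negative,
    # one false positive -> [TP, FP, TN, FN] == [2, 1, 1, 1]
    assert check_accuracy(0, xs, ys) == [2, 1, 1, 1]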
results = []
thresholds = list(np.arange(-10,10, 0.1))
print(thresholds)
for threshold in thresholds:
results.append(check_accuracy(threshold, x, y))
print(threshold)
pass
print(results)
x = []
y = []
for i, result in enumerate(results):
print(result)
try:
x.append(result[1] / (result[1] + result[2]))  # false positive rate: FP / (FP + TN)
y.append(result[0] / (result[0] + result[3]))  # true positive rate: TP / (TP + FN)
except ZeroDivisionError:
x.append(np.nan)
y.append(np.nan)
print(x)
for i in range(len(x)):
print(i, "----")
print(x[i])
print(results[i])
x_hori = list(np.arange(0,1.1, 0.1))
y_hori = list(np.arange(0,1.1, 0.1))
TOI = [100,103, 105, 107, 110, 112, 118, 120]
plt.figure(figsize = (6,6))
for threshold in TOI:
plt.text(x[threshold] - 0.06, y[threshold] + 0.01, str(round(thresholds[threshold],3)))
#print(thresholds[threshold], threshold)
plt.plot(x,y)
plt.plot(x_hori, y_hori, linestyle="dashed")
plt.xlabel("False Positive Rate")
plt.ylabel("True Postive Rate")
plt.xlim(0,1)
plt.ylim(0,1)
plt.title("ROC curve of FoldX predictions of ddG with relation\nto varying ddG threshold (HotMusic dataset)")
for threshold in TOI:
plt.scatter(x[threshold], y[threshold], c="b")
plt.show()
|
# -*- coding: utf-8 -*-
"""
Main training file for the CRF.
This file trains a CRF model and saves it under the filename provided via an 'identifier' command
line argument.
Usage example:
python train.py --identifier="my_experiment"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import random
import pycrfsuite
from model.datasets import load_windows, load_articles, generate_examples
import model.features as features
# All capitalized constants come from this file
import config as cfg
random.seed(42)
def main():
"""This function handles the command line arguments and then calls the train() method."""
parser = argparse.ArgumentParser()
parser.add_argument("--identifier", required=True,
help="A short name/identifier for your experiment, e.g. 'ex42b'.")
args = parser.parse_args()
train(args)
def train(args):
"""Main training method.
Does the following:
1. Create a new pycrfsuite trainer object. We will have to add feature chains and label
chains to that object and then train on them.
2. Creates the feature (generators). A feature generator might e.g. take in a window
of N tokens and then return ["upper=1"] for each token that starts with an uppercase
letter and ["upper=0"] for each token that starts with a lowercase letter. (Lists,
because a token can be converted into multiple features by a single feature generator,
e.g. in the case of LDA, where a token may be part of multiple topics.)
3. Loads windows from the corpus. Each window has a fixed (maximum) size in tokens.
We only load windows that contain at least one label (named entity), so that we don't
waste too much time on windows without any label.
4. Generate features for each chain of tokens (window). That's basically described in (2.).
Each chain of tokens from a window will be converted to a list of lists.
One list at the top level representing each token, then another list for the feature
values. E.g.
[["w2v=123", "bc=742", "upper=0"], ["w2v=4", "bc=12", "upper=1", "lda4=1"]]
for two tokens.
5. Add feature chains and label chains to the trainer.
6. Train. This may take several hours for 20k windows.
Args:
args: Command line arguments as parsed by argparse.ArgumentParser.
"""
trainer = pycrfsuite.Trainer(verbose=True)
# Create/Initialize the feature generators
# this may take a few minutes
print("Creating features...")
feature_generators = features.create_features()
# Initialize the window generator
# each window has a fixed maximum size of tokens
print("Loading windows...")
windows = load_windows(load_articles(cfg.ARTICLES_FILEPATH), cfg.WINDOW_SIZE,
feature_generators, only_labeled_windows=True)
# Add chains of features (each list of lists of strings)
# and chains of labels (each list of strings)
# to the trainer.
# This may take a long while, especially because of the lengthy POS tagging.
# POS tags and LDA results are cached, so the second run through this part will be significantly
# faster.
print("Adding example windows (up to max %d)..." % (cfg.COUNT_WINDOWS_TRAIN))
examples = generate_examples(windows, nb_append=cfg.COUNT_WINDOWS_TRAIN,
nb_skip=cfg.COUNT_WINDOWS_TEST, verbose=True)
for feature_values_lists, labels in examples:
trainer.append(feature_values_lists, labels)
# Train the model
# this may take several hours
print("Training...")
if cfg.MAX_ITERATIONS is not None and cfg.MAX_ITERATIONS > 0:
# set the maximum number of iterations as defined in the config file
# the optimizer stops automatically after some iterations if this is not set
trainer.set_params({'max_iterations': cfg.MAX_ITERATIONS})
trainer.train(args.identifier)
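# A minimal, hedged sketch of loading the trained model afterwards for tagging;
# 'feature_chain' must use the same list-of-lists-of-strings format produced by
# the feature generators above (this helper is illustrative, not part of the
# original training script).
def tag_window(identifier, feature_chain):
    tagger = pycrfsuite.Tagger()
    tagger.open(identifier)
    return tagger.tag(feature_chain)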
# ----------------
if __name__ == "__main__":
main()
|
class ServiceCentre(object):
"""
An information store for each service centre in the queueing network.
Contains all information that is independent of customer class:
- number of servers
- queueing capacity
- server schedules + preemption status
- class change matrix
"""
def __init__(self,
number_of_servers,
queueing_capacity,
class_change_matrix=None,
schedule=None,
preempt=False,
ps_threshold=1):
"""
Initialises the ServiceCentre object.
"""
self.number_of_servers = number_of_servers
self.queueing_capacity = queueing_capacity
self.class_change_matrix = class_change_matrix
self.schedule = schedule
self.preempt = preempt
self.ps_threshold = ps_threshold
class CustomerClass(object):
"""
An information store for each customer class in the queueing network.
Contains all information that is dependent on customer class:
- arrival distributions
- service distributions
- routing matrices/functions
- priority class
- baulking functions
- batching distributions
"""
def __init__(self,
arrival_distributions,
service_distributions,
routing,
priority_class,
baulking_functions,
batching_distributions):
"""
Initialises the CustomerClass object.
"""
self.arrival_distributions = arrival_distributions
self.service_distributions = service_distributions
self.batching_distributions = batching_distributions
self.routing = routing
self.priority_class = priority_class
self.baulking_functions = baulking_functions
class Network(object):
"""
An information store for the queueing network.
Contains a list of ServiceCentre objects for each
service centre, and a list of CustomerClass objects
for each customer class.
"""
def __init__(self, service_centres, customer_classes):
"""
Initialises the Network object
"""
self.service_centres = service_centres
self.customer_classes = customer_classes
self.number_of_nodes = len(service_centres)
self.number_of_classes = len(customer_classes)
self.number_of_priority_classes = len(set([clss.priority_class for clss in customer_classes]))
self.priority_class_mapping = {i: clss.priority_class for i, clss in enumerate(customer_classes)}
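# A minimal, hypothetical sketch of wiring these classes together for a single
# two-server node; the distribution objects and routing values below are
# placeholders (plain strings/lists), not real distribution classes.
if __name__ == "__main__":
    centre = ServiceCentre(number_of_servers=2, queueing_capacity=float("inf"))
    klass = CustomerClass(
        arrival_distributions=["Exponential(3)"],     # placeholder description
        service_distributions=["Exponential(5)"],     # placeholder description
        routing=[[0.0]],                              # no onward routing
        priority_class=0,
        baulking_functions=[None],
        batching_distributions=["Deterministic(1)"],  # placeholder description
    )
    network = Network(service_centres=[centre], customer_classes=[klass])
    print(network.number_of_nodes, network.number_of_classes,
          network.number_of_priority_classes)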
|
"" "Este módulo implementa o objeto jogador (sprite) para o Progmind" ""
from src.animation import Animation
from src.animated_sprite import AnimatedSprite
from src.time_bonus import TimeBonus
import src.game_functions as gf
import pygame
import time
class Player(AnimatedSprite):
"""Objeto de jogador"""
def __init__(self, settings, screen, images, initial_bounding_rect, tile_map):
"""Inicialize o sprite do jogador"""
# Calls AnimatedSprite, which in turn will call pygame.Sprite __init_()
super().__init__(settings, screen, images)
self.tile_map = tile_map
# Override the starting position
self.initial_bounding_rect = initial_bounding_rect
self.rect.bottom = initial_bounding_rect.bottom
self.rect.left = self.screen.get_rect().width / 2
# Set the transparent margins
self.margin_left = self.settings.player_sprite_horz_margin
self.margin_right = self.settings.player_sprite_horz_margin
self.margin_top = self.settings.player_sprite_top_margin
# set the optional collision-check callback
self.collision_check = self.collided
# These are specific to the player object
self.air_jumps = 0
self.max_air_jumps = settings.player_max_air_jumps
self.idle_top = False
self.idle_counter = 0
self.won_level = False
self.at_top = False
# Add the animations for the player
self.animations[self.settings.anim_name_idle_left] = Animation([0, 1, 2, 3, 2, 1], 5)
self.animations[self.settings.anim_name_idle_right] = Animation([5, 6, 7, 8, 7, 6], 5)
self.animations[self.settings.anim_name_walk_left] = Animation([0, 10, 11, 10], 2)
self.animations[self.settings.anim_name_walk_right] = Animation([5, 12, 13, 12], 2)
self.animations[self.settings.anim_name_jump_up_left] = Animation([15], 5)
self.animations[self.settings.anim_name_jump_down_left] = Animation([16], 5)
self.animations[self.settings.anim_name_jump_up_right] = Animation([17], 5)
self.animations[self.settings.anim_name_jump_down_right] = Animation([18], 5)
self.animations[self.settings.anim_name_dead] = Animation([4], 5)
self.current_animation = self.settings.anim_name_idle_left
self.facing_left = True
def reset(self):
"""Redefina o objeto do jogador para o mapa"""
player = self
player.rect.bottom = self.initial_bounding_rect.bottom
player.dx = 0.0
player.dy = 0.0
player.dying = False
player.idle_counter = 0
player.idle_top = False
player.won_level = False
player.at_top = False
def update_current_animation(self):
"""Defina a animação correta com base no estado"""
# DEAD
if self.idle_top:
self.set_current_animation(self.settings.anim_name_idle_left)
elif self.dying:
self.set_current_animation(self.settings.anim_name_dead)
# IDLE
elif self.dx == 0 and self.dy == 0:
if self.facing_left:
self.set_current_animation(self.settings.anim_name_idle_left)
else:
self.set_current_animation(self.settings.anim_name_idle_right)
# WALKING
elif self.dy == 0:
if self.dx < 0:
self.set_current_animation(self.settings.anim_name_walk_left)
else:
self.set_current_animation(self.settings.anim_name_walk_right)
# JUMPING
else:
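# NOTE: re-initialising the mixer and reloading "jumpland.wav" on every
# animation update is expensive; creating the Sound once (for example in
# __init__) and reusing it here would avoid repeated disk I/O.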
pygame.mixer.init()
sounda= pygame.mixer.Sound("jumpland.wav")
sounda.set_volume(0.05)
sounda.play()
if self.dy < 0:
if self.facing_left:
self.set_current_animation(self.settings.anim_name_jump_up_left)
else:
self.set_current_animation(self.settings.anim_name_jump_up_right)
else:
if self.facing_left:
self.set_current_animation(self.settings.anim_name_jump_down_left)
else:
self.set_current_animation(self.settings.anim_name_jump_down_right)
def collided(self, player, sprite):
"""Este retorno de chamada é usado para modificar a verificação de colisão básica para o sprite do jogador"""
if sprite.dying:
return False
player_rect = player.rect.copy()
# shrink the player rectangle based on the margins
player_rect.height -= player.settings.player_sprite_top_margin
player_rect.width -= (player.settings.player_sprite_horz_margin * 2)
player_rect.midbottom = player.rect.midbottom
# Now do a standard check with the adjusted Rect
return player_rect.colliderect(sprite.rect)
def update(self, tile_map, enemies):
"""Atualiza a posição do jogador sprite"""
if not self.dying:
# Check whether we are on the top row
if self.idle_top:
self.idle_counter = 0
if self.idle_counter > (30 * 3):
self.won_level = False
# AnimatedSprite handles most of this, but save the current enemy Group for the handler
self.enemies = enemies
super().update(tile_map, tile_map.block_group)
if self.dy == 0:
self.air_jumps = 0
# The player also needs to check the enemy sprite group
intersected_blobs = pygame.sprite.spritecollide(self, enemies, False, self.collision_check)
if intersected_blobs:
self.dying = True
self.dy = -15
self.falling = True
self.falling_frames = 1
player_idle = ((self.current_animation == self.settings.anim_name_idle_left) or (self.current_animation == self.settings.anim_name_idle_right))
player_walking = ((self.current_animation == self.settings.anim_name_walk_left) or (self.current_animation == self.settings.anim_name_walk_right))
if (self.rect.bottom <= tile_map.player_bounds_rect.top + 2 * self.settings.tile_height) and (player_idle or player_walking):
self.idle_top = False
self.at_top = True
self.idle_counter = 0
else:
if self.rect.top > self.screen_rect.bottom:
# For now, just reset the player's position, but nothing else
self.rect.bottom = tile_map.player_bounds_rect.bottom
self.dx = 0.0
self.dy = 0.0
self.dying = False
else:
if self.dy < self.settings.terminal_velocity:
self.dy += self.settings.gravity
self.rect.centery += self.dy
# pygame.mixer.init()
# som= pygame.mixer.Sound("não consegue né.wav")
# som.set_volume(0.1)
# som.play()
self.falling_frames += 1
self.finish_update()
def handle_collision(self, collision_list, group):
"""Dada uma lista de sprites que colidem com o jogador, altere o estado, como posição, velocidade, etc."""
# Even though it's a list, the first item should be all we need for now
if collision_list:
block = collision_list[0]
# Is this a side collision?
side_collision = self.rect.right > block.rect.right or self.rect.left < block.rect.left
# Falling is the default case, so check it first
if self.dy > 0:
self.falling = False
self.falling_frames = 1
self.air_jumps = 0
self.dy = 0
self.rect.bottom = block.rect.top
# If the player is jumping, check for a hit from below
elif self.dy < 0:
if (self.rect.left > 450 and self.rect.left < 600):
if (self.rect.top >= 464 and self.rect.top < 470):
self.settings.resposta_1 = '';
if self.settings.resposta_1_correta:
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 620 and self.rect.left < 730):
if (self.rect.top >= 464 and self.rect.top < 477):
self.settings.resposta_2 = ''
if self.settings.resposta_2_correta:
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 450 and self.rect.left < 600):
if (self.rect.top >= 320 and self.rect.top < 328):
self.settings.resposta_3 = ''
if self.settings.resposta_3_correta:
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 620 and self.rect.left < 730):
if (self.rect.top >= 320 and self.rect.top < 328):
self.settings.resposta_4 = ''
if self.settings.resposta_4_correta:
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 450 and self.rect.left < 600):
if (self.rect.top >= 170 and self.rect.top < 185):
self.settings.resposta_5 = ''
if self.settings.resposta_5_correta:
self.settings.resposta_5 = ''
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 620 and self.rect.left < 730):
if (self.rect.top >= 170 and self.rect.top < 185):
self.settings.resposta_6 = ''
if self.settings.resposta_6_correta:
self.settings.resposta_6 = ''
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 480 and self.rect.left < 600):
if (self.rect.top > 25 and self.rect.top < 40):
self.settings.resposta_7 = ''
if self.settings.resposta_7_correta:
self.settings.resposta_7 = ''
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if (self.rect.left > 620 and self.rect.left < 730):
if (self.rect.top > 25 and self.rect.top < 40):
self.settings.resposta_8 = ''
if self.settings.resposta_8_correta:
self.settings.resposta_8 = ''
self.settings.contador_nivel += 1
if self.settings.level_number < self.settings.desafio_Medio:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil:
if self.settings.contador_nivel == 2:
self.settings.desafio_concluido = True
self.won_level = True
if self.settings.level_number >= self.settings.desafio_Dificil:
self.settings.desafio_concluido = True
self.won_level = True
if self.rect.bottom > block.rect.bottom:
self.dy = 0
self.rect.top = block.rect.bottom - self.settings.player_sprite_top_margin
# remove the blocks hit from underneath
group.remove(collision_list)
# remove the enemies above these blocks
self.remove_enemies_above_blocks(collision_list)
# Now check the left side
elif self.dx > 0:
if side_collision:
self.dx = 0
self.rect.right = block.rect.left + self.settings.player_sprite_horz_margin
elif self.dx < 0:
if side_collision:
self.dx = 0
self.rect.left = block.rect.right - self.settings.player_sprite_horz_margin
def remove_enemies_above_blocks(self, collision_list):
# build a kill rect to check the enemies against
kill_rect = collision_list[0].rect
for sprite in collision_list:
kill_rect.union_ip(sprite.rect)
# Move up one block
kill_rect.move_ip(0, collision_list[0].rect.height * -1)
# Now see if any enemy is on this block
for enemy in self.enemies:
if kill_rect.colliderect(enemy.rect):
enemy.dying = True
enemy.dy = self.settings.enemy_death_dy
bonus = TimeBonus(enemy.rect, "ACERTOU!!!", 500, self.tile_map.level_timer, self.settings.bonus_font)
self.tile_map.bonuses.append(bonus)
|