| repo_name | path | license | content |
|---|---|---|---|
| catalyst-cooperative/pudl | notebooks/work-in-progress/CEMS_by_utility.ipynb | mit |
# Standard libraries
import logging
import os
import pathlib
import sys
# 3rd party libraries
import geopandas as gpd
import geoplot as gplt
import dask.dataframe as dd
from dask.distributed import Client
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
import sqlalchemy as sa
import re
# Local libraries
import pudl
pudl_settings = pudl.workspace.setup.get_defaults()
#display(pudl_settings)
ferc1_engine = sa.create_engine(pudl_settings['ferc1_db'])
#display(ferc1_engine)
pudl_engine = sa.create_engine(pudl_settings['pudl_db'])
#display(pudl_engine)
#pudl_engine.table_names()
pudl_out = pudl.output.pudltabl.PudlTabl(pudl_engine)
"""
Explanation: Aggregate CEMS data at the utility-plant level for RMI
End of explanation
"""
fpl_id = 6452
duke_id = 5416
util_id = duke_id
"""
Explanation: Identify the utilities you'd like to get information on
End of explanation
"""
# Read master unit list
mul = pd.read_pickle('/Users/aesharpe/Desktop/Work/Catalyst_Coop/master_unit_list.pkl.gz')
# Get the ownership fractions at the generator level
gen_mul = mul[(mul['plant_part']=='plant_gen') & (mul['ownership']=='owned')].copy()
gen_mul = (
gen_mul.rename(columns={'report_year': 'year'})
.reset_index()
[['plant_id_eia', 'generator_id', 'plant_name_eia',
'year', 'fraction_owned', 'utility_id_eia', 'net_generation_mwh',
'capacity_mw', 'fuel_type_code_pudl']]
.drop_duplicates()
)
# Combine with EPA-EIA mapping
eia_epa_map = pd.read_csv('/Users/aesharpe/Desktop/Work/Catalyst_Coop/EPA-EIA-Unit-Crosswalk/eia_epa_id_crosswalk.csv')
eia_epa = eia_epa_map[['plant_id_epa', 'plant_id_eia', 'unitid', 'generator_id', 'fuel_type_primary']].copy()
gen_mul_map = pd.merge(gen_mul, eia_epa, on=['plant_id_eia', 'generator_id'], how='outer')
gen_mul_fracs = (
gen_mul_map.assign(
net_gen_plant_sum=(
lambda x: x.groupby(['utility_id_eia', 'plant_id_eia', 'year']).net_generation_mwh.transform('sum', min_count=1)),
net_gen_unit_sum=(
lambda x: x.groupby(['utility_id_eia', 'unitid', 'year']).net_generation_mwh.transform('sum', min_count=1)),
cap_plant_sum=(
lambda x: x.groupby(['utility_id_eia', 'plant_id_eia', 'year']).capacity_mw.transform('sum', min_count=1)),
cap_unit_sum=(
lambda x: x.groupby(['utility_id_eia', 'unitid', 'year']).capacity_mw.transform('sum', min_count=1)),
fraction_owned=lambda x: x.fraction_owned.fillna(1),
fraction_owned_cap_plant=lambda x: x.fraction_owned * x.capacity_mw / x.cap_plant_sum,
fraction_owned_cap_unit=lambda x: x.fraction_owned * x.capacity_mw / x.cap_unit_sum,
plant_id_eia=lambda x: x.plant_id_eia.astype('Int64'),
year=lambda x: x.year.astype('Int64')
)
)
# Only keeps entries for the given utility
util_gen_mul_fracs = gen_mul_fracs[gen_mul_fracs['utility_id_eia']==util_id]
util_plants = list(set((util_gen_mul_fracs.plant_id_eia)))
"""
Explanation: Read the Master Unit List (MUL) for utility fraction owned data
End of explanation
"""
# # CEMS
# years = range(2009, 2020)
# cems_df = pd.DataFrame()
# client = Client()
# my_cols = [
# 'state',
# 'plant_id_eia',
# 'unitid',
# 'gross_load_mw',
# 'operating_datetime_utc',
# ]
# for yr in years:
# print(f'starting calculation for {yr}')
# epacems_path = (pudl_settings['parquet_dir'] + f'/epacems/year={yr}')
# cems_dd = (
# dd.read_parquet(epacems_path, columns=my_cols)
# .assign(state=lambda x: x['state'].astype('string'))
# )
# cems_dd_util = cems_dd[cems_dd['plant_id_eia'].isin(util_plants)]
# cems_df_util = (
# client.compute(cems_dd_util)
# .result()
# .assign(year=yr))
# cems_df = (
# pd.concat([cems_df, cems_df_util])
# #.rename(columns={'unitid': 'generator_id'})
# )
#cems_df.to_pickle('/Users/aesharpe/Desktop/duke_cems.pkl')
#cems_df.to_pickle('/Users/aesharpe/Desktop/fpl_cems.pkl')
# Load CEMS pickle files
cems_duke = pd.read_pickle('/Users/aesharpe/Desktop/duke_cems.pkl')
cems_fpl = pd.read_pickle('/Users/aesharpe/Desktop/fpl_cems.pkl')
# Determine which utility to use
cems_df = cems_duke
tech_df = pd.read_csv('/Users/aesharpe/Desktop/epa_ampd_annual_emissions_data.csv')
col_list = list(tech_df.columns)
col_list = [col.replace(' ', '_').lower() for col in col_list]
col_list = [col.replace('(', '') for col in col_list]
col_list = [col.replace(')', '') for col in col_list]
tech_df.columns = [re.sub(r'^_', '', col) for col in col_list]
tech_df = tech_df[['facility_name', 'facility_id_orispl', 'unit_id', 'year', 'fuel_type_primary', 'unit_type']]
tech_df['unit_type'] = tech_df['unit_type'].fillna('UNK')
cems_tech = (
pd.merge(
cems_df,
tech_df,
left_on=['plant_id_eia', 'unitid', 'year'],
right_on=['facility_id_orispl', 'unit_id', 'year'],
how='left'
).assign(
unit_type=lambda x: [re.sub(r' \([A-Za-z , \d]*\)', '', l) for l in x.unit_type.fillna('UNK')]
)
)
"""
Explanation: Prep CEMS for Utility integration (only run this to make new pickle files!)
End of explanation
"""
# List which plants are not included in the EIA-EPA mapping (and therefore need to be aggregated at the
# plant vs. unit level for CEMS integration)
cems_plant_list = cems_df['plant_id_eia'].unique()
map_plant_list = list(eia_epa.plant_id_eia.unique())
missing_from_map = [plant for plant in cems_plant_list if plant not in map_plant_list]
# Separate into those that can aggregate by unit vs those that must aggregate by plant.
cems_unit = cems_tech[~cems_tech['plant_id_eia'].isin(missing_from_map)].copy()
cems_unit_missing = cems_tech[cems_tech['plant_id_eia'].isin(missing_from_map)].copy()
# Merge with CEMS
cems_mul_unit = pd.merge(cems_unit, util_gen_mul_fracs, on=['plant_id_eia', 'unitid', 'year'], how='left')
cems_mul_missing = (
pd.merge(cems_unit_missing, util_gen_mul_fracs, on=['plant_id_eia', 'year'], how='left')
.drop('unitid_y', axis=1)
.rename(columns={'unitid_x': 'unitid'})
)
"""
Explanation: Combine CEMS with MUL fraction owned data
End of explanation
"""
# For plants where generator level information is available in EPA-EIA conversion
cems_mul_gl_fraction_unit = (
cems_mul_unit.assign(
gross_load_mw_fraction_owned_cap=lambda x: x.fraction_owned_cap_unit * x.gross_load_mw,
plant_id_eia=lambda x: x.plant_id_eia.astype('Int64'),
fraction_owned_agg_level='unit'
).drop_duplicates(subset=['plant_id_eia', 'unitid', 'operating_datetime_utc'])
)
# For plants not available in EPA-EIA conversion
cems_mul_gl_fraction_plant = (
cems_mul_missing.assign(
gross_load_mw_fraction_owned_cap=lambda x: x.fraction_owned_cap_plant * x.gross_load_mw,
plant_id_eia=lambda x: x.plant_id_eia.astype('Int64'),
fraction_owned_agg_level='plant'
).drop_duplicates(subset=['plant_id_eia', 'unitid', 'operating_datetime_utc'])
)
def backfill_tech_description(ser):
"""Backfill tech description if technology is all the same except for some NA values"""
ser = ser.replace({None: np.nan})
types = list(ser.unique())
if np.nan in types:
types.remove(np.nan)
if len(types) == 1:
ser.values[:] = types[0]
return ser
# Merge back together
cems_mul_final = (
pd.concat([
cems_mul_gl_fraction_unit,
cems_mul_gl_fraction_plant])
.dropna(subset=['operating_datetime_utc'])
.assign(
fuel_type_code_pudl=lambda x: backfill_tech_description(x.fuel_type_code_pudl),
fuel_type_primary_x=lambda x: backfill_tech_description(x.fuel_type_primary_x),
unit_type=lambda x: backfill_tech_description(x.unit_type))
.rename(columns={'fuel_type_primary_x': 'fuel_type_primary'})
)
cems_mul_final = (
cems_mul_final[[
'plant_id_eia', 'unitid', 'gross_load_mw_fraction_owned_cap',
'facility_name', 'fuel_type_primary', 'unit_type', 'operating_datetime_utc',
'fraction_owned_agg_level'
]].copy()
)
# Pivot table so there aren't as many rows
cems_mul_piv = (
cems_mul_final.pivot(
columns=['facility_name', 'plant_id_eia', 'fraction_owned_agg_level', 'unitid', 'fuel_type_primary', 'unit_type'],
index=['operating_datetime_utc'])
.sort_index(axis=1, level=[0])
)
cems_mul_piv.columns.levels[3]
cems_mul_piv.to_csv('CEMS_Duke_gross_load.csv')
"""
Explanation: Calculate the fraction of gross load owned by the utility (either at the unit or plant level)
End of explanation
"""
|
| pacificclimate/climate-explorer-netcdf-tests | notebooks/storage-requirements.ipynb | gpl-3.0 |
import sys
sys.path.append('../util')
import numpy as np
from matplotlib import pyplot as plt, ticker
import matplotlib.patheffects as path_effects
from mpl_toolkits.mplot3d import Axes3D
pe = [path_effects.Stroke(linewidth=2, foreground='black'), path_effects.Normal()]
timescales = {
# Number of time steps in the series for a given time scale
'climatological': 17, # monthly, seasonal, annual means over the whole time period
'seasonal': 5 * 150, # four seasons + an annual mean
'annual': 150, # 1950 - 2100
'monthly': 12 * 150,
'daily': 365 * 150 # approximately
}
%matplotlib inline
"""
Explanation: Estimating storage requirements
Applications which depend on high-resolution, spatiotemporal data can potentially have high data storage requirements. The storage requirements are one of the primary factors that drive the cost of deployment and execution of the Software as a Service (SaaS).
To some extent the required storage (and cost) is co-dependent with the application requirements. That is, the cost of various options may inform whether we choose to take them on and vice versa; what exactly the application does will affect the ongoing costs.
There are a number of factors that affect the data requirements:
Spatial resolution and domain
Number of timesteps/timescales
Number of variables
Number of models
In general, the data requirements grow as the product of all of these factors, and a linear increase in one factor will result in a linear increase in the cost. The time and space resolution options both grow roughly exponentially, so the cost can grow likewise depending on the options selected. There can be several orders of magnitude difference between conservative choices and more full-featured choices.
Let's explore some of the options.
End of explanation
"""
sizeof_float = 4
"""
Explanation: Let's start by enumerating some of the sets of options that we would be considering. We'll assume that all numbers are stored as 32-bit IEEE floating point numbers.
End of explanation
"""
spatial = np.array((128 * 64, 128 * 256, 510 * 1068, 1680 * 3241))
"""
Explanation: Let's consider the range of grid sizes from a low resolution (250 km / grid cell) global GCM, up to the high-resolution (400 m / grid cell) grid over BC. The BC PRISM grid represents the largest grid that we commonly use.
End of explanation
"""
variables = np.array((3, 8))
"""
Explanation: Our range of variables doesn't vary much. Either we're just using minimum/maximum temperature and precipitation (3 variables), or we're including a few other derived variables. The ones that seem to come up the most are tas, pas, gdd, hdd, ffd, for a total of 8.
End of explanation
"""
timesteps = np.array(sorted(list(timescales.values())))
"""
Explanation: Time ranges vary quite widely. On the low end, if we only store climatological data (i.e. 30-year averages), we only store 17 timesteps: an annual mean, 4 seasonal means, and 12 monthly means. Other options include an annual timeseries, a monthly timeseries, or a daily timeseries. A daily timeseries includes about 55 thousand timesteps, which essentially multiplies the data requirements by a factor of roughly 3000.
End of explanation
"""
models = np.array([36, 50, 100, 250])
"""
Explanation: There are many global climate modelling agencies and a number of scenarios that are modelled. One could include analysis for only a small number of them. For example, our minimum is usually the "PCIC 12" for 3 RCP scenarios, so 36. This could go up to a few hundred; I've capped it at 250.
End of explanation
"""
print(spatial)
print(variables)
print(timesteps)
print(models)
"""
Explanation: So the full range of options looks something like the set below. That's a lot of options, so I'll run through a few scenarios where I fix two variables and vary the other two, to look at where a good tradeoff could be.
End of explanation
"""
# 3 variables at climatological scale
x = spatial
y = models
xx, yy = np.meshgrid(x, y)
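# GiB = (spatial cells) * (models) * 3 variables * 17 climatological timesteps * 4 bytes / 2**30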
z = xx * yy * 3 * 17 * sizeof_float / (2 ** 30)
plt.contourf(x,y,z)
plt.xlabel("Spatial pixels")
plt.ylabel("Number of models")
plt.title("Data requirements (in GB) for 3 variables with climatologies")
plt.colorbar()
"""
Explanation: The budget scenario
If we only consider using 3 variables (tmin, tmax, precip), and only allow analysis with climatological values, then the data requirements are actually really minimal and essentially cost tens of dollars per year (!). Limiting to climatological values gives us the latitude to use a high spatial resolution and consider pretty much the full range of models that are available, and still be insanely inexpensive.
This is the closest scenario that we have to how the present Regional Analysis Tool works.
End of explanation
"""
cost = z * 12 * .024
plt.contourf(x,y,cost)
plt.xlabel("Spatial pixels")
plt.ylabel("Number of models")
plt.title("Annual storage cost for 3 variables with climatologies")
plt.colorbar()
"""
Explanation: Costs
For all of these scenarios, I'll estimate costs by multiplying by Amazon's rate for S3 reduced redundancy storage. Reduced redundancy is appropriate, because all of this bulk data is stored elsewhere and is reproducible with relative ease. It does not need to be backed up; it just needs to be live for the application to hit it.
S3 Reduced Redundancy is priced at \$0.024 per GB per month for data volumes under 1 TB. It's slightly less for > 1 TB, but I'll just use a static scaling factor to keep it simple. One could use a different scaling factor to estimate other provider options (e.g. \$2.44 / GB / month for the government servers).
End of explanation
"""
# 3 variables at high-res BC prism scale
x = models
y = timesteps[:-1]
xx, yy = np.meshgrid(x, y)
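# 5444880 = 1680 * 3241, the number of cells in the high-res BC PRISM (bc_400m) grid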
z = xx * yy * 3 * 5444880 * sizeof_float / (2 ** 40)
plt.contourf(x,y,z)
plt.xlabel("Number of models")
plt.ylabel("Number of timesteps")
plt.title("Data requirements (in TB) for 3 variables at high-res BC PRISM scale")
for key, val in timescales.items():
if val <= 1800:
plt.axhline(val, color='white', label=key)
plt.text(100, val, key, color='white', path_effects=pe)
plt.colorbar()
cost = z * 1024 * 12 * .024 / 1000
plt.contourf(x,y,cost)
plt.xlabel("Number of models")
plt.ylabel("Number of timesteps")
plt.title("Annual storage cost (thousands) for 3 variables at high-res BC PRISM scale")
for key, val in timescales.items():
if val <= 1800:
plt.axhline(val, color='white', label=key)
plt.text(100, val, key, color='white', path_effects=pe)
plt.colorbar()
"""
Explanation: High spatial resolution, low variables
Let's assume that we want to work at a high spatial resolution. Infrastructure engineering projects are usually very site specific (a bridge, a dam, a ferry dock), so we'll usually be interested in the fine scale.
We'll consider the full range of models available, and timescales up to a monthly timeseries (i.e. not daily, because then things get ridiculous). If you do want an off-the-cuff estimate for daily, just multiply the top Y range by 30.
Half of the chart is still under 10 TB, but the data requirements get bigger quickly as you consider more timescales. For example, once you get above seasonal data into monthly, one could be storing as much as 20+ TB of data.
The storage costs for these options range from $1-8k / year, so it's still very reasonable for the value that the data provide.
End of explanation
"""
# 8 derived variables for all 250 models
x = spatial
y = timesteps
xx, yy = np.meshgrid(x, y)
z = xx * yy * 8 * 250 * sizeof_float / (2 ** 40)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
img = ax.contourf(x,y,z, locator=ticker.LogLocator())
ax.set_yscale('log')
ax.set_xscale('log')
plt.xlabel("Number of spatial grid cells")
plt.ylabel("Number of timesteps")
plt.title("Data requirements (in TB) for 7 variables 250 models")
for key, val in timescales.items():
ax.axhline(val, color='white', label=key)
ax.text(10**5, val, key, color='white', path_effects=pe)
for val in spatial:
ax.axvline(val, color='white')
fig.colorbar(img)
cost = z * 1024 * 12 * .024
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
img = ax.contourf(x,y,cost, locator=ticker.LogLocator())
plt.xlabel("Number of spatial grid cells")
plt.ylabel("Number of timesteps")
plt.title("Annual storage cost for 8 variables 250 models")
ax.set_yscale('log')
ax.set_xscale('log')
for key, val in timescales.items():
ax.axhline(val, color='white', label=key)
text = ax.text(10**5, val, key, color='white', path_effects=pe)
for val in spatial:
ax.axvline(val, color='white')
plt.colorbar(img)
"""
Explanation: All models all dervived variables
Things get interesting when you start to consider the full range of variables, the full range of models and daily data. And by "interesting", I mean expensive.
The costs are pretty reasonable below the monthly timescale, but including daily puts the data requirements into the Petabytes range. Note the log scale on both axes and the colorbar.
End of explanation
"""
# ClimDEX proxy: fixed variable and timescale mix, varying number of models and grid size
x = models
y = spatial
xx, yy = np.meshgrid(x, y)
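# Timesteps per variable mix (per cell, per model), as described below: 8 non-ClimDEX
# variables annually (8*150), 31 ClimDEX indices annually (31*150), and 11 indices
# that are also monthly (11*12*150)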
z = xx * yy * (8 * 150 + 31 * 150 + 11 * 1800) * sizeof_float / (2 ** 40)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_yscale('log')
img = ax.contourf(x,y,z)
plt.xlabel("Number of models")
plt.ylabel("Number of spatial grid cells")
plt.title("Data requirements (in TB) for ClimDEX data (proxy for highres time)")
for val in spatial:
ax.axhline(val, color='white')
ax.text(150, 128 * 64, 'world_250k', color='white', path_effects=pe)
ax.text(150, 128 * 256, 'world_125k', color='white', path_effects=pe)
ax.text(150, 510 * 1068, 'canada_5k', color='white', path_effects=pe)
ax.text(150, 1680 * 3241, 'bc_400m', color='white', path_effects=pe)
fig.colorbar(img)
cost = z * 1024 * 12 * .024 / 1000
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_yscale('log')
img = ax.contourf(x,y,cost)
plt.xlabel("Number of models")
plt.ylabel("Number of spatial grid cells")
plt.title("Annual storage cost (thousands) ClimDEX proxy")
fig.colorbar(img)
for val in spatial:
ax.axhline(val, color='white')
ax.text(150, 128 * 64, 'world_250k', color='white', path_effects=pe)
ax.text(150, 128 * 256, 'world_125k', color='white', path_effects=pe)
ax.text(150, 510 * 1068, 'canada_5k', color='white', path_effects=pe)
ax.text(150, 1680 * 3241, 'bc_400m', color='white', path_effects=pe)
"""
Explanation: Note that annual storage costs on the high end of the spectrum run into the millions.
Using ClimDEX to simulate extremes
One feasible alternative to storing daily climate model output is to precompute the ClimDEX indices and then commit to using only them for questions about extreme climate behaviour. The number of variables would be increased to somewhere around 38 (31 climdex + 3 fundamental + 4 derived). But the temporal scale would be limited to no more than monthly.
The calculation is a little more involved because some indices are monthly and some are annual and some are both. Let's say that there are 31 annual, and 11 are both monthly and annual. We'll fix the timescale and number of variables and evaluate the options for spatial grid and number of models.
End of explanation
"""
|
| PMEAL/OpenPNM | examples/getting_started/intro_to_openpnm_basic.ipynb | mit |
foo = dict() # Create an empty dict
foo['bar'] = 1 # Store an integer under the key 'bar'
print(foo['bar']) # Retrieve the integer stored in 'bar'
"""
Explanation: Tutorial 1 - Basic
Tutorial 1 of 3: Getting Started with OpenPNM
This tutorial is intended to show the basic outline of how OpenPNM works, and necessarily skips many of the more useful and powerful features of the package. So if you find yourself asking "why is this step so labor intensive" it's probably because this tutorial deliberately simplifies some features to provide a smoother introduction. The second and third tutorials dive into the package more deeply, but those features are best appreciated once the basics are understood.
Learning Objectives
Introduce the main OpenPNM objects and their roles
Explore the way OpenPNM stores data, including network topology
Learn some handy tools for working with objects
Generate a standard cubic Network topology
Calculate geometrical properties and assign them to a Geometry object
Calculate thermophysical properties and assign to a Phase object
Define pore-scale physics and assign transport parameters to a Physics object
Run a permeability simulation using the pre-defined Algorithm
Use the package to calculate the permeability coefficient of a porous medium
Python and Numpy Tutorials
Before diving into OpenPNM it is probably a good idea to become familiar with Python and Numpy. The following resources should be helpful.
* OpenPNM is written in Python. One of the best guides to learning Python is the set of Tutorials available on the official Python website. The web is literally overrun with excellent Python tutorials owing to the popularity and importance of the language. The official Python website also provides a long list of resources
* For information on using Numpy, Scipy and generally doing scientific computing in Python check out the Scipy lecture notes. The Scipy website also offers a solid introduction to using Numpy arrays.
* The Stackoverflow website is an incredible resource for all computing related questions, including simple usage of Python, Scipy and Numpy functions.
* For users more familiar with Matlab, there is a Matlab-Numpy cheat sheet that explains how to translate familiar Matlab commands to Numpy.
Overview of Data Storage in OpenPNM
Before creating an OpenPNM simulation it is necessary to give a quick description of how data is stored in OpenPNM; after all, a significant part of OpenPNM is dedicated to data storage and handling.
Python Dictionaries or dicts
OpenPNM employs 5 main objects which each store and manage a different type of information or data:
Network: Manages topological data such as pore spatial locations and pore-to-pore connections
Geometry: Manages geometrical properties such as pore diameter and throat length
Phase: Manages thermophysical properties such as temperature and viscosity
Physics: Manages pore-scale transport parameters such as hydraulic conductance
Algorithm: Contains algorithms that use the data from other objects to perform simulations, such as diffusion or drainage
We will encounter each of these objects in action before the end of this tutorial.
Each of the above objects is a subclass of the Python dictionary or dict, which is a very general storage container that allows values to be accessed by a name using syntax like:
End of explanation
"""
import scipy as sp
import numpy as np
import openpnm as op
%config InlineBackend.figure_formats = ['svg']
np.random.seed(10)
# Instantiate an empty network object with 10 pores and 10 throats
net = op.network.GenericNetwork(Np=10, Nt=10)
# Assign an Np-long array of ones
net['pore.foo'] = np.ones([net.Np, ])
# Assign an Np-long array of increasing ints
net['pore.bar'] = range(0, net.Np)
# The Python range iterator is converted to a proper Numpy array
print(type(net['pore.bar']))
net['pore.foo'][4] = 44.0 # Overwrite values in the array
print(net['pore.foo'][4]) # Retrieve values from the array
print(net['pore.foo'][2:6]) # Extract a slice of the array
print(net['pore.foo'][[2, 4, 6]]) # Extract specific locations
net['throat.foo'] = 2 # Assign a scalar
print(len(net['throat.foo'])) # The scalar value is converted to an Nt-long array
print(net['throat.foo'][4]) # The scalar value was placed into all locations
"""
Explanation: A detailed tutorial on dictionaries can be found here. The dict does not offer much functionality aside from basic storage of arbitrary objects, and it is meant to be extended. OpenPNM extends the dict to have functionality specifically suited for dealing with OpenPNM data.
Numpy Arrays of Pore and Throat Data
All data are stored in arrays which can be accessed using standard array syntax.
All pore and throat properties are stored in Numpy arrays. All data will be automatically converted to a Numpy array if necessary.
The data for pore i (or throat i) can be found in element i of an array. This means that pores and throats have indices which are implied by their position in the arrays. When we speak of retrieving pore locations, it refers to the indices in the Numpy arrays.
Each property is stored in its own array, meaning that 'pore diameter' and 'throat volume' are each stored in a separate array.
Arrays that store pore data are Np-long, while arrays that store throat data are Nt-long, where Np is the number of pores and Nt is the number of throats in the network.
Arrays can be any size in the other dimensions. For instance, triplets of pore coordinates (i.e. [x, y, z]) can be stored for each pore creating an Np-by-3 array.
The storage of topological connections is also very nicely accomplished with this 'list-based' format, by creating an array ('throat.conns') that stores which pore indices are found on either end of a throat. This leads to an Nt-by-2 array.
OpenPNM Objects: Combining dicts and Numpy Arrays
OpenPNM objects combine the above two levels of data storage, meaning they are dicts that are filled with Numpy arrays. OpenPNM enforces several rules to help maintain data consistency:
When storing arrays in an OpenPNM object, their name (or dictionary key) must be prefixed with 'pore.' or 'throat.'.
OpenPNM uses the prefix of the dictionary key to infer how long the array must be.
The specific property that is stored in each array is indicated by the suffix such as 'pore.diameter' or 'throat.length'.
Writing scalar values to OpenPNM objects automatically results in conversion to a full length array filled with the scalar value.
Arrays containing Boolean data are treated as labels, which are explained later in this tutorial.
The following code snippets give examples of how all these pieces fit together using an empty network as an example:
End of explanation
"""
import openpnm as op
%config InlineBackend.figure_formats = ['svg']
import scipy as sp
"""
Explanation: Generate a Cubic Network
Now that we have seen the rough outline of how OpenPNM objects store data, we can begin building a simulation. Start by importing OpenPNM and the Scipy package:
End of explanation
"""
pn = op.network.Cubic(shape=[4, 3, 1], spacing=0.0001)
"""
Explanation: Next, generate a Network by choosing the Cubic class, then create an instance with the desired parameters:
End of explanation
"""
print('The total number of pores on the network is:', pn.num_pores())
print('A short-cut to the total number of pores is:', pn.Np)
print('The total number of throats on the network is:', pn.num_throats())
print('A short-cut to the total number of throats is:', pn.Nt)
print('A list of all calculated properties is available with:\n', pn.props())
"""
Explanation: The Network object stored in pn contains pores at the correct spatial positions and connections between the pores according to the cubic topology.
The shape argument specifies the number of pores in the [X, Y, Z] directions of the cube. Networks in OpenPNM are always 3-dimensional, meaning that a 2D or "flat" network is still 1 layer of pores "thick", so here [X, Y, Z] = [4, 3, 1]; thus pn in this tutorial is effectively 2D, which is easier for visualization.
The spacing argument controls the center-to-center distance between pores and it can be a scalar or vector (i.e. [0.0001, 0.0002, 0.0003]).
The resulting network looks like:
(This image was created using Paraview, following the instructions given here)
<img src="http://i.imgur.com/ScdydO9l.png" style="width: 60%" align="left"/>
Inspecting Object Properties
OpenPNM objects have additional methods for querying their relevant properties, like the number of pores or throats, which properties have been defined, and so on:
End of explanation
"""
print(pn.pores('left'))
"""
Explanation: Accessing Pores and Throats via Labels
One simple but important feature of OpenPNM is the ability to label pores and throats. When a Cubic network is created, several labels are automatically created: the pores on each face are labeled 'left', 'right', etc. These labels can be used as follows:
End of explanation
"""
print(pn['pore.coords'][pn.pores('left')])
"""
Explanation: The ability to retrieve pore indices is handy for querying pore properties, such as retrieving the pore coordinates of all pores on the 'left' face:
End of explanation
"""
print(pn.labels())
"""
Explanation: A list of all labels currently assigned to the network can be obtained with:
End of explanation
"""
geom = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts)
"""
Explanation: Create a Geometry Object and Assign Geometric Properties to Pores and Throats
The Network pn does not contain any information about pore and throat sizes at this point. The next step is to create a Geometry object to manage the geometrical properties.
End of explanation
"""
geom['pore.diameter'] = np.random.rand(pn.Np)*0.0001 # Units of meters
"""
Explanation: This statement contains three arguments:
network tells the Geometry object which Network it is associated with. There can be multiple networks defined in a given session, so all objects must be associated with a single network.
pores and throats indicate the locations in the Network where this Geometry object will apply. In this tutorial geom applies to all pores and throats, but there are many cases where different regions of the network have different geometrical properties, so OpenPNM allows multiple Geometry objects to be created for managing the data in each region; that feature is not used in this tutorial.
Add Pore and Throat Size Information
This freshly instantiated Geometry object (geom) contains no geometric properties as yet. For this tutorial we'll use the direct assignment of manually calculated values.
We'll start by assigning diameters to each pore from a random distribution, spanning 0 um to 100 um. The upper limit matches the spacing of the Network, which was set to 0.0001 m (i.e. 100 um), so pore diameters exceeding 100 um might overlap with their neighbors. Using the Numpy rand function creates an Np-long array of random numbers between 0 and 0.0001, meaning each pore is assigned a unique random number.
End of explanation
"""
P12 = pn['throat.conns'] # An Nt x 2 list of pores on the end of each throat
D12 = geom['pore.diameter'][P12] # An Nt x 2 list of pore diameters
Dt = np.amin(D12, axis=1) # An Nt x 1 list of the smaller pore from each pair
geom['throat.diameter'] = Dt
"""
Explanation: We usually want each throat diameter to be smaller than the diameters of the two pores it connects, to maintain physical consistency. This requires understanding a little bit about how OpenPNM stores network topology. Consider the following:
End of explanation
"""
Rp = geom['pore.diameter']/2
geom['pore.volume'] = (4/3)*3.14159*(Rp)**3
"""
Explanation: Let's dissect the above lines.
Firstly, P12 is a direct copy of the Network's 'throat.conns' array, which contains the indices of the pore-pair connected by each throat.
Next, this Nt-by-2 array is used to index into the 'pore.diameter' array, resulting in another Nt-by-2 array containing the diameters of the pores on each end of a throat.
Finally, the Numpy function amin is used to find the minimum diameter of each pore-pair by specifying the axis argument as 1, and the resulting Nt-long array is assigned to geom['throat.diameter'].
This trick of using 'throat.conns' to index into a pore property array is commonly used in OpenPNM and you should have a second look at the above code to understand it fully.
We must still specify the remaining geometrical properties of the pores and throats. Since we're creating a "Stick-and-Ball" geometry, the sizes are calculated from the geometrical equations for spheres and cylinders.
For pore volumes, assume a sphere:
End of explanation
"""
C2C = 0.0001 # The center-to-center distance between pores
Rp12 = Rp[pn['throat.conns']]
geom['throat.length'] = C2C - np.sum(Rp12, axis=1)
"""
Explanation: The length of each throat is the center-to-center distance between pores, minus the radius of each of the two neighboring pores.
End of explanation
"""
Rt = geom['throat.diameter']/2
Lt = geom['throat.length']
geom['throat.volume'] = 3.14159*(Rt)**2*Lt
"""
Explanation: The volume of each throat is found assuming a cylinder:
End of explanation
"""
import openpnm.models.geometry as gmods
geom.add_model(propname='throat.hydraulic_size_factors',
model=gmods.hydraulic_size_factors.spheres_and_cylinders,
pore_diameter="pore.diameter",
throat_diameter="throat.diameter")
"""
Explanation: The basic geometrical properties of the network are now defined. The Geometry class possesses a method called plot_histograms that produces a plot of the most pertinent geometrical properties. The following figure doesn't look very good since the network in this example has only 12 pores, but the utility of the plot for quick inspection is apparent.
<img src="http://i.imgur.com/xkK1TYfl.png" style="width: 60%" align="left"/>
End of explanation
"""
water = op.phases.GenericPhase(network=pn)
"""
Explanation: Create a Phase Object
The simulation is now topologically and geometrically defined. It has pore coordinates, pore and throat sizes and so on. In order to perform any simulations it is necessary to define a Phase object to manage all the thermophysical properties of the fluids in the simulation:
End of explanation
"""
water['pore.temperature'] = 298.0
water['pore.viscosity'] = 0.001
"""
Explanation: Some notes on this line:
* pn is passed as an argument because Phases must know to which Network they belong.
* Note that pores and throats are NOT specified; this is because Phases are mobile and can exist anywhere or everywhere in the domain, so providing specific locations does not make sense. Algorithms for dynamically determining actual phase distributions are discussed later.
Add Thermophysical Properties
Now it is necessary to fill this Phase object with the desired thermophysical properties. OpenPNM includes a framework for calculating thermophysical properties from models and correlations, but this is covered in :ref:intermediate_usage. For this tutorial, we'll use the basic approach of simply assigning static values as follows:
End of explanation
"""
phys_water = op.physics.GenericPhysics(network=pn, phase=water, geometry=geom)
"""
Explanation: The above lines utilize the fact that OpenPNM converts scalars to full length arrays, essentially setting the temperature in each pore to 298.0 K.
Create a Physics Object
We are still not ready to perform any simulations. The last step is to define the desired pore-scale physics models, which dictate how the phase and geometrical properties interact to give the transport parameters. A classic example of this is the Hagen-Poiseuille equation for fluid flow through a throat to predict the flow rate as a function of the pressure drop. The flow rate is proportional to the geometrical size of the throat (radius and length) as well as properties of the fluid (viscosity) and thus combines geometrical and thermophysical properties:
End of explanation
"""
R = geom['throat.diameter']/2
L = geom['throat.length']
"""
Explanation: As with all objects, the Network must be specified.
Physics objects combine information from a Phase (i.e. viscosity) and a Geometry (i.e. throat diameter), so each of these must be specified.
Physics objects do not require the specification of which pores and throats they apply to, since this information is implied by the geometry argument, which was already assigned to specific locations.
Specify Desired Pore-Scale Transport Parameters
We need to calculate the numerical values representing our chosen pore-scale physics. To continue with the Hagen-Poiseuille example, let's calculate the hydraulic conductance of each throat in the network. The throat radius and length are easily accessed as:
End of explanation
"""
mu_w = 0.001
phys_water['throat.hydraulic_conductance'] = 3.14159*R**4/(8*mu_w*L)
"""
Explanation: The viscosity of the Phases was only defined in the pores; however, the hydraulic conductance must be calculated for each throat. There are several options, but to keep this tutorial simple we'll create a scalar value:
End of explanation
"""
alg = op.algorithms.StokesFlow(network=pn, phase=water)
"""
Explanation: Numpy arrays support vectorization, so since both L and R are arrays of Nt-length, their multiplication in this way results in another array that is also Nt-long.
Create an Algorithm Object for Performing a Permeability Simulation
Finally, it is now possible to run some useful simulations. The code below estimates the permeability through the network by applying a pressure gradient across and calculating the flux. This starts by creating a StokesFlow algorithm, which is pre-defined in OpenPNM:
End of explanation
"""
BC1_pores = pn.pores('front')
alg.set_value_BC(values=202650, pores=BC1_pores)
BC2_pores = pn.pores('back')
alg.set_value_BC(values=101325, pores=BC2_pores)
"""
Explanation: Like all the above objects, Algorithms must be assigned to a Network via the network argument.
This algorithm is also associated with a Phase object, in this case water, which dictates which pore-scale Physics properties to use (recall that phys_water was associated with water). This can be passed as an argument to the instantiation or to the setup function.
Next the boundary conditions are applied using the set_value_BC method on the Algorithm object. Let's apply a 1 atm pressure difference between the front and back sides of the domain:
End of explanation
"""
alg.run()
"""
Explanation: To actually run the algorithm use the run method:
End of explanation
"""
Q = alg.rate(pores=pn.pores('front'))
A = 0.0001*3*1 # Cross-sectional area for flow
L = 0.0001*4 # Length of flow path
del_P = 101325 # Specified pressure gradient
K = Q*mu_w*L/(A*del_P)
print(K)
"""
Explanation: This builds the coefficient matrix from the existing values of hydraulic conductance, solves for the pressure in each pore, and stores the results within the Algorithm's dictionary under 'pore.pressure'.
To determine the permeability coefficient, we must invoke Darcy's law: Q = K*A*(Pin - Pout)/(mu*L). Everything in this equation is known except for the volumetric flow rate Q. The StokesFlow algorithm possesses a rate method that calculates the rate of a quantity leaving a specified set of pores:
End of explanation
"""
water.update(alg.results())
"""
Explanation: The results ('pore.pressure') are held within the alg object and must be explicitly returned to the Phase object by the user if they wish to use these values in a subsequent calculation. The point of this data containment is to prevent unintentional overwriting of data. Each algorithm has a method called results which returns a dictionary of the pertinent simulation results, which can be added to the phase of interest using the update method.
End of explanation
"""
|
| ES-DOC/esdoc-jupyterhub | notebooks/nerc/cmip6/models/sandbox-3/seaice.ipynb | gpl-3.0 |
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'sandbox-3', 'seaice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: NERC
Source ID: SANDBOX-3
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:27
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 50 km or 0.1 degrees, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
*Which simulations had tuning applied, e.g. all, not historical, only pi-control?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters, if used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma-separated list
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration. Provide full details where this affects the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets, as a comma-separated list. For example: conserved property, variable1, variable2, variable3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involve flux correction?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontally discretised?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step of the sea ice model thermodynamic component, in seconds?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step of the sea ice model dynamic component, in seconds?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but assume a distribution and compute fluxes accordingly.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations and one for the salt budget?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation
"""
|
AllenDowney/ThinkBayes2
|
examples/normal.ipynb
|
mit
|
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py and create directories
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py
if not os.path.exists('figs'):
!mkdir figs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from empiricaldist import Pmf, Cdf
from utils import decorate, savefig
"""
Explanation: Think Bayes
Second Edition
Copyright 2020 Allen B. Downey
License: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)
End of explanation
"""
from scipy.stats import norm
data = norm(10, 2).rvs(20)
data
n = len(data)
xbar = np.mean(data)
s2 = np.var(data)
n, xbar, s2
"""
Explanation: Univariate normal
Generate data
End of explanation
"""
mus = np.linspace(8, 12, 101)
prior_mu = Pmf(1, mus)
prior_mu.index.name = 'mu'
sigmas = np.linspace(0.01, 5, 100)
ps = sigmas**-2
prior_sigma = Pmf(ps, sigmas)
prior_sigma.index.name = 'sigma'
from utils import make_joint
prior = make_joint(prior_mu, prior_sigma)
from utils import normalize
def update_norm(prior, data):
"""Update the prior based on data.
prior: joint distribution of mu and sigma
data: sequence of observations
"""
X, Y, Z = np.meshgrid(prior.columns, prior.index, data)
likelihood = norm(X, Y).pdf(Z).prod(axis=2)
posterior = prior * likelihood
normalize(posterior)
return posterior
posterior = update_norm(prior, data)
from utils import marginal
posterior_mu_grid = marginal(posterior, 0)
posterior_sigma_grid = marginal(posterior, 1)
posterior_mu_grid.plot()
decorate(title='Posterior distribution of mu')
posterior_sigma_grid.plot(color='C1')
decorate(title='Posterior distribution of sigma')
"""
Explanation: Grid algorithm
End of explanation
"""
m0 = 0
kappa0 = 0
alpha0 = 0
beta0 = 0
m_n = (kappa0 * m0 + n * xbar) / (kappa0 + n)
m_n
kappa_n = kappa0 + n
kappa_n
alpha_n = alpha0 + n/2
alpha_n
beta_n = beta0 + n*s2/2 + n * kappa0 * (xbar-m0)**2 / (kappa0 + n) / 2
beta_n
def update_normal(prior, summary):
m0, kappa0, alpha0, beta0 = prior
n, xbar, s2 = summary
m_n = (kappa0 * m0 + n * xbar) / (kappa0 + n)
kappa_n = kappa0 + n
alpha_n = alpha0 + n/2
beta_n = (beta0 + n*s2/2 +
n * kappa0 * (xbar-m0)**2 / (kappa0 + n) / 2)
return m_n, kappa_n, alpha_n, beta_n
prior = 0, 0, 0, 0
summary = n, xbar, s2
update_normal(prior, summary)
"""
Explanation: Update
Mostly following notation in Murphy, Conjugate Bayesian analysis of the Gaussian distribution
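Written out, the update computed in the code is
$$m_n = \frac{\kappa_0 m_0 + n\bar{x}}{\kappa_0 + n}, \qquad \kappa_n = \kappa_0 + n,$$
$$\alpha_n = \alpha_0 + \frac{n}{2}, \qquad \beta_n = \beta_0 + \frac{n s^2}{2} + \frac{\kappa_0\, n\, (\bar{x} - m_0)^2}{2(\kappa_0 + n)}.$$
With the improper prior $m_0 = \kappa_0 = \alpha_0 = \beta_0 = 0$ used here, this reduces to $m_n = \bar{x}$, $\kappa_n = n$, $\alpha_n = n/2$, and $\beta_n = n s^2 / 2$.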
End of explanation
"""
from scipy.stats import invgamma
dist_sigma2 = invgamma(alpha_n, scale=beta_n)
dist_sigma2.mean()
dist_sigma2.std()
sigma2s = np.linspace(0.01, 20, 101)
ps = dist_sigma2.pdf(sigma2s)
posterior_sigma2_invgammas = Pmf(ps, sigma2s)
posterior_sigma2_invgammas.normalize()
posterior_sigma2_invgammas.plot()
decorate(xlabel='$\sigma^2$',
ylabel='PDF',
title='Posterior distribution of variance')
sigmas = np.sqrt(sigma2s)
posterior_sigma_invgammas = Pmf(ps, sigmas)
posterior_sigma_invgammas.normalize()
posterior_sigma_grid.make_cdf().plot(color='gray', label='grid')
posterior_sigma_invgammas.make_cdf().plot(color='C1', label='invgamma')
decorate(xlabel='$\sigma$',
ylabel='PDF',
title='Posterior distribution of standard deviation')
posterior_sigma_invgammas.mean(), posterior_sigma_grid.mean()
posterior_sigma_invgammas.std(), posterior_sigma_grid.std()
2 / np.sqrt(2 * (n-1))
"""
Explanation: Posterior distribution of sigma
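Under this model the marginal posterior of the variance is inverse gamma,
$$\sigma^2 \mid D \sim \mathrm{InvGamma}(\alpha_n,\ \beta_n),$$
and the posterior of $\sigma$ is obtained by reindexing the same probabilities at $\sigma = \sqrt{\sigma^2}$, as the code does when it builds the Pmf over sigmas from the same ps.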
End of explanation
"""
from scipy.stats import t as student_t
def make_student_t(df, loc, scale):
return student_t(df, loc=loc, scale=scale)
df = 2 * alpha_n
precision = alpha_n * kappa_n / beta_n
dist_mu = make_student_t(df, m_n, 1/np.sqrt(precision))
dist_mu.mean()
dist_mu.std()
np.sqrt(4/n)
mus = np.linspace(8, 12, 101)
ps = dist_mu.pdf(mus)
posterior_mu_student = Pmf(ps, mus)
posterior_mu_student.normalize()
posterior_mu_student.plot()
decorate(xlabel='$\mu$',
ylabel='PDF',
title='Posterior distribution of mu')
posterior_mu_grid.make_cdf().plot(color='gray', label='grid')
posterior_mu_student.make_cdf().plot(label='student t')
decorate(xlabel='$\mu$',
ylabel='CDF',
title='Posterior distribution of mu')
def make_posterior_mu(m_n, kappa_n, alpha_n, beta_n):
df = 2 * alpha_n
loc = m_n
precision = alpha_n * kappa_n / beta_n
dist_mu = make_student_t(df, loc, 1/np.sqrt(precision))
return dist_mu
"""
Explanation: Posterior distribution of mu
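The marginal posterior of $\mu$ is a Student t distribution with $2\alpha_n$ degrees of freedom, location $m_n$, and scale $\sqrt{\beta_n / (\alpha_n \kappa_n)}$:
$$\mu \mid D \sim t_{2\alpha_n}\!\left(m_n,\ \sqrt{\frac{\beta_n}{\alpha_n \kappa_n}}\right)$$
which is what the code constructs with make_student_t.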
End of explanation
"""
mu_mesh, sigma2_mesh = np.meshgrid(mus, sigma2s)
# The conditional distribution of mu given sigma^2 is Normal(m_n, sigma^2/kappa_n);
# scipy's norm takes a standard deviation, so pass the square root of that variance.
joint = (dist_sigma2.pdf(sigma2_mesh) *
         norm(m_n, np.sqrt(sigma2_mesh/kappa_n)).pdf(mu_mesh))
joint_df = pd.DataFrame(joint, columns=mus, index=sigma2s)
from utils import plot_contour
plot_contour(joint_df)
decorate(xlabel='$\mu$',
ylabel='$\sigma^2$',
title='Posterior joint distribution')
"""
Explanation: Posterior joint distribution
End of explanation
"""
sample_sigma2 = dist_sigma2.rvs(1000)
sample_mu = norm(m_n, np.sqrt(sample_sigma2 / kappa_n)).rvs()  # standard deviation, not variance
sample_pred = norm(sample_mu, np.sqrt(sample_sigma2)).rvs()
cdf_pred = Cdf.from_seq(sample_pred)
cdf_pred.plot()
sample_pred.mean(), sample_pred.var()
"""
Explanation: Sampling from posterior predictive
End of explanation
"""
df = 2 * alpha_n
precision = alpha_n * kappa_n / beta_n / (kappa_n+1)
dist_pred = make_student_t(df, m_n, 1/np.sqrt(precision))
xs = np.linspace(2, 16, 101)
ys = dist_pred.cdf(xs)
plt.plot(xs, ys, color='gray', label='student t')
cdf_pred.plot(label='sample')
decorate(title='Predictive distribution')
def make_posterior_pred(m_n, kappa_n, alpha_n, beta_n):
df = 2 * alpha_n
loc = m_n
precision = alpha_n * kappa_n / beta_n / (kappa_n+1)
dist_pred = make_student_t(df, loc, 1/np.sqrt(precision))
return dist_pred
"""
Explanation: Analytic posterior predictive
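The posterior predictive is also a Student t with the same location but a wider scale, which folds in both parameter uncertainty and sampling variation:
$$\tilde{x} \mid D \sim t_{2\alpha_n}\!\left(m_n,\ \sqrt{\frac{\beta_n (\kappa_n + 1)}{\alpha_n \kappa_n}}\right)$$
matching the precision used in the code, $\alpha_n \kappa_n / \big(\beta_n (\kappa_n + 1)\big)$.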
End of explanation
"""
mean = [10, 20]
sigma_x = 2
sigma_y = 3
rho = 0.3
cov = rho * sigma_x * sigma_y
Sigma = [[sigma_x**2, cov], [cov, sigma_y**2]]
Sigma
from scipy.stats import multivariate_normal
n = 20
data = multivariate_normal(mean, Sigma).rvs(n)
data
n = len(data)
n
xbar = np.mean(data, axis=0)
xbar
S = np.cov(data.transpose())
S
np.corrcoef(data.transpose())
stds = np.sqrt(np.diag(S))
stds
corrcoef = S / np.outer(stds, stds)
corrcoef
def unpack_cov(S):
stds = np.sqrt(np.diag(S))
corrcoef = S / np.outer(stds, stds)
return stds[0], stds[1], corrcoef[0][1]
sigma_x, sigma_y, rho = unpack_cov(S)
sigma_x, sigma_y, rho
def pack_cov(sigma_x, sigma_y, rho):
cov = sigma_x * sigma_y * rho
return np.array([[sigma_x**2, cov], [cov, sigma_y**2]])
pack_cov(sigma_x, sigma_y, rho)
S
"""
Explanation: Multivariate normal
Generate data
End of explanation
"""
m_0 = 0
Lambda_0 = 0
nu_0 = 0
kappa_0 = 0
m_n = (kappa_0 * m_0 + n * xbar) / (kappa_0 + n)
m_n
xbar
diff = (xbar - m_0)
D = np.outer(diff, diff)
D
Lambda_n = Lambda_0 + S + n * kappa_0 * D / (kappa_0 + n)
Lambda_n
S
nu_n = nu_0 + n
nu_n
kappa_n = kappa_0 + n
kappa_n
"""
Explanation: Update
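Following the same notation as the univariate case, the normal-inverse-Wishart update computed here is
$$\mathbf{m}_n = \frac{\kappa_0 \mathbf{m}_0 + n \bar{\mathbf{x}}}{\kappa_0 + n}, \qquad \kappa_n = \kappa_0 + n, \qquad \nu_n = \nu_0 + n,$$
$$\Lambda_n = \Lambda_0 + S + \frac{\kappa_0\, n}{\kappa_0 + n} (\bar{\mathbf{x}} - \mathbf{m}_0)(\bar{\mathbf{x}} - \mathbf{m}_0)^\mathsf{T},$$
where $S$ is the sample covariance computed above.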
End of explanation
"""
from scipy.stats import invwishart
def make_invwishart(nu, Lambda):
d, _ = Lambda.shape
return invwishart(nu, scale=Lambda * (nu - d - 1))
dist_cov = make_invwishart(nu_n, Lambda_n)
dist_cov.mean()
S
sample_Sigma = dist_cov.rvs(1000)
np.mean(sample_Sigma, axis=0)
res = [unpack_cov(Sigma) for Sigma in sample_Sigma]
sample_sigma_x, sample_sigma_y, sample_rho = np.transpose(res)
sample_sigma_x.mean(), sample_sigma_y.mean(), sample_rho.mean()
unpack_cov(S)
Cdf.from_seq(sample_sigma_x).plot(label=r'$\sigma_x$')
Cdf.from_seq(sample_sigma_y).plot(label=r'$\sigma_y$')
decorate(xlabel='Standard deviation',
ylabel='CDF',
title='Posterior distribution of standard deviation')
Cdf.from_seq(sample_rho).plot()
decorate(xlabel='Coefficient of correlation',
ylabel='CDF',
title='Posterior distribution of correlation')
"""
Explanation: Posterior distribution of covariance
End of explanation
"""
num = 51
sigma_xs = np.linspace(0.01, 10, num)
sigma_ys = np.linspace(0.01, 10, num)
rhos = np.linspace(-0.3, 0.9, num)
index = pd.MultiIndex.from_product([sigma_xs, sigma_ys, rhos],
names=['sigma_x', 'sigma_y', 'rho'])
joint = Pmf(0, index)
joint.head()
dist_cov.pdf(S)
for sigma_x, sigma_y, rho in joint.index:
Sigma = pack_cov(sigma_x, sigma_y, rho)
joint.loc[sigma_x, sigma_y, rho] = dist_cov.pdf(Sigma)
joint.normalize()
from utils import pmf_marginal
posterior_sigma_x = pmf_marginal(joint, 0)
posterior_sigma_y = pmf_marginal(joint, 1)
marginal_rho = pmf_marginal(joint, 2)
posterior_sigma_x.mean(), posterior_sigma_y.mean(), marginal_rho.mean()
unpack_cov(S)
posterior_sigma_x.plot(label='$\sigma_x$')
posterior_sigma_y.plot(label='$\sigma_y$')
decorate(xlabel='Standard deviation',
ylabel='PDF',
title='Posterior distribution of standard deviation')
posterior_sigma_x.make_cdf().plot(color='gray')
posterior_sigma_y.make_cdf().plot(color='gray')
Cdf.from_seq(sample_sigma_x).plot(label=r'$\sigma_x$')
Cdf.from_seq(sample_sigma_y).plot(label=r'$\sigma_y$')
decorate(xlabel='Standard deviation',
ylabel='CDF',
title='Posterior distribution of standard deviation')
marginal_rho.make_cdf().plot(color='gray')
Cdf.from_seq(sample_rho).plot()
decorate(xlabel='Coefficient of correlation',
ylabel='CDF',
title='Posterior distribution of correlation')
"""
Explanation: Evaluate the Inverse Wishart PDF
End of explanation
"""
m_n
sample_mu = [multivariate_normal(m_n, Sigma/kappa_n).rvs()
for Sigma in sample_Sigma]
sample_mu0, sample_mu1 = np.transpose(sample_mu)
sample_mu0.mean(), sample_mu1.mean()
xbar
sample_mu0.std(), sample_mu1.std()
2 / np.sqrt(n), 3 / np.sqrt(n)
Cdf.from_seq(sample_mu0).plot(label=r'$\mu_0$ sample')
Cdf.from_seq(sample_mu1).plot(label=r'$\mu_1$ sample')
decorate(xlabel=r'$\mu$',
ylabel='CDF',
title=r'Posterior distribution of $\mu$')
"""
Explanation: Posterior distribution of mu
End of explanation
"""
from scipy.special import gammaln
def multistudent_pdf(x, mean, shape, df):
return np.exp(logpdf(x, mean, shape, df))
def logpdf(x, mean, shape, df):
p = len(mean)
vals, vecs = np.linalg.eigh(shape)
logdet = np.log(vals).sum()
valsinv = np.array([1.0/v for v in vals])
U = vecs * np.sqrt(valsinv)
dev = x - mean
maha = np.square(dev @ U).sum(axis=-1)
t = 0.5 * (df + p)
A = gammaln(t)
B = gammaln(0.5 * df)
C = p/2. * np.log(df * np.pi)
D = 0.5 * logdet
E = -t * np.log(1 + (1./df) * maha)
return A - B - C - D + E
d = len(m_n)
x = m_n
mean = m_n
df = nu_n - d + 1
shape = Lambda_n / kappa_n
multistudent_pdf(x, mean, shape, df)
mu0s = np.linspace(8, 12, 91)
mu1s = np.linspace(18, 22, 101)
mu_mesh = np.dstack(np.meshgrid(mu0s, mu1s))
mu_mesh.shape
ps = multistudent_pdf(mu_mesh, mean, shape, df)
joint = pd.DataFrame(ps, columns=mu0s, index=mu1s)
normalize(joint)
plot_contour(joint)
from utils import marginal
posterior_mu0_student = marginal(joint, 0)
posterior_mu1_student = marginal(joint, 1)
posterior_mu0_student.make_cdf().plot(color='gray', label=r'$\mu_0$ multi t')
posterior_mu1_student.make_cdf().plot(color='gray', label=r'$\mu_1$ multi t')
Cdf.from_seq(sample_mu0).plot(label=r'$\mu_0$ sample')
Cdf.from_seq(sample_mu1).plot(label=r'$\mu_1$ sample')
decorate(xlabel=r'$\mu$',
ylabel='CDF',
title=r'Posterior distribution of $\mu$')
"""
Explanation: Multivariate student t
Let's use this implementation
End of explanation
"""
prior = 0, 0, 0, 0
summary = n, xbar[0], S[0][0]
summary
params = update_normal(prior, summary)
params
dist_mu0 = make_posterior_mu(*params)
dist_mu0.mean(), dist_mu0.std()
mu0s = np.linspace(7, 12, 101)
ps = dist_mu0.pdf(mu0s)
posterior_mu0 = Pmf(ps, index=mu0s)
posterior_mu0.normalize()
prior = 0, 0, 0, 0
summary = n, xbar[1], S[1][1]
summary
params = update_normal(prior, summary)
params
dist_mu1 = make_posterior_mu(*params)
dist_mu1.mean(), dist_mu1.std()
mu1s = np.linspace(17, 23, 101)
ps = dist_mu1.pdf(mu1s)
posterior_mu1 = Pmf(ps, index=mu1s)
posterior_mu1.normalize()
posterior_mu0.make_cdf().plot(label=r'$\mu_0$ uni t', color='gray')
posterior_mu1.make_cdf().plot(label=r'$\mu_1$ uni t', color='gray')
Cdf.from_seq(sample_mu0).plot(label=r'$\mu_0$ sample')
Cdf.from_seq(sample_mu1).plot(label=r'$\mu_1$ sample')
decorate(xlabel=r'$\mu$',
ylabel='CDF',
title=r'Posterior distribution of $\mu$')
"""
Explanation: Compare to analytic univariate distributions
End of explanation
"""
sample_pred = [multivariate_normal(mu, Sigma).rvs()
for mu, Sigma in zip(sample_mu, sample_Sigma)]
sample_x0, sample_x1 = np.transpose(sample_pred)
sample_x0.mean(), sample_x1.mean()
sample_x0.std(), sample_x1.std()
prior = 0, 0, 0, 0
summary = n, xbar[0], S[0][0]
params = update_normal(prior, summary)
dist_x0 = make_posterior_pred(*params)
dist_x0.mean(), dist_x0.std()
x0s = np.linspace(2, 18, 101)
ps = dist_x0.pdf(x0s)
pred_x0 = Pmf(ps, index=x0s)
pred_x0.normalize()
prior = 0, 0, 0, 0
summary = n, xbar[1], S[1][1]
params = update_normal(prior, summary)
dist_x1 = make_posterior_pred(*params)
dist_x1.mean(), dist_x1.std()
x1s = np.linspace(10, 30, 101)
ps = dist_x1.pdf(x1s)
pred_x1 = Pmf(ps, index=x1s)
pred_x1.normalize()
pred_x0.make_cdf().plot(label=r'$x_0$ student t', color='gray')
pred_x1.make_cdf().plot(label=r'$x_1$ student t', color='gray')
Cdf.from_seq(sample_x0).plot(label=r'$x_0$ sample')
Cdf.from_seq(sample_x1).plot(label=r'$x_1$ sample')
decorate(xlabel='Quantity',
ylabel='CDF',
title='Posterior predictive distributions')
"""
Explanation: Sampling from posterior predictive
End of explanation
"""
d = len(m_n)
x = m_n
mean = m_n
df = nu_n - d + 1
shape = Lambda_n * (kappa_n+1) / kappa_n
multistudent_pdf(x, mean, shape, df)
x0s = np.linspace(0, 20, 91)
x1s = np.linspace(10, 30, 101)
x_mesh = np.dstack(np.meshgrid(x0s, x1s))
x_mesh.shape
ps = multistudent_pdf(x_mesh, mean, shape, df)
joint = pd.DataFrame(ps, columns=x0s, index=x1s)
normalize(joint)
plot_contour(joint)
from utils import marginal
posterior_x0_student = marginal(joint, 0)
posterior_x1_student = marginal(joint, 1)
posterior_x0_student.make_cdf().plot(color='gray', label=r'$x_0$ multi t')
posterior_x1_student.make_cdf().plot(color='gray', label=r'$x_1$ multi t')
Cdf.from_seq(sample_x0).plot(label=r'$x_0$ sample')
Cdf.from_seq(sample_x1).plot(label=r'$x_1$ sample')
decorate(xlabel='Quantity',
ylabel='CDF',
title='Posterior predictive distributions')
"""
Explanation: Comparing to the multivariate student t
End of explanation
"""
inter, slope = 5, 2
sigma = 3
n = 20
xs = norm(0, 3).rvs(n)
xs = np.sort(xs)
ys = inter + slope * xs + norm(0, sigma).rvs(n)
plt.plot(xs, ys, 'o');
import statsmodels.api as sm
X = sm.add_constant(xs)
X
model = sm.OLS(ys, X)
results = model.fit()
results.summary()
beta_hat = results.params
beta_hat
# k = results.df_model
k = 2
s2 = results.resid @ results.resid / (n - k)
s2
s2 = results.ssr / (n - k)
s2
np.sqrt(s2)
"""
Explanation: Bayesian linear regression
Generate data
End of explanation
"""
beta0s = np.linspace(2, 8, 71)
prior_inter = Pmf(1, beta0s, name='inter')
prior_inter.index.name = 'Intercept'
beta1s = np.linspace(1, 3, 61)
prior_slope = Pmf(1, beta1s, name='slope')
prior_slope.index.name = 'Slope'
sigmas = np.linspace(1, 6, 51)
ps = sigmas**-2
prior_sigma = Pmf(ps, sigmas, name='sigma')
prior_sigma.index.name = 'Sigma'
prior_sigma.normalize()
prior_sigma.plot()
from utils import make_joint
def make_joint3(pmf1, pmf2, pmf3):
"""Make a joint distribution with three parameters.
pmf1: Pmf object
pmf2: Pmf object
pmf3: Pmf object
returns: Pmf representing a joint distribution
"""
joint2 = make_joint(pmf2, pmf1).stack()
joint3 = make_joint(pmf3, joint2).stack()
return Pmf(joint3)
prior3 = make_joint3(prior_slope, prior_inter, prior_sigma)
prior3.head()
from utils import normalize
def update_optimized(prior, data):
"""Posterior distribution of regression parameters
`slope`, `inter`, and `sigma`.
prior: Pmf representing the joint prior
data: DataFrame with columns `x` and `y`
returns: Pmf representing the joint posterior
"""
xs = data['x']
ys = data['y']
sigmas = prior.columns
likelihood = prior.copy()
for slope, inter in prior.index:
expected = slope * xs + inter
resid = ys - expected
resid_mesh, sigma_mesh = np.meshgrid(resid, sigmas)
densities = norm.pdf(resid_mesh, 0, sigma_mesh)
likelihood.loc[slope, inter] = densities.prod(axis=1)
posterior = prior * likelihood
normalize(posterior)
return posterior
data = pd.DataFrame(dict(x=xs, y=ys))
from utils import normalize
posterior = update_optimized(prior3.unstack(), data)
normalize(posterior)
from utils import marginal
posterior_sigma_grid = marginal(posterior, 0)
posterior_sigma_grid.plot(label='grid')
decorate(title='Posterior distribution of sigma')
joint_posterior = marginal(posterior, 1).unstack()
plot_contour(joint_posterior)
posterior_beta0_grid = marginal(joint_posterior, 0)
posterior_beta1_grid = marginal(joint_posterior, 1)
posterior_beta0_grid.make_cdf().plot(label=r'$\beta_0$')
posterior_beta1_grid.make_cdf().plot(label=r'$\beta_1$')
decorate(title='Posterior distributions of parameters')
"""
Explanation: Grid algorithm
End of explanation
"""
nu = n-k
nu/2, nu*s2/2
from scipy.stats import invgamma
dist_sigma2 = invgamma(nu/2, scale=nu*s2/2)
dist_sigma2.mean()
sigma2s = np.linspace(0.01, 30, 101)
ps = dist_sigma2.pdf(sigma2s)
posterior_sigma2_invgamma = Pmf(ps, sigma2s)
posterior_sigma2_invgamma.normalize()
posterior_sigma2_invgamma.plot()
sigmas = np.sqrt(sigma2s)
posterior_sigma_invgamma = Pmf(ps, sigmas)
posterior_sigma_invgamma.normalize()
posterior_sigma_invgamma.mean(), posterior_sigma_grid.mean()
posterior_sigma_grid.make_cdf().plot(color='gray', label='grid')
posterior_sigma_invgamma.make_cdf().plot(label='invgamma')
decorate(title='Posterior distribution of sigma')
"""
Explanation: Posterior distribution of sigma
According to Gelman et al, the posterior distribution of $\sigma^2$ is scaled inverse chi2 with $\nu=n-k$ and scale $s^2$.
According to Wikipedia, that's equivalent to inverse gamma with parameters $\nu/2$ and $\nu s^2 / 2$.
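In symbols, with $\nu = n - k$:
$$\sigma^2 \mid \mathbf{y} \sim \text{Scaled-Inv-}\chi^2(\nu, s^2) = \mathrm{InvGamma}\!\left(\frac{\nu}{2},\ \frac{\nu s^2}{2}\right)$$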
End of explanation
"""
Lambda_0 = np.zeros((k, k))
Lambda_n = Lambda_0 + X.T @ X
Lambda_n
from scipy.linalg import inv
mu_0 = np.zeros(k)
mu_n = inv(Lambda_n) @ (Lambda_0 @ mu_0 + X.T @ X @ beta_hat)
mu_n
a_0 = 0
a_n = a_0 + n / 2
a_n
b_0 = 0
b_n = b_0 + (ys.T @ ys +
mu_0.T @ Lambda_0 @ mu_0 -
mu_n.T @ Lambda_n @ mu_n) / 2
b_n
a_n, nu/2
b_n, nu * s2 / 2
"""
Explanation: Posterior distribution of sigma, updatable version
Per the Wikipedia page: https://en.wikipedia.org/wiki/Bayesian_linear_regression
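The conjugate update computed in the code is
$$\Lambda_n = \Lambda_0 + X^\mathsf{T} X, \qquad \boldsymbol{\mu}_n = \Lambda_n^{-1}\left(\Lambda_0 \boldsymbol{\mu}_0 + X^\mathsf{T} X \hat{\boldsymbol{\beta}}\right),$$
$$a_n = a_0 + \frac{n}{2}, \qquad b_n = b_0 + \frac{1}{2}\left(\mathbf{y}^\mathsf{T}\mathbf{y} + \boldsymbol{\mu}_0^\mathsf{T} \Lambda_0 \boldsymbol{\mu}_0 - \boldsymbol{\mu}_n^\mathsf{T} \Lambda_n \boldsymbol{\mu}_n\right).$$
With the improper prior used here ($\Lambda_0 = 0$, $\boldsymbol{\mu}_0 = 0$, $a_0 = b_0 = 0$), $b_n$ reduces exactly to $\mathrm{SSR}/2 = \nu s^2 / 2$, while $a_n = n/2$; the code prints these next to $\nu/2$ and $\nu s^2/2$ from the previous section for comparison.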
End of explanation
"""
sample_sigma2 = dist_sigma2.rvs(1000)
sample_sigma = np.sqrt(sample_sigma2)
from scipy.linalg import inv
V_beta = inv(X.T @ X)
V_beta
sample_beta = [multivariate_normal(beta_hat, V_beta * sigma2).rvs()
for sigma2 in sample_sigma2]
np.mean(sample_beta, axis=0)
beta_hat
np.std(sample_beta, axis=0)
results.bse
sample_beta0, sample_beta1 = np.transpose(sample_beta)
Cdf.from_seq(sample_beta0).plot(label=r'$\beta_0$')
Cdf.from_seq(sample_beta1).plot(label=r'$\beta_1$')
decorate(title='Posterior distributions of the parameters')
"""
Explanation: Sampling the posterior of the parameters
End of explanation
"""
x = beta_hat
mean = beta_hat
df = (n - k)
shape = (V_beta * s2)
multistudent_pdf(x, mean, shape, df)
low, high = sample_beta0.min(), sample_beta0.max()
low, high
beta0s = np.linspace(0.9*low, 1.1*high, 101)
low, high = sample_beta1.min(), sample_beta1.max()
beta1s = np.linspace(0.9*low, 1.1*high, 91)
beta0_mesh, beta1_mesh = np.meshgrid(beta0s, beta1s)
beta_mesh = np.dstack(np.meshgrid(beta0s, beta1s))
beta_mesh.shape
ps = multistudent_pdf(beta_mesh, mean, shape, df)
ps.shape
joint = pd.DataFrame(ps, columns=beta0s, index=beta1s)
from utils import normalize
normalize(joint)
from utils import plot_contour
plot_contour(joint)
decorate(xlabel=r'$\beta_0$',
ylabel=r'$\beta_1$')
marginal_beta0_student = marginal(joint, 0)
marginal_beta1_student = marginal(joint, 1)
from utils import marginal
posterior_beta0_grid.make_cdf().plot(color='gray', label=r'grid $\beta_0$')
posterior_beta1_grid.make_cdf().plot(color='gray', label=r'grid $\beta_1$')
marginal_beta0_student.make_cdf().plot(label=r'student $\beta_0$', color='gray')
marginal_beta1_student.make_cdf().plot(label=r'student $\beta_1$', color='gray')
Cdf.from_seq(sample_beta0).plot(label=r'sample $\beta_0$')
Cdf.from_seq(sample_beta1).plot(label=r'sample $\beta_1$')
decorate()
"""
Explanation: Posterior using multivariate Student t
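The joint posterior of the coefficients evaluated here is a multivariate Student t centered at the OLS estimate:
$$\boldsymbol{\beta} \mid \mathbf{y} \sim t_{n-k}\!\left(\hat{\boldsymbol{\beta}},\ s^2 (X^\mathsf{T} X)^{-1}\right)$$
with $n - k$ degrees of freedom and shape matrix $s^2 V_\beta$, matching the arguments passed to multistudent_pdf.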
End of explanation
"""
t = [X @ beta + norm(0, sigma).rvs(n)
for beta, sigma in zip(sample_beta, sample_sigma)]
predictions = np.array(t)
predictions.shape
low, median, high = np.percentile(predictions, [5, 50, 95], axis=0)
plt.plot(xs, ys, 'o')
plt.plot(xs, median)
plt.fill_between(xs, low, high, color='C1', alpha=0.3)
"""
Explanation: Sampling the predictive distribution
End of explanation
"""
xnew = [1, 2, 3]
Xnew = sm.add_constant(xnew)
Xnew
t = [Xnew @ beta + norm(0, sigma).rvs(len(xnew))
for beta, sigma in zip(sample_beta, sample_sigma)]
predictions = np.array(t)
predictions.shape
x0, x1, x2 = predictions.T
Cdf.from_seq(x0).plot()
Cdf.from_seq(x1).plot()
Cdf.from_seq(x2).plot()
mu_new = Xnew @ beta_hat
mu_new
cov_new = s2 * (np.eye(len(xnew)) + Xnew @ V_beta @ Xnew.T)
cov_new
x = mu_new
mean = mu_new
df = (n - k)
shape = cov_new
multistudent_pdf(x, mean, shape, df)
y1s = np.linspace(0, 20, 51)
y0s = np.linspace(0, 20, 61)
y2s = np.linspace(0, 20, 71)
mesh = np.stack(np.meshgrid(y0s, y1s, y2s), axis=-1)
mesh.shape
ps = multistudent_pdf(mesh, mean, shape, df)
ps.shape
ps /= ps.sum()
ps.sum()
p1s = ps.sum(axis=1).sum(axis=1)
p1s.shape
p0s = ps.sum(axis=0).sum(axis=1)
p0s.shape
p2s = ps.sum(axis=0).sum(axis=0)
p2s.shape
pmf_y0 = Pmf(p0s, y0s)
pmf_y1 = Pmf(p1s, y1s)
pmf_y2 = Pmf(p2s, y2s)
pmf_y0.mean(), pmf_y1.mean(), pmf_y2.mean()
pmf_y0.make_cdf().plot(color='gray')
pmf_y1.make_cdf().plot(color='gray')
pmf_y2.make_cdf().plot(color='gray')
Cdf.from_seq(x0).plot()
Cdf.from_seq(x1).plot()
Cdf.from_seq(x2).plot()
stop  # deliberately undefined: raises NameError so "Run All" halts before the Leftovers section below
"""
Explanation: Modeling the predictive distribution
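For new design points $X_{\text{new}}$, the posterior predictive evaluated here is
$$\tilde{\mathbf{y}} \mid \mathbf{y} \sim t_{n-k}\!\left(X_{\text{new}} \hat{\boldsymbol{\beta}},\ s^2 \left(I + X_{\text{new}} (X^\mathsf{T} X)^{-1} X_{\text{new}}^\mathsf{T}\right)\right)$$
which corresponds to the mean mu_new and shape matrix cov_new constructed in the code.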
End of explanation
"""
from scipy.stats import chi2
class NormalInverseWishartDistribution(object):
def __init__(self, mu, lmbda, nu, psi):
self.mu = mu
self.lmbda = float(lmbda)
self.nu = nu
self.psi = psi
self.inv_psi = np.linalg.inv(psi)
def sample(self):
sigma = np.linalg.inv(self.wishartrand())
return (np.random.multivariate_normal(self.mu, sigma / self.lmbda), sigma)
def wishartrand(self):
dim = self.inv_psi.shape[0]
chol = np.linalg.cholesky(self.inv_psi)
foo = np.zeros((dim,dim))
for i in range(dim):
for j in range(i+1):
if i == j:
foo[i,j] = np.sqrt(chi2.rvs(self.nu-(i+1)+1))
else:
foo[i,j] = np.random.normal(0,1)
return np.dot(chol, np.dot(foo, np.dot(foo.T, chol.T)))
def posterior(self, data):
n = len(data)
mean_data = np.mean(data, axis=0)
sum_squares = np.sum([np.array(np.matrix(x - mean_data).T * np.matrix(x - mean_data)) for x in data], axis=0)
mu_n = (self.lmbda * self.mu + n * mean_data) / (self.lmbda + n)
lmbda_n = self.lmbda + n
nu_n = self.nu + n
dev = mean_data - self.mu
psi_n = (self.psi + sum_squares +
self.lmbda * n / (self.lmbda + n) * np.array(dev.T @ dev))
return NormalInverseWishartDistribution(mu_n, lmbda_n, nu_n, psi_n)
x = NormalInverseWishartDistribution(np.array([0,0])-3,1,3,np.eye(2))
samples = [x.sample() for _ in range(100)]
data = [np.random.multivariate_normal(mu,cov) for mu,cov in samples]
y = NormalInverseWishartDistribution(np.array([0,0]),1,3,np.eye(2))
z = y.posterior(data)
print('mu_n: {0}'.format(z.mu))
print('psi_n: {0}'.format(z.psi))
from scipy.linalg import inv
from scipy.linalg import cholesky
def wishartrand(nu, Lambda):
d, _ = Lambda.shape
chol = cholesky(Lambda)
foo = np.empty((d, d))
for i in range(d):
for j in range(i+1):
if i == j:
foo[i,j] = np.sqrt(chi2.rvs(nu-(i+1)+1))
else:
foo[i,j] = np.random.normal(0, 1)
return np.dot(chol, np.dot(foo, np.dot(foo.T, chol.T)))
sample = [wishartrand(nu_n, Lambda_n) for i in range(1000)]
np.mean(sample, axis=0)
Lambda_n
"""
Explanation: Leftovers
Related discussion saved for the future
https://stats.stackexchange.com/questions/78177/posterior-covariance-of-normal-inverse-wishart-not-converging-properly
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.17/_downloads/78709b76a2a2e07e4ff056048455fb17/plot_objects_from_arrays.ipynb
|
bsd-3-clause
|
# Author: Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: BSD (3-clause)
import numpy as np
import neo
import mne
print(__doc__)
"""
Explanation: Creating MNE objects from data arrays
This simple example demonstrates how to create MNE objects from numpy arrays. The final example uses the NEO file format as the source of the data.
End of explanation
"""
sfreq = 1000 # Sampling frequency
times = np.arange(0, 10, 0.001) # Use 10000 samples (10s)
sin = np.sin(times * 10) # Multiplied by 10 for shorter cycles
cos = np.cos(times * 10)
sinX2 = sin * 2
cosX2 = cos * 2
# Numpy array of size 4 X 10000.
data = np.array([sin, cos, sinX2, cosX2])
# Definition of channel types and names.
ch_types = ['mag', 'mag', 'grad', 'grad']
ch_names = ['sin', 'cos', 'sinX2', 'cosX2']
"""
Explanation: Create arbitrary data
End of explanation
"""
# It is also possible to use info from another raw object.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
"""
Explanation: Create an :class:info <mne.Info> object.
End of explanation
"""
raw = mne.io.RawArray(data, info)
# Scaling of the figure.
# For actual EEG/MEG data different scaling factors should be used.
scalings = {'mag': 2, 'grad': 2}
raw.plot(n_channels=4, scalings=scalings, title='Data from arrays',
show=True, block=True)
# It is also possible to auto-compute scalings
scalings = 'auto' # Could also pass a dictionary with some value == 'auto'
raw.plot(n_channels=4, scalings=scalings, title='Auto-scaled Data from arrays',
show=True, block=True)
"""
Explanation: Create a dummy :class:mne.io.RawArray object
End of explanation
"""
event_id = 1 # This is used to identify the events.
# First column is for the sample number.
events = np.array([[200, 0, event_id],
[1200, 0, event_id],
[2000, 0, event_id]]) # List of three arbitrary events
# Here a data set of 700 ms epochs from 2 channels is
# created from sin and cos data.
# Any data in shape (n_epochs, n_channels, n_times) can be used.
epochs_data = np.array([[sin[:700], cos[:700]],
[sin[1000:1700], cos[1000:1700]],
[sin[1800:2500], cos[1800:2500]]])
ch_names = ['sin', 'cos']
ch_types = ['mag', 'mag']
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
epochs = mne.EpochsArray(epochs_data, info=info, events=events,
event_id={'arbitrary': 1})
picks = mne.pick_types(info, meg=True, eeg=False, misc=False)
epochs.plot(picks=picks, scalings='auto', show=True, block=True)
"""
Explanation: EpochsArray
End of explanation
"""
nave = len(epochs_data) # Number of averaged epochs
evoked_data = np.mean(epochs_data, axis=0)
evokeds = mne.EvokedArray(evoked_data, info=info, tmin=-0.2,
comment='Arbitrary', nave=nave)
evokeds.plot(picks=picks, show=True, units={'mag': '-'},
titles={'mag': 'sin and cos averaged'}, time_unit='s')
"""
Explanation: EvokedArray
End of explanation
"""
# The events are spaced evenly every 1 second.
duration = 1.
# create a fixed size events array
# start=0 and stop=None by default
events = mne.make_fixed_length_events(raw, event_id, duration=duration)
print(events)
# for fixed size events no start time before and after event
tmin = 0.
tmax = 0.99 # inclusive tmax, 1 second epochs
# create :class:`Epochs <mne.Epochs>` object
epochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax, baseline=None, verbose=True)
epochs.plot(scalings='auto', block=True)
"""
Explanation: Create epochs by windowing the raw data.
End of explanation
"""
duration = 0.5
events = mne.make_fixed_length_events(raw, event_id, duration=duration)
print(events)
epochs = mne.Epochs(raw, events=events, tmin=tmin, tmax=tmax, baseline=None,
verbose=True)
epochs.plot(scalings='auto', block=True)
"""
Explanation: Create overlapping epochs using :func:mne.make_fixed_length_events (50 %
overlap). This also roughly doubles the number of events compared to the previous event list.
End of explanation
"""
# The example here uses the ExampleIO object for creating fake data.
# For actual data and different file formats, consult the NEO documentation.
reader = neo.io.ExampleIO('fakedata.nof')
bl = reader.read(lazy=False)[0]
# Get data from first (and only) segment
seg = bl.segments[0]
title = seg.file_origin
ch_names = list()
data = list()
for ai, asig in enumerate(seg.analogsignals):
# Since the data does not contain channel names, channel indices are used.
ch_names.append('Neo %02d' % (ai + 1,))
# We need the ravel() here because Neo < 0.5 gave 1D, Neo 0.5 gives
# 2D (but still a single channel).
data.append(asig.rescale('V').magnitude.ravel())
data = np.array(data, float)
sfreq = int(seg.analogsignals[0].sampling_rate.magnitude)
# By default, the channel types are assumed to be 'misc'.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
raw = mne.io.RawArray(data, info)
raw.plot(n_channels=4, scalings={'misc': 1}, title='Data from NEO',
show=True, block=True, clipping='clamp')
"""
Explanation: Extracting data from NEO file
End of explanation
"""
|
peastman/deepchem
|
examples/tutorials/Modeling_Protein_Ligand_Interactions_With_Atomic_Convolutions.ipynb
|
mit
|
!curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import conda_installer
conda_installer.install()
!/root/miniconda/bin/conda info -e
!/root/miniconda/bin/conda install -c conda-forge mdtraj -y -q # needed for AtomicConvs
!pip install --pre deepchem
import deepchem
deepchem.__version__
import deepchem as dc
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from rdkit import Chem
from deepchem.molnet import load_pdbbind
from deepchem.models import AtomicConvModel
from deepchem.feat import AtomicConvFeaturizer
"""
Explanation: Modeling Protein-Ligand Interactions with Atomic Convolutions
By Nathan C. Frey | Twitter and Bharath Ramsundar | Twitter
This DeepChem tutorial introduces the Atomic Convolutional Neural Network. We'll see the structure of the AtomicConvModel and write a simple program to run Atomic Convolutions.
ACNN Architecture
ACNNs directly exploit the local three-dimensional structure of molecules to hierarchically learn more complex chemical features by optimizing both the model and featurization simultaneously in an end-to-end fashion.
The atom type convolution makes use of a neighbor-listed distance matrix to extract features encoding local chemical environments from an input representation (Cartesian atomic coordinates) that does not necessarily contain spatial locality. The following methods are used to build the ACNN architecture:
Distance Matrix
The distance matrix $R$ is constructed from the Cartesian atomic coordinates: the construction accepts an $(N, 3)$ coordinate matrix $C$ as input, computes the distance tensor $D$, and reduces it to pairwise distances, which are then “neighbor listed” into an $(N, M)$ matrix $R$.
python
R = tf.reduce_sum(tf.multiply(D, D), 3) # D: Distance Tensor
R = tf.sqrt(R) # R: Distance Matrix
return R
Atom type convolution
The output of the atom type convolution is constructed from the distance matrix $R$ and atomic number matrix $Z$. The matrix $R$ is fed into a (1x1) filter with stride 1 and depth of $N_{at}$ , where $N_{at}$ is the number of unique atomic numbers (atom types) present in the molecular system. The atom type convolution kernel is a step function that operates on the neighbor distance matrix $R$.
Radial Pooling layer
Radial pooling is a dimensionality reduction step that down-samples the output of the atom type convolutions. The reduction prevents overfitting by providing an abstracted representation through feature binning and by reducing the number of parameters learned.
Mathematically, radial pooling layers pool over tensor slices (receptive fields) of size (1x$M$x1) with stride 1 and a depth of $N_r$, where $N_r$ is the number of desired radial filters and $M$ is the maximum number of neighbors.
Atomistic fully connected network
Atomic Convolution layers are stacked by feeding the flattened ($N$, $N_{at}$ $\cdot$ $N_r$) output of the radial pooling layer into the atom type convolution operation. Finally, we feed the tensor row-wise (per-atom) into a fully-connected network. The
same fully connected weights and biases are used for each atom in a given molecule.
Now that we have seen the structural overview of ACNNs, we'll try to get deeper into the model and see how we can train it and what we expect as the output.
For the training, we will use the publicly available PDBbind dataset. In this example, every row reflects a protein-ligand complex and the target is the binding affinity ($K_i$) of the ligand to the protein in the complex.
Colab
This tutorial and the rest in this sequence are designed to be run in Google Colab. If you'd like to open this notebook in Colab, you can use the following link.
Setup
To run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment.
End of explanation
"""
f1_num_atoms = 100 # maximum number of atoms to consider in the ligand
f2_num_atoms = 1000 # maximum number of atoms to consider in the protein
max_num_neighbors = 12 # maximum number of spatial neighbors for an atom
acf = AtomicConvFeaturizer(frag1_num_atoms=f1_num_atoms,
frag2_num_atoms=f2_num_atoms,
complex_num_atoms=f1_num_atoms+f2_num_atoms,
max_num_neighbors=max_num_neighbors,
neighbor_cutoff=4)
"""
Explanation: Getting protein-ligand data
If you worked through Tutorial 13 on modeling protein-ligand interactions, you'll already be familiar with how to obtain a set of data from PDBbind for training our model. Since we explored molecular complexes in detail in the previous tutorial, this time we'll simply initialize an AtomicConvFeaturizer and load the PDBbind dataset directly using MolNet.
End of explanation
"""
%%time
tasks, datasets, transformers = load_pdbbind(featurizer=acf,
save_dir='.',
data_dir='.',
pocket=True,
reload=False,
set_name='core')
datasets
train, val, test = datasets
"""
Explanation: load_pdbbind allows us to specify if we want to use the entire protein or only the binding pocket (pocket=True) for featurization. Using only the pocket saves memory and speeds up the featurization. We can also use the "core" dataset of ~200 high-quality complexes for rapidly testing our model, or the larger "refined" set of nearly 5000 complexes for more datapoints and more robust training/validation. On Colab, it takes only a minute to featurize the core PDBbind set! This is pretty incredible, and it means you can quickly experiment with different featurizations and model architectures.
End of explanation
"""
acm = AtomicConvModel(n_tasks=1,
frag1_num_atoms=f1_num_atoms,
frag2_num_atoms=f2_num_atoms,
complex_num_atoms=f1_num_atoms+f2_num_atoms,
max_num_neighbors=max_num_neighbors,
batch_size=12,
layer_sizes=[32, 32, 16],
learning_rate=0.003,
)
losses, val_losses = [], []
%%time
max_epochs = 50
for epoch in range(max_epochs):
loss = acm.fit(train, nb_epoch=1, max_checkpoints_to_keep=1, all_losses=losses)
metric = dc.metrics.Metric(dc.metrics.score_function.rms_score)
val_losses.append(acm.evaluate(val, metrics=[metric])['rms_score']**2) # L2 Loss
"""
Explanation: Training the model
Now that we've got our dataset, let's go ahead and initialize an AtomicConvModel to train. Keep the input parameters the same as those used in AtomicConvFeaturizer, or else we'll get errors. layer_sizes controls the number of layers and the size of each dense layer in the network. We choose these hyperparameters to be the same as those used in the original paper.
End of explanation
"""
f, ax = plt.subplots()
ax.scatter(range(len(losses)), losses, label='train loss')
ax.scatter(range(len(val_losses)), val_losses, label='val loss')
plt.legend(loc='upper right');
"""
Explanation: The loss curves are not exactly smooth, which is unsurprising because we are using 154 training and 19 validation datapoints. Increasing the dataset size may help with this, but will also require greater computational resources.
End of explanation
"""
score = dc.metrics.Metric(dc.metrics.score_function.pearson_r2_score)
for tvt, ds in zip(['train', 'val', 'test'], datasets):
print(tvt, acm.evaluate(ds, metrics=[score]))
"""
Explanation: The ACNN paper showed a Pearson $R^2$ score of 0.912 and 0.448 for a random 80/20 split of the PDBbind core train/test sets. Here, we've used an 80/10/10 training/validation/test split and achieved similar performance for the training set (0.943). We can see from the performance on the training, validation, and test sets (and from the results in the paper) that the ACNN can learn chemical interactions from small training datasets, but struggles to generalize. Still, it is pretty amazing that we can train an AtomicConvModel with only a few lines of code and start predicting binding affinities!
From here, you can experiment with different hyperparameters, more challenging splits, and the "refined" set of PDBbind to see if you can reduce overfitting and come up with a more robust model.
End of explanation
"""
|
amueller/advanced_training
|
01.2 Linear models.ipynb
|
bsd-2-clause
|
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
X, y, true_coefficient = make_regression(n_samples=80, n_features=30, n_informative=10, noise=100, coef=True, random_state=5)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)
print(X_train.shape)
print(y_train.shape)
"""
Explanation: %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
Linear models for regression
y_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_
End of explanation
"""
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression().fit(X_train, y_train)
print("R^2 on training set: %f" % linear_regression.score(X_train, y_train))
print("R^2 on test set: %f" % linear_regression.score(X_test, y_test))
from sklearn.metrics import r2_score
print(r2_score(np.dot(X, true_coefficient), y))
plt.figure(figsize=(10, 5))
coefficient_sorting = np.argsort(true_coefficient)[::-1]
plt.plot(true_coefficient[coefficient_sorting], "o", label="true")
plt.plot(linear_regression.coef_[coefficient_sorting], "o", label="linear regression")
plt.legend()
"""
Explanation: Linear Regression
$$ \text{min}_{w, b} \sum_i || w^\mathsf{T}x_i + b - y_i||^2 $$
End of explanation
"""
from sklearn.linear_model import Ridge
ridge_models = {}
training_scores = []
test_scores = []
for alpha in [100, 10, 1, .01]:
ridge = Ridge(alpha=alpha).fit(X_train, y_train)
training_scores.append(ridge.score(X_train, y_train))
test_scores.append(ridge.score(X_test, y_test))
ridge_models[alpha] = ridge
plt.figure()
plt.plot(training_scores, label="training scores")
plt.plot(test_scores, label="test scores")
plt.xticks(range(4), [100, 10, 1, .01])
plt.legend(loc="best")
plt.figure(figsize=(10, 5))
plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b')
for i, alpha in enumerate([100, 10, 1, .01]):
plt.plot(ridge_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.))
plt.legend(loc="best")
"""
Explanation: Ridge Regression (L2 penalty)
$$ \text{min}_{w,b} \sum_i || w^\mathsf{T}x_i + b - y_i||^2 + \alpha ||w||_2^2$$
End of explanation
"""
from sklearn.linear_model import Lasso
lasso_models = {}
training_scores = []
test_scores = []
for alpha in [30, 10, 1, .01]:
lasso = Lasso(alpha=alpha).fit(X_train, y_train)
training_scores.append(lasso.score(X_train, y_train))
test_scores.append(lasso.score(X_test, y_test))
lasso_models[alpha] = lasso
plt.figure()
plt.plot(training_scores, label="training scores")
plt.plot(test_scores, label="test scores")
plt.xticks(range(4), [30, 10, 1, .01])
plt.legend(loc="best")
plt.figure(figsize=(10, 5))
plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b')
for i, alpha in enumerate([30, 10, 1, .01]):
plt.plot(lasso_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.))
plt.legend(loc="best")
"""
Explanation: Lasso (L1 penalty)
$$ \text{min}_{w, b} \sum_i || w^\mathsf{T}x_i + b - y_i||^2 + \alpha ||w||_1$$
End of explanation
"""
from plots import plot_linear_svc_regularization
plot_linear_svc_regularization()
"""
Explanation: Linear models for classification
y_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_ > 0
The influence of C in LinearSVC
End of explanation
"""
from sklearn.datasets import make_blobs
plt.figure()
X, y = make_blobs(random_state=42)
plt.scatter(X[:, 0], X[:, 1], c=y)
from sklearn.svm import LinearSVC
linear_svm = LinearSVC().fit(X, y)
print(linear_svm.coef_.shape)
print(linear_svm.intercept_.shape)
plt.scatter(X[:, 0], X[:, 1], c=y)
line = np.linspace(-15, 15)
for coef, intercept in zip(linear_svm.coef_, linear_svm.intercept_):
plt.plot(line, -(line * coef[0] + intercept) / coef[1])
plt.ylim(-10, 15)
plt.xlim(-10, 8)
"""
Explanation: Multi-Class linear classification
End of explanation
"""
|
Becksteinlab/PSAnalysisTutorial
|
psa_identifier_example.ipynb
|
gpl-3.0
|
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Suppress FutureWarning about element-wise comparison to None
# Occurs when calling PSA plotting functions
import warnings
warnings.filterwarnings('ignore')
"""
Explanation: Using PairID to extract PSA data
Here we will use the convenience class PairID to extract Hausdorff pair analysis data generated by PSA.
End of explanation
"""
from MDAnalysis import Universe
from MDAnalysis.analysis.psa import PSAnalysis
from pair_id import PairID
"""
Explanation: Set up input data for PSA using MDAnalysis
End of explanation
"""
method_names = ['DIMS','FRODA','rTMD-F','rTMD-S']
labels = [] # Heat map labels (not plotted in this example)
simulations = [] # List of simulation topology/trajectory filename pairs
universes = [] # List of MDAnalysis Universes representing simulations
"""
Explanation: Initialize lists for the methods on which to perform PSA. PSA will be performed for four different simulation methods with three runs each: DIMS, FRODA, rTMD-F, and rTMD-S. A PairID object will be initialized later to keep track of the data corresponding to comparisons between pairs of simulations.
End of explanation
"""
for method in method_names:
# Note: DIMS uses the PSF topology format
topname = 'top.psf' if 'DIMS' in method or 'TMD' in method else 'top.pdb'
pathname = 'fitted_psa.dcd'
method_dir = 'methods/{}'.format(method)
    if method != 'LinInt':
        for run in range(1, 4): # 3 runs per method
run_dir = '{}/{:03n}'.format(method_dir, run)
topology = '{}/{}'.format(method_dir, topname)
trajectory = '{}/{}'.format(run_dir, pathname)
labels.append(method + '(' + str(run) + ')')
simulations.append((topology, trajectory))
else: # only one LinInt trajectory
topology = '{}/{}'.format(method_dir, topname)
trajectory = '{}/{}'.format(method_dir, pathname)
labels.append(method)
simulations.append((topology, trajectory))
"""
Explanation: For each method, get the topology and each of three total trajectories (per method). Each simulation is represented as a (topology, trajectory) pair of file names, which is appended to a master list of simulations.
End of explanation
"""
for sim in simulations:
universes.append(Universe(*sim))
"""
Explanation: Generate a list of universes from the list of simulations.
End of explanation
"""
psa_hpa = PSAnalysis(universes, path_select='name CA', labels=labels)
"""
Explanation: Perform a path similarity analysis
Initialize a PSA comparison from the universe list using a C$_\alpha$ trajectory representation.
End of explanation
"""
psa_hpa.generate_paths()
"""
Explanation: Generate PSA Paths from the trajectories
End of explanation
"""
psa_hpa.run_pairs_analysis(hausdorff_pairs=True, neighbors=True)
"""
Explanation: Perform a Hausdorff pairs analysis on all of the Paths
End of explanation
"""
identifier = PairID()
for name in method_names:
identifier.add_sim(name, [1,2,3])
"""
Explanation: Extract specific data from PSA
End of explanation
"""
pid = identifier.get_pair_id('DIMS 2', 'rTMD-F 3')
"""
Explanation: Get the PSA ID for the second DIMS simulation (DIMS 2) and third rTMD-F simulation (rTMD-F 3)
End of explanation
"""
psa_hpa.hausdorff_pairs[pid]
psa_hpa.hausdorff_pairs[pid]['frames']
"""
Explanation: Use the PSA ID to locate the Hausdorff analysis data for the DIMS 2/rTMD-F 3 comparison:
Get the indices of the frames for the DIMS 2 and rTMD-F 3 paths corresponding to the Hausdorff pair
End of explanation
"""
psa_hpa.hausdorff_pairs[pid]['distance']
"""
Explanation: Get the rmsd separating the Hausdorff pair (this is the Hausdorff distance!)
End of explanation
"""
psa_hpa.nearest_neighbors[pid]['frames']
"""
Explanation: Get the indices of the nearest neighbor frames for the DIMS 2 and rTMD-F 3 paths
End of explanation
"""
psa_hpa.nearest_neighbors[pid]['distances']
"""
Explanation: Get the nearest neighbor rmsds for the paths
End of explanation
"""
df = identifier.data
df
"""
Explanation: Display the pandas DataFrame containing the set of simulations
End of explanation
"""
df.loc[('DIMS',[1,2,3]), 'Sim ID']
"""
Explanation: Get the simulation IDs for DIMS simulations 1, 2, and 3
End of explanation
"""
|
Chipe1/aima-python
|
logic.ipynb
|
mit
|
from utils import *
from logic import *
from notebook import psource
"""
Explanation: Logic
This Jupyter notebook acts as supporting material for topics covered in Chapter 7 Logical Agents, Chapter 8 First-Order Logic and Chapter 9 Inference in First-Order Logic of the book Artificial Intelligence: A Modern Approach. We make use of the implementations in the logic.py module. See the intro notebook for instructions.
Let's first import everything from the logic module.
End of explanation
"""
Symbol('x')
"""
Explanation: CONTENTS
Logical sentences
Expr
PropKB
Knowledge-based agents
Inference in propositional knowledge base
Truth table enumeration
Proof by resolution
Forward and backward chaining
DPLL
WalkSAT
SATPlan
FolKB
Inference in first order knowledge base
Unification
Forward chaining algorithm
Backward chaining algorithm
Logical Sentences
The Expr class is designed to represent any kind of mathematical expression. The simplest type of Expr is a symbol, which can be defined with the function Symbol:
End of explanation
"""
(x, y, P, Q, f) = symbols('x, y, P, Q, f')
"""
Explanation: Or we can define multiple symbols at the same time with the function symbols:
End of explanation
"""
P & ~Q
"""
Explanation: We can combine Exprs with the regular Python infix and prefix operators. Here's how we would form the logical sentence "P and not Q":
End of explanation
"""
sentence = P & ~Q
sentence.op
sentence.args
P.op
P.args
Pxy = P(x, y)
Pxy.op
Pxy.args
"""
Explanation: This works because the Expr class overloads the & operator with this definition:
python
def __and__(self, other): return Expr('&', self, other)
and does similar overloads for the other operators. An Expr has two fields: op for the operator, which is always a string, and args for the arguments, which is a tuple of 0 or more expressions. By "expression," I mean either an instance of Expr, or a number. Let's take a look at the fields for some Expr examples:
End of explanation
"""
3 * f(x, y) + P(y) / 2 + 1
"""
Explanation: It is important to note that the Expr class does not define the logic of Propositional Logic sentences; it just gives you a way to represent expressions. Think of an Expr as an abstract syntax tree. Each of the args in an Expr can be either a symbol, a number, or a nested Expr. We can nest these trees to any depth. Here is a deeply nested Expr:
End of explanation
"""
~(P & Q) |'==>'| (~P | ~Q)
"""
Explanation: Operators for Constructing Logical Sentences
Here is a table of the operators that can be used to form sentences. Note that we have a problem: we want to use Python operators to make sentences, so that our programs (and our interactive sessions like the one here) will show simple code. But Python does not allow implication arrows as operators, so for now we have to use a more verbose notation that Python does allow: |'==>'| instead of just ==>. Alternately, you can always use the more verbose Expr constructor forms:
| Operation | Book | Python Infix Input | Python Output | Python Expr Input
|--------------------------|----------------------|-------------------------|---|---|
| Negation | ¬ P | ~P | ~P | Expr('~', P)
| And | P ∧ Q | P & Q | P & Q | Expr('&', P, Q)
| Or | P ∨ Q | P<tt> | </tt>Q| P<tt> | </tt>Q | Expr('|', P, Q)
| Inequality (Xor) | P ≠ Q | P ^ Q | P ^ Q | Expr('^', P, Q)
| Implication | P → Q | P <tt>|</tt>'==>'<tt>|</tt> Q | P ==> Q | Expr('==>', P, Q)
| Reverse Implication | Q ← P | Q <tt>|</tt>'<=='<tt>|</tt> P |Q <== P | Expr('<==', Q, P)
| Equivalence | P ↔ Q | P <tt>|</tt>'<=>'<tt>|</tt> Q |P <=> Q | Expr('<=>', P, Q)
Here's an example of defining a sentence with an implication arrow:
End of explanation
"""
expr('~(P & Q) ==> (~P | ~Q)')
"""
Explanation: expr: a Shortcut for Constructing Sentences
If the |'==>'| notation looks ugly to you, you can use the function expr instead:
End of explanation
"""
expr('sqrt(b ** 2 - 4 * a * c)')
"""
Explanation: expr takes a string as input, and parses it into an Expr. The string can contain arrow operators: ==>, <==, or <=>, which are handled as if they were regular Python infix operators. And expr automatically defines any symbols, so you don't need to pre-define them:
End of explanation
"""
wumpus_kb = PropKB()
"""
Explanation: For now that's all you need to know about expr. If you are interested, we explain the messy details of how expr is implemented and how |'==>'| is handled in the appendix.
Propositional Knowledge Bases: PropKB
The class PropKB can be used to represent a knowledge base of propositional logic sentences.
We see that the class KB has four methods, apart from __init__. A point to note here: the ask method simply calls the ask_generator method. Thus, this one has already been implemented, and what you'll have to actually implement when you create your own knowledge base class (though you'll probably never need to, considering the ones we've created for you) will be the ask_generator function and not the ask function itself.
Let us now look at the class PropKB itself.
* __init__(self, sentence=None) : The constructor __init__ creates a single field clauses which will be a list of all the sentences of the knowledge base. Note that each one of these sentences will be a 'clause' i.e. a sentence which is made up of only literals and ors.
* tell(self, sentence) : When you want to add a sentence to the KB, you use the tell method. This method takes a sentence, converts it to its CNF, extracts all the clauses, and adds all these clauses to the clauses field. So, you need not worry about telling only clauses to the knowledge base. You can tell the knowledge base a sentence in any form that you wish; converting it to CNF and adding the resulting clauses will be handled by the tell method.
* ask_generator(self, query) : The ask_generator function is used by the ask function. It calls the tt_entails function, which in turn returns True if the knowledge base entails query and False otherwise. The ask_generator itself returns an empty dict {} if the knowledge base entails query and None otherwise. This might seem a little weird to you. After all, it makes more sense just to return a True or a False instead of the {} or None. But this is done to maintain consistency with the way things are in First-Order Logic, where an ask_generator function is supposed to return all the substitutions that make the query true. Hence the dict, to return all these substitutions. I will mostly be using the ask function, which returns a {} or a False, but if you don't like this, you can always use the ask_if_true function, which returns a True or a False.
* retract(self, sentence) : This function removes all the clauses of the sentence given, from the knowledge base. Like the tell function, you don't have to pass clauses to remove them from the knowledge base; any sentence will do fine. The function will take care of converting that sentence to clauses and then remove those.
Wumpus World KB
Let us create a PropKB for the wumpus world with the sentences mentioned in section 7.4.3.
End of explanation
"""
P11, P12, P21, P22, P31, B11, B21 = expr('P11, P12, P21, P22, P31, B11, B21')
"""
Explanation: We define the symbols we use in our clauses.<br/>
$P_{x, y}$ is true if there is a pit in [x, y].<br/>
$B_{x, y}$ is true if the agent senses breeze in [x, y].<br/>
End of explanation
"""
wumpus_kb.tell(~P11)
"""
Explanation: Now we tell sentences based on section 7.4.3.<br/>
There is no pit in [1,1].
End of explanation
"""
wumpus_kb.tell(B11 | '<=>' | ((P12 | P21)))
wumpus_kb.tell(B21 | '<=>' | ((P11 | P22 | P31)))
"""
Explanation: A square is breezy if and only if there is a pit in a neighboring square. This has to be stated for each square but for now, we include just the relevant squares.
End of explanation
"""
wumpus_kb.tell(~B11)
wumpus_kb.tell(B21)
"""
Explanation: Now we include the breeze percepts for the first two squares leading up to the situation in Figure 7.3(b)
End of explanation
"""
wumpus_kb.clauses
"""
Explanation: We can check the clauses stored in a KB by accessing its clauses variable
End of explanation
"""
psource(KB_AgentProgram)
"""
Explanation: We see that the equivalence $B_{1, 1} \iff (P_{1, 2} \lor P_{2, 1})$ was automatically converted to two implications, which were in turn converted to CNF and stored in the KB.<br/>
$B_{1, 1} \iff (P_{1, 2} \lor P_{2, 1})$ was split into $B_{1, 1} \implies (P_{1, 2} \lor P_{2, 1})$ and $B_{1, 1} \Longleftarrow (P_{1, 2} \lor P_{2, 1})$.<br/>
$B_{1, 1} \implies (P_{1, 2} \lor P_{2, 1})$ was converted to $P_{1, 2} \lor P_{2, 1} \lor \neg B_{1, 1}$.<br/>
$B_{1, 1} \Longleftarrow (P_{1, 2} \lor P_{2, 1})$ was converted to $\neg (P_{1, 2} \lor P_{2, 1}) \lor B_{1, 1}$ which becomes $(\neg P_{1, 2} \lor B_{1, 1}) \land (\neg P_{2, 1} \lor B_{1, 1})$ after applying De Morgan's laws and distributing the disjunction.<br/>
$B_{2, 1} \iff (P_{1, 1} \lor P_{2, 2} \lor P_{3, 1})$ is converted in a similar manner.
Knowledge based agents
A knowledge-based agent is a simple generic agent that maintains and handles a knowledge base.
The knowledge base may initially contain some background knowledge.
<br>
The purpose of a KB agent is to provide a level of abstraction over knowledge-base manipulation and is to be used as a base class for agents that work on a knowledge base.
<br>
Given a percept, the KB agent adds the percept to its knowledge base, asks the knowledge base for the best action, and tells the knowledge base that it has in fact taken that action.
<br>
Our implementation of KB-Agent is encapsulated in a class KB_AgentProgram which inherits from the KB class.
<br>
Let's have a look.
End of explanation
"""
psource(tt_check_all)
"""
Explanation: The helper functions make_percept_sentence, make_action_query and make_action_sentence are all aptly named and as expected,
make_percept_sentence makes first-order logic sentences about percepts we want our agent to receive,
make_action_query asks the underlying KB about the action that should be taken and
make_action_sentence tells the underlying KB about the action it has just taken.
Inference in Propositional Knowledge Base
In this section we will look at two algorithms to check if a sentence is entailed by the KB. Our goal is to decide whether $\text{KB} \vDash \alpha$ for some sentence $\alpha$.
Truth Table Enumeration
It is a model-checking approach which, as the name suggests, enumerates all possible models in which the KB is true and checks if $\alpha$ is also true in these models. We list the $n$ symbols in the KB and enumerate the $2^{n}$ models in a depth-first manner and check the truth of KB and $\alpha$.
End of explanation
"""
psource(tt_entails)
"""
Explanation: The algorithm basically computes every line of the truth table $KB\implies \alpha$ and checks if it is true everywhere.
<br>
If symbols are defined, the routine recursively constructs every combination of truth values for the symbols and then,
it checks whether model is consistent with kb.
The given models correspond to the lines in the truth table,
which have a true in the KB column,
and for these lines it checks whether the query evaluates to true
<br>
result = pl_true(alpha, model).
<br>
<br>
In short, tt_check_all evaluates this logical expression for each model
<br>
pl_true(kb, model) => pl_true(alpha, model)
<br>
and returns True only if it holds in every model. Equivalently, there is no model in which
<br>
pl_true(kb, model) & ~pl_true(alpha, model)
<br>
holds, that is, the knowledge base and the negation of the query are jointly unsatisfiable.
<br>
<br>
tt_entails() just extracts the symbols from the query and calls tt_check_all() with the proper parameters.
End of explanation
"""
tt_entails(P & Q, Q)
"""
Explanation: Keep in mind that for two symbols P and Q, P => Q is false only when P is True and Q is False.
Example usage of tt_entails():
End of explanation
"""
tt_entails(P | Q, Q)
tt_entails(P | Q, P)
"""
Explanation: P & Q is True only when both P and Q are True. Hence, (P & Q) => Q is True
End of explanation
"""
(A, B, C, D, E, F, G) = symbols('A, B, C, D, E, F, G')
tt_entails(A & (B | C) & D & E & ~(F | G), A & D & E & ~F & ~G)
"""
Explanation: If we know that P | Q is true, we cannot infer the truth values of P and Q.
Hence (P | Q) => Q is False and so is (P | Q) => P.
End of explanation
"""
wumpus_kb.ask_if_true(~P11), wumpus_kb.ask_if_true(P11)
"""
Explanation: We can see that for the KB to be true, A, D, E have to be True and F and G have to be False.
Nothing can be said about B or C.
Coming back to our problem, note that tt_entails() takes an Expr which is a conjunction of clauses as the input instead of the KB itself.
You can use the ask_if_true() method of PropKB which does all the required conversions.
Let's check what wumpus_kb tells us about $P_{1, 1}$.
End of explanation
"""
wumpus_kb.ask_if_true(~P22), wumpus_kb.ask_if_true(P22)
"""
Explanation: Looking at Figure 7.9 we see that in all models in which the knowledge base is True, $P_{1, 1}$ is False. It makes sense that ask_if_true() returns True for $\alpha = \neg P_{1, 1}$ and False for $\alpha = P_{1, 1}$. This raises the question: what if $\alpha$ is True in only some of the models in which the knowledge base is True? Do we return True or False? Such a situation doesn't rule out the possibility of $\alpha$ being True, but $\alpha$ is not entailed by the KB, so we return False in such cases. We can see this is the case for $P_{2, 2}$ and $P_{3, 1}$.
End of explanation
"""
psource(to_cnf)
"""
Explanation: Proof by Resolution
Recall that our goal is to check whether $\text{KB} \vDash \alpha$ i.e. is $\text{KB} \implies \alpha$ true in every model. Suppose we wanted to check if $P \implies Q$ is valid. We check the satisfiability of $\neg (P \implies Q)$, which can be rewritten as $P \land \neg Q$. If $P \land \neg Q$ is unsatisfiable, then $P \implies Q$ must be true in all models. This gives us the result "$\text{KB} \vDash \alpha$ <em>if and only if</em> $\text{KB} \land \neg \alpha$ is unsatisfiable".<br/>
This technique corresponds to <em>proof by <strong>contradiction</strong></em>, a standard mathematical proof technique. We assume $\alpha$ to be false and show that this leads to a contradiction with known axioms in $\text{KB}$. We obtain a contradiction by making valid inferences using inference rules. In this proof we use a single inference rule, <strong>resolution</strong> which states $(l_1 \lor \dots \lor l_k) \land (m_1 \lor \dots \lor m_n) \land (l_i \iff \neg m_j) \implies l_1 \lor \dots \lor l_{i - 1} \lor l_{i + 1} \lor \dots \lor l_k \lor m_1 \lor \dots \lor m_{j - 1} \lor m_{j + 1} \lor \dots \lor m_n$. Applying the resolution yields us a clause which we add to the KB. We keep doing this until:
There are no new clauses that can be added, in which case $\text{KB} \nvDash \alpha$.
Two clauses resolve to yield the <em>empty clause</em>, in which case $\text{KB} \vDash \alpha$.
The <em>empty clause</em> is equivalent to <em>False</em> because it arises only from resolving two complementary
unit clauses such as $P$ and $\neg P$ which is a contradiction as both $P$ and $\neg P$ can't be <em>True</em> at the same time.
There is one catch however, the algorithm that implements proof by resolution cannot handle complex sentences.
Implications and bi-implications have to be simplified into simpler clauses.
We already know that every sentence of a propositional logic is logically equivalent to a conjunction of clauses.
We will use this fact to our advantage and simplify the input sentence into the conjunctive normal form (CNF) which is a conjunction of disjunctions of literals.
For example:
<br>
$$(A\lor B)\land (\neg B\lor C\lor\neg D)\land (D\lor\neg E)$$
This is equivalent to the POS (Product of sums) form in digital electronics.
<br>
Here's an outline of how the conversion is done:
1. Convert bi-implications to implications
<br>
$\alpha\iff\beta$ can be written as $(\alpha\implies\beta)\land(\beta\implies\alpha)$
<br>
This also applies to compound sentences
<br>
$\alpha\iff(\beta\lor\gamma)$ can be written as $(\alpha\implies(\beta\lor\gamma))\land((\beta\lor\gamma)\implies\alpha)$
<br>
2. Convert implications to their logical equivalents
<br>
$\alpha\implies\beta$ can be written as $\neg\alpha\lor\beta$
<br>
3. Move negation inwards
<br>
CNF requires atomic literals. Hence, negation cannot appear on a compound statement.
De Morgan's laws will be helpful here.
<br>
$\neg(\alpha\land\beta)\equiv(\neg\alpha\lor\neg\beta)$
<br>
$\neg(\alpha\lor\beta)\equiv(\neg\alpha\land\neg\beta)$
<br>
4. Distribute disjunction over conjunction
<br>
Disjunction and conjunction are distributive over each other.
Now that we only have conjunctions, disjunctions and negations in our expression,
we will distribute disjunctions over conjunctions wherever possible as this will give us a sentence which is a conjunction of simpler clauses,
which is what we wanted in the first place.
<br>
We need a term of the form
<br>
$(\alpha_{1}\lor\alpha_{2}\lor\alpha_{3}...)\land(\beta_{1}\lor\beta_{2}\lor\beta_{3}...)\land(\gamma_{1}\lor\gamma_{2}\lor\gamma_{3}...)\land...$
<br>
<br>
The to_cnf function executes this conversion using helper subroutines.
End of explanation
"""
psource(eliminate_implications)
psource(move_not_inwards)
psource(distribute_and_over_or)
"""
Explanation: to_cnf calls three subroutines.
<br>
eliminate_implications converts bi-implications and implications to their logical equivalents.
<br>
move_not_inwards removes negations from compound statements and moves them inwards using De Morgan's laws.
<br>
distribute_and_over_or distributes disjunctions over conjunctions.
<br>
Run the cell below for implementation details.
End of explanation
"""
A, B, C, D = expr('A, B, C, D')
to_cnf(A |'<=>'| B)
to_cnf(A |'<=>'| (B & C))
to_cnf(A & (B | (C & D)))
to_cnf((A |'<=>'| ~B) |'==>'| (C | ~D))
"""
Explanation: Let's convert some sentences to see how it works
End of explanation
"""
psource(pl_resolution)
pl_resolution(wumpus_kb, ~P11), pl_resolution(wumpus_kb, P11)
pl_resolution(wumpus_kb, ~P22), pl_resolution(wumpus_kb, P22)
"""
Explanation: Coming back to our resolution problem, we can see how the to_cnf function is utilized here
End of explanation
"""
psource(PropDefiniteKB.clauses_with_premise)
"""
Explanation: Forward and backward chaining
Previously, we said we will look at two algorithms to check if a sentence is entailed by the KB. Here's a third one.
The difference here is that our goal now is to determine if a knowledge base of definite clauses entails a single proposition symbol q - the query.
There is a catch however - the knowledge base can only contain Horn clauses.
<br>
Horn Clauses
Horn clauses can be defined as a disjunction of literals with at most one positive literal.
<br>
A Horn clause with exactly one positive literal is called a definite clause.
<br>
A Horn clause might look like
<br>
$\neg a\lor\neg b\lor\neg c\lor\neg d... \lor z$
<br>
This, coincidentally, is also a definite clause.
<br>
Using De Morgan's laws together with the implication equivalence $\alpha\implies\beta\equiv\neg\alpha\lor\beta$, the example above can be rewritten as
<br>
$a\land b\land c\land d ... \implies z$
<br>
This seems like a logical representation of how humans process known data and facts.
Assuming percepts a, b, c, d ... to be true simultaneously, we can infer z to also be true at that point in time.
There are some interesting aspects of Horn clauses that make algorithmic inference or resolution easier.
- Definite clauses can be written as implications:
<br>
The most important simplification a definite clause provides is that it can be written as an implication.
The premise (or the knowledge that leads to the implication) is a conjunction of positive literals.
The conclusion (the implied statement) is also a positive literal.
The sentence thus becomes easier to understand.
The premise and the conclusion are conventionally called the body and the head respectively.
A single positive literal is called a fact.
- Forward chaining and backward chaining can be used for inference from Horn clauses:
<br>
Forward chaining is semantically identical to AND-OR-Graph-Search from the chapter on search algorithms.
Implementational details will be explained shortly.
- Deciding entailment with Horn clauses is linear in size of the knowledge base:
<br>
Surprisingly, the forward and backward chaining algorithms traverse each element of the knowledge base at most once, greatly simplifying the problem.
<br>
<br>
The function pl_fc_entails implements forward chaining to see if a knowledge base KB entails a symbol q.
<br>
Before we proceed further, note that pl_fc_entails doesn't use an ordinary KB instance.
The knowledge base here is an instance of the PropDefiniteKB class, derived from the PropKB class,
but modified to store definite clauses.
<br>
The main point of difference arises in the inclusion of a helper method to PropDefiniteKB that returns a list of clauses in KB that have a given symbol p in their premise.
End of explanation
"""
psource(pl_fc_entails)
"""
Explanation: Let's now have a look at the pl_fc_entails algorithm.
End of explanation
"""
clauses = ['(B & F)==>E',
'(A & E & F)==>G',
'(B & C)==>F',
'(A & B)==>D',
'(E & F)==>H',
'(H & I)==>J',
'A',
'B',
'C']
"""
Explanation: The function accepts a knowledge base KB (an instance of PropDefiniteKB) and a query q as inputs.
<br>
<br>
count initially stores the number of symbols in the premise of each sentence in the knowledge base.
<br>
The conjuncts helper function separates a given sentence at conjunctions.
<br>
inferred is initialized as a boolean defaultdict.
This will be used later to check if we have inferred all premises of each clause of the agenda.
<br>
agenda initially stores a list of clauses that the knowledge base knows to be true.
The is_prop_symbol helper function checks if the given symbol is a valid propositional logic symbol.
<br>
<br>
We now iterate through agenda, popping a symbol p on each iteration.
If the query q is the same as p, we know that entailment holds.
<br>
The agenda is processed, reducing count by one for each implication with a premise p.
A conclusion is added to the agenda when count reaches zero. This means we know all the premises of that particular implication to be true.
<br>
clauses_with_premise is a helpful method of the PropKB class.
It returns a list of clauses in the knowledge base that have p in their premise.
<br>
<br>
Now that we have an idea of how this function works, let's see a few examples of its usage, but we first need to define our knowledge base. We assume we know the following clauses to be true.
End of explanation
"""
definite_clauses_KB = PropDefiniteKB()
for clause in clauses:
definite_clauses_KB.tell(expr(clause))
"""
Explanation: We will now tell this information to our knowledge base.
End of explanation
"""
pl_fc_entails(definite_clauses_KB, expr('G'))
pl_fc_entails(definite_clauses_KB, expr('H'))
pl_fc_entails(definite_clauses_KB, expr('I'))
pl_fc_entails(definite_clauses_KB, expr('J'))
"""
Explanation: We can now check if our knowledge base entails the following queries.
End of explanation
"""
psource(dpll)
"""
Explanation: Effective Propositional Model Checking
The previous segments elucidate the algorithmic procedure for model checking.
In this segment, we look at ways of making them computationally efficient.
<br>
The problem we are trying to solve is conventionally called the propositional satisfiability problem, abbreviated as the SAT problem.
In layman terms, if there exists a model that satisfies a given Boolean formula, the formula is called satisfiable.
<br>
The SAT problem was the first problem to be proven NP-complete.
The main characteristics of an NP-complete problem are:
- Given a solution to such a problem, it is easy to verify if the solution solves the problem.
- The time required to actually solve the problem using any known algorithm increases exponentially with respect to the size of the problem.
<br>
<br>
Due to these properties, heuristic and approximational methods are often applied to find solutions to these problems.
<br>
It is extremely important to be able to solve large scale SAT problems efficiently because
many combinatorial problems in computer science can be conveniently reduced to checking the satisfiability of a propositional sentence under some constraints.
<br>
We will introduce two new algorithms that perform propositional model checking in a computationally effective way.
<br>
1. DPLL (Davis-Putnam-Logeman-Loveland) algorithm
This algorithm is very similar to Backtracking-Search.
It recursively enumerates possible models in a depth-first fashion with the following improvements over algorithms like tt_entails:
1. Early termination:
<br>
In certain cases, the algorithm can detect the truth value of a statement using just a partially completed model.
For example, $(P\lor Q)\land(P\lor R)$ is true if P is true, regardless of other variables.
This reduces the search space significantly.
2. Pure symbol heuristic:
<br>
A symbol that has the same sign (positive or negative) in all clauses is called a pure symbol.
It isn't difficult to see that any satisfiable model will have the pure symbols assigned such that its parent clause becomes true.
For example, $(P\lor\neg Q)\land(\neg Q\lor\neg R)\land(R\lor P)$ has P and Q as pure symbols
and for the sentence to be true, P has to be true and Q has to be false.
The pure symbol heuristic thus simplifies the problem a bit.
3. Unit clause heuristic:
<br>
In the context of DPLL, a clause with just one literal, or one in which all but one of the literals are already false under the current model, is called a unit clause.
If a clause is a unit clause, it can only be satisfied by assigning the necessary value to make the last literal true.
We have no other choice.
<br>
Assigning one unit clause can create another unit clause.
For example, when P is false, $(P\lor Q)$ becomes a unit clause, causing true to be assigned to Q.
A series of forced assignments derived from previous unit clauses is called unit propagation.
In this way, this heuristic simplifies the problem further.
<br>
The algorithm often employs other tricks to scale up to large problems.
However, these tricks are currently out of the scope of this notebook. Refer to section 7.6 of the book for more details.
<br>
<br>
Let's have a look at the algorithm.
End of explanation
"""
psource(dpll_satisfiable)
"""
Explanation: The algorithm uses the ideas described above to check satisfiability of a sentence in propositional logic.
It recursively calls itself, simplifying the problem at each step. It also uses helper functions find_pure_symbol and find_unit_clause to carry out steps 2 and 3 above.
<br>
The dpll_satisfiable helper function converts the input clauses to conjunctive normal form and calls the dpll function with the correct parameters.
End of explanation
"""
A, B, C, D = expr('A, B, C, D')
dpll_satisfiable(A & B & ~C & D)
"""
Explanation: Let's see a few examples of usage.
End of explanation
"""
dpll_satisfiable((A & B) | (C & ~A) | (B & ~D))
"""
Explanation: This is a simple case to highlight that the algorithm actually works.
End of explanation
"""
dpll_satisfiable(A |'<=>'| B)
dpll_satisfiable((A |'<=>'| B) |'==>'| (C & ~A))
dpll_satisfiable((A | (B & C)) |'<=>'| ((A | B) & (A | C)))
"""
Explanation: If a particular symbol isn't present in the solution,
it means that the solution is independent of the value of that symbol.
In this case, the solution is independent of A.
End of explanation
"""
psource(WalkSAT)
"""
Explanation: 2. WalkSAT algorithm
This algorithm is very similar to Hill climbing.
On every iteration, the algorithm picks an unsatisfied clause and flips a symbol in the clause.
This is similar to finding a neighboring state in the hill_climbing algorithm.
<br>
The symbol to be flipped is decided by an evaluation function that counts the number of unsatisfied clauses.
Sometimes, symbols are also flipped randomly to avoid local optima; a subtle balance between greediness and randomness is required. Alternatively, some versions of the algorithm restart with a completely new random assignment if no solution has been found for too long, as a way of escaping local minima in the number of unsatisfied clauses.
<br>
<br>
Let's have a look at the algorithm.
End of explanation
"""
A, B, C, D = expr('A, B, C, D')
WalkSAT([A, B, ~C, D], 0.5, 100)
"""
Explanation: The function takes three arguments:
<br>
1. The clauses we want to satisfy.
<br>
2. The probability p of randomly changing a symbol.
<br>
3. The maximum number of flips (max_flips) the algorithm will run for. If the clauses are still unsatisfied, the algorithm returns None to denote failure.
<br>
The algorithm is identical in concept to Hill climbing and the code isn't difficult to understand.
<br>
<br>
Let's see a few examples of usage.
End of explanation
"""
WalkSAT([A & B, A & C], 0.5, 100)
WalkSAT([A & B, C & D, C & B], 0.5, 100)
WalkSAT([A & B, C | D, ~(D | B)], 0.5, 1000)
"""
Explanation: This is a simple case to show that the algorithm converges.
End of explanation
"""
def WalkSAT_CNF(sentence, p=0.5, max_flips=10000):
    return WalkSAT(conjuncts(to_cnf(sentence)), p, max_flips)
"""
Explanation: The last call doesn't give any output because WalkSAT did not find any model in which these clauses hold. Working through the clauses by hand shows that they form a contradiction, so there is no solution to find.
One point of difference between this algorithm and the dpll_satisfiable algorithms is that both these algorithms take inputs differently.
For WalkSAT to take complete sentences as input,
we can write a helper function that converts the input sentence into conjunctive normal form and then calls WalkSAT with the list of conjuncts of the CNF form of the sentence.
End of explanation
"""
WalkSAT_CNF((A & B) | (C & ~A) | (B & ~D), 0.5, 1000)
"""
Explanation: Now we can call WalkSAT_CNF and dpll_satisfiable with the same arguments.
End of explanation
"""
sentence_1 = A |'<=>'| B
sentence_2 = (A & B) | (C & ~A) | (B & ~D)
sentence_3 = (A | (B & C)) |'<=>'| ((A | B) & (A | C))
%%timeit
dpll_satisfiable(sentence_1)
dpll_satisfiable(sentence_2)
dpll_satisfiable(sentence_3)
%%timeit
WalkSAT_CNF(sentence_1)
WalkSAT_CNF(sentence_2)
WalkSAT_CNF(sentence_3)
"""
Explanation: It works!
<br>
Notice that the solution generated by WalkSAT doesn't omit variables that the sentence doesn't depend upon.
If the sentence is independent of a particular variable, the solution contains a random value for that variable because of the stochastic nature of the algorithm.
<br>
<br>
Let's compare the runtime of WalkSAT and DPLL for a few cases. We will use the %%timeit magic to do this.
End of explanation
"""
psource(SAT_plan)
"""
Explanation: On average, for these solvable cases, WalkSAT is considerably faster than dpll because, for a small number of variables,
WalkSAT can reduce the search space significantly.
Results can differ for sentences with more symbols, though.
Feel free to play around with this to understand the trade-offs of these algorithms better.
SATPlan
In this section we show how to make plans by logical inference. The basic idea is very simple. It includes the following three steps:
1. Construct a sentence that includes:
1. A collection of assertions about the initial state.
2. The successor-state axioms for all the possible actions at each time up to some maximum time t.
3. The assertion that the goal is achieved at time t.
2. Present the whole sentence to a SAT solver.
3. Assuming a model is found, extract from the model those variables that represent actions and are assigned true. Together they represent a plan to achieve the goals.
Let's have a look at the algorithm.
End of explanation
"""
transition = {'A': {'Left': 'A', 'Right': 'B'},
'B': {'Left': 'A', 'Right': 'C'},
'C': {'Left': 'B', 'Right': 'C'}}
print(SAT_plan('A', transition, 'C', 2))
print(SAT_plan('A', transition, 'B', 3))
print(SAT_plan('C', transition, 'A', 3))
"""
Explanation: Let's see a few examples of its usage. First we define a transition and then call SAT_plan.
End of explanation
"""
transition = {(0, 0): {'Right': (0, 1), 'Down': (1, 0)},
(0, 1): {'Left': (1, 0), 'Down': (1, 1)},
(1, 0): {'Right': (1, 0), 'Up': (1, 0), 'Left': (1, 0), 'Down': (1, 0)},
(1, 1): {'Left': (1, 0), 'Up': (0, 1)}}
print(SAT_plan((0, 0), transition, (1, 1), 4))
"""
Explanation: Let us do the same for another transition.
End of explanation
"""
clauses = []
"""
Explanation: First-Order Logic Knowledge Bases: FolKB
The class FolKB can be used to represent a knowledge base of First-order logic sentences. You would initialize and use it the same way as you would for PropKB except that the clauses are first-order definite clauses. We will see how to write such clauses to create a database and query them in the following sections.
Criminal KB
In this section we create a FolKB based on the following paragraph.<br/>
<em>The law says that it is a crime for an American to sell weapons to hostile nations. The country Nono, an enemy of America, has some missiles, and all of its missiles were sold to it by Colonel West, who is American.</em><br/>
The first step is to extract the facts and convert them into first-order definite clauses. Extracting the facts from data alone is a challenging task. Fortunately, we have a small paragraph and can do extraction and conversion manually. We'll store the clauses in a list aptly named clauses.
End of explanation
"""
clauses.append(expr("(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)"))
"""
Explanation: <em>“... it is a crime for an American to sell weapons to hostile nations”</em><br/>
The keywords to look for here are 'crime', 'American', 'sell', 'weapon' and 'hostile'. We use predicate symbols to make meaning of them.
Criminal(x): x is a criminal
American(x): x is an American
Sells(x ,y, z): x sells y to z
Weapon(x): x is a weapon
Hostile(x): x is a hostile nation
Let us now combine them with appropriate variable naming to depict the meaning of the sentence. The criminal x is also the American x who sells weapon y to z, which is a hostile nation.
$\text{American}(x) \land \text{Weapon}(y) \land \text{Sells}(x, y, z) \land \text{Hostile}(z) \implies \text{Criminal} (x)$
End of explanation
"""
clauses.append(expr("Enemy(Nono, America)"))
"""
Explanation: <em>"The country Nono, an enemy of America"</em><br/>
We now know that Nono is an enemy of America. We represent these nations using the constant symbols Nono and America. The enemy relation is shown using the predicate symbol Enemy.
$\text{Enemy}(\text{Nono}, \text{America})$
End of explanation
"""
clauses.append(expr("Owns(Nono, M1)"))
clauses.append(expr("Missile(M1)"))
"""
Explanation: <em>"Nono ... has some missiles"</em><br/>
This states the existence of some missile which is owned by Nono. $\exists x \text{Owns}(\text{Nono}, x) \land \text{Missile}(x)$. We invoke existential instantiation to introduce a new constant M1 which is the missile owned by Nono.
$\text{Owns}(\text{Nono}, \text{M1}), \text{Missile}(\text{M1})$
End of explanation
"""
clauses.append(expr("(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)"))
"""
Explanation: <em>"All of its missiles were sold to it by Colonel West"</em><br/>
If Nono owns something and it is classified as a missile, then it was sold to Nono by West.
$\text{Missile}(x) \land \text{Owns}(\text{Nono}, x) \implies \text{Sells}(\text{West}, x, \text{Nono})$
End of explanation
"""
clauses.append(expr("American(West)"))
"""
Explanation: <em>"West, who is American"</em><br/>
West is an American.
$\text{American}(\text{West})$
End of explanation
"""
clauses.append(expr("Missile(x) ==> Weapon(x)"))
clauses.append(expr("Enemy(x, America) ==> Hostile(x)"))
"""
Explanation: We also know, from our understanding of language, that missiles are weapons and that an enemy of America counts as “hostile”.
$\text{Missile}(x) \implies \text{Weapon}(x), \text{Enemy}(x, \text{America}) \implies \text{Hostile}(x)$
End of explanation
"""
crime_kb = FolKB(clauses)
"""
Explanation: Now that we have converted the information into first-order definite clauses we can create our first-order logic knowledge base.
End of explanation
"""
psource(subst)
"""
Explanation: The subst helper function substitutes variables with given values in first-order logic statements.
This will be useful in later algorithms.
Its implementation is quite simple and self-explanatory.
End of explanation
"""
subst({x: expr('Nono'), y: expr('M1')}, expr('Owns(x, y)'))
"""
Explanation: Here's an example of how subst can be used.
End of explanation
"""
unify(expr('x'), 3)
unify(expr('A(x)'), expr('A(B)'))
unify(expr('Cat(x) & Dog(Dobby)'), expr('Cat(Bella) & Dog(y)'))
"""
Explanation: Inference in First-Order Logic
In this section we look at a forward chaining and a backward chaining algorithm for FolKB. Both aforementioned algorithms rely on a process called <strong>unification</strong>, a key component of all first-order inference algorithms.
Unification
We sometimes require finding substitutions that make different logical expressions look identical. This process, called unification, is done by the unify algorithm. It takes as input two sentences and returns a <em>unifier</em> for them if one exists. A unifier is a dictionary which stores the substitutions required to make the two sentences identical. It does so by recursively unifying the components of a sentence, where the unification of a variable symbol var with a constant symbol Const is the mapping {var: Const}. Let's look at a few examples.
End of explanation
"""
print(unify(expr('Cat(x)'), expr('Dog(Dobby)')))
"""
Explanation: In cases where there is no possible substitution that unifies the two sentences, the function returns None.
End of explanation
"""
print(unify(expr('Cat(x) & Dog(Dobby)'), expr('Cat(Bella) & Dog(x)')))
"""
Explanation: We also need to take care not to unintentionally reuse the same variable name across the two sentences: unify treats repeated names as a single variable, which prevents it from being bound to two different values, so unification fails.
End of explanation
"""
psource(fol_fc_ask)
"""
Explanation: Forward Chaining Algorithm
We consider the simple forward-chaining algorithm presented in <em>Figure 9.3</em>. We look at each rule in the knowledge base and see if the premises can be satisfied. This is done by finding a substitution which unifies each of the premise with a clause in the KB. If we are able to unify the premises, the conclusion (with the corresponding substitution) is added to the KB. This inferencing process is repeated until either the query can be answered or till no new sentences can be added. We test if the newly added clause unifies with the query in which case the substitution yielded by unify is an answer to the query. If we run out of sentences to infer, this means the query was a failure.
The function fol_fc_ask is a generator which yields all substitutions which validate the query.
End of explanation
"""
answer = fol_fc_ask(crime_kb, expr('Hostile(x)'))
print(list(answer))
"""
Explanation: Let's find out all the hostile nations. Note that we only told the KB that Nono was an enemy of America, not that it was hostile.
End of explanation
"""
crime_kb.tell(expr('Enemy(JaJa, America)'))
answer = fol_fc_ask(crime_kb, expr('Hostile(x)'))
print(list(answer))
"""
Explanation: The generator returned a single substitution which says that Nono is a hostile nation. See how after adding another enemy nation the generator returns two substitutions.
End of explanation
"""
psource(fol_bc_or)
"""
Explanation: <strong><em>Note</em>:</strong> fol_fc_ask makes changes to the KB by adding sentences to it.
Backward Chaining Algorithm
This algorithm works backward from the goal, chaining through rules to find known facts that support the proof. Suppose goal is the query we want to find the substitution for. We find rules of the form $\text{lhs} \implies \text{goal}$ in the KB and try to prove lhs. There may be multiple clauses in the KB which give multiple lhs. It is sufficient to prove only one of these. But to prove a lhs all the conjuncts in the lhs of the clause must be proved. This makes it similar to <em>And/Or</em> search.
OR
The <em>OR</em> part of the algorithm comes from our choice to select any clause of the form $\text{lhs} \implies \text{goal}$. Looking at all rules whose rhs unifies with the goal, we yield a substitution which proves all the conjuncts in the lhs. We use parse_definite_clause to obtain the lhs and rhs from a clause of the form $\text{lhs} \implies \text{rhs}$. For atomic facts, the lhs is an empty list.
End of explanation
"""
psource(fol_bc_and)
"""
Explanation: AND
The <em>AND</em> corresponds to proving all the conjuncts in the lhs. We need to find a substitution which proves each <em>and</em> every clause in the list of conjuncts.
End of explanation
"""
# Rebuild KB because running fol_fc_ask would add new facts to the KB
crime_kb = FolKB(clauses)
crime_kb.ask(expr('Hostile(x)'))
"""
Explanation: Now the main function fol_bc_ask calls fol_bc_or with the substitution initialized as empty. The ask method of FolKB uses fol_bc_ask and fetches the first substitution returned by the generator to answer the query. Let's query the knowledge base we created from clauses to find hostile nations.
End of explanation
"""
P |'==>'| ~Q
"""
Explanation: You may notice some new variables in the substitution. They are introduced to standardize the variable names and prevent naming problems, as discussed in the Unification section.
Appendix: The Implementation of |'==>'|
Consider the Expr formed by this syntax:
End of explanation
"""
(P | '==>') | ~Q
"""
Explanation: What is the funny |'==>'| syntax? The trick is that "|" is just the regular Python or-operator, and so is exactly equivalent to this:
End of explanation
"""
P | '==>'
"""
Explanation: In other words, there are two applications of or-operators. Here's the first one:
End of explanation
"""
partial = PartialExpr('==>', P)
partial | ~Q
"""
Explanation: What is going on here is that the __or__ method of Expr serves a dual purpose. If the right-hand-side is another Expr (or a number), then the result is an Expr, as in (P | Q). But if the right-hand-side is a string, then the string is taken to be an operator, and we create a node in the abstract syntax tree corresponding to a partially-filled Expr, one where we know the left-hand-side is P and the operator is ==>, but we don't yet know the right-hand-side.
The PartialExpr class has an __or__ method that says to create an Expr node with the right-hand-side filled in. Here we can see the combination of the PartialExpr with Q to create a complete Expr:
End of explanation
"""
expr('~(P & Q) ==> (~P | ~Q)')
"""
Explanation: This trick is due to Ferdinand Jamitzky, with a modification by C. G. Vedant,
who suggested using a string inside the or-bars.
Appendix: The Implementation of expr
How does expr parse a string into an Expr? It turns out there are two tricks (besides the Jamitzky/Vedant trick):
We do a string substitution, replacing "==>" with "|'==>'|" (and likewise for other operators).
We eval the resulting string in an environment in which every identifier
is bound to a symbol with that identifier as the op.
In other words,
End of explanation
"""
P, Q = symbols('P, Q')
~(P & Q) |'==>'| (~P | ~Q)
"""
Explanation: is equivalent to doing:
End of explanation
"""
P & Q |'==>'| P | Q
"""
Explanation: One thing to beware of: this puts ==> at the same precedence level as "|", which is not quite right. For example, we get this:
End of explanation
"""
(P & Q) |'==>'| (P | Q)
"""
Explanation: which is probably not what we meant; when in doubt, put in extra parens:
End of explanation
"""
from notebook import Canvas_fol_bc_ask
canvas_bc_ask = Canvas_fol_bc_ask('canvas_bc_ask', crime_kb, expr('Criminal(x)'))
"""
Explanation: Examples
End of explanation
"""
|
mjabri/holoviews
|
doc/Tutorials/Containers.ipynb
|
bsd-3-clause
|
import numpy as np
import holoviews as hv
%reload_ext holoviews.ipython
"""
Explanation: This notebook serves as a reference for all the container types in HoloViews, with an extensive list of small, self-contained examples wherever possible, allowing each container type to be understood and tested independently. The container types generally need to contain Elements to be useful, which are described separately. We first cover the tree-based containers, which are used in many of the examples elsewhere:
<dl class="dl-horizontal">
<dt>[``Layout``](#Layout)</dt><dd>Collect components into a tree, displaying them side by side (``+`` operator)</dd>
<dt>[``Overlay``](#Overlay)</dt><dd>Collect components into a tree, displaying them on top of one another (``*`` operator)</dd>
</dl>
The remaining container types are most useful for exploring
<a id='ParameterSpaceIndex'></a> parameter spaces:
<dl class="dl-horizontal">
<dt>[``HoloMap``](#HoloMap)</dt><dd>Visualize N-dimensional spaces using sliders or as an animation. </dd>
<dt>[``GridSpace``](#GridSpace)</dt><dd>Parameter space in two dimensions laid out in a grid. </dd>
<dt>[``NdLayout``](#NdLayout)</dt><dd>Parameter space of any dimensionality in a layout with titles.</dd>
<dt>[``NdOverlay``](#NdOverlay)</dt><dd>Parameter space of any dimensionality in an overlay with a legend</dd>
</dl>
There is a separate Composing Data tutorial explaining how each of these can be combined and nested, once you are familiar with the individual containers.
Trees <a id='Trees'></a>
End of explanation
"""
points = [(0.1*i, np.sin(0.1*i)) for i in range(100)]
pair = hv.Curve(points) + hv.ItemTable([('A',1),('B',2)])
pair
"""
Explanation: To display detailed information about each object displayed in this notebook run the following code in a cell:
python
%output info=True
For convenience, in this tutorial we have specified %output info=True, which will pop up a detailed list and explanation of the available options for visualizing each container type, after that notebook cell is executed. So, to find out all the options for any of these container types, just press <Shift-Enter> on the corresponding cell in the live notebook. See the Options tutorial tutorial for detailed information about how to set or examine these options for a given component.
Layout <a id='Layout'></a>
Layout places nearly any combination of components alongside each other, as described in more detail in the Introductory tutorial. The .cols() method of Layout can be used to regroup the components into the specified number of columns for display, if desired, as illustrated in the short example that follows.
End of explanation
"""
%%opts Layout [sublabel_format="({numeric})."] Curve [sublabel_size=20] ItemTable [sublabel_position=(0.05, 0.8) sublabel_size=10]
pair
"""
Explanation: Layout subfigure labels <a id='subfigure-labels'></a>
By default, a Layout will label the subfigures as in A and B above. You can easily configure this behaviour by setting the sublabel_format option to None (no sublabels at all), or "{numeric}": 2, "{roman}": ii, "{Roman}": II, "{alpha}": b, or "{Alpha}": B, and you can also change the sublabel size and relative position:
End of explanation
"""
%%opts Image.A [aspect=1.5] Image.C [aspect=0.5 sublabel_position=(-0.7, 0.85) xticks=3]
np.random.seed(42)
(hv.Image(np.random.rand(25, 25), group='A') +
hv.Image(np.random.rand(25, 25), group='B') +
hv.Image(np.random.rand(25, 25), group='C'))
"""
Explanation: You can also set these options globally if you consistently prefer a different style, or to disable the subfigure labelling altogether, either using a line magic:
python
%opts Layout [sublabel_format=None]
or Python code:
```python
from holoviews.core.options import Store, Options
Store.options.Layout = Options('plot', sublabel_format=None)
from holoviews.plotting import ElementPlot
ElementPlot.set_param(sublabel_size=30,sublabel_position=(1.0,-0.1))
```
Layout with custom aspects
The aspect ratios of Elements in a Layout can also be adjusted allowing you to quickly compose a complex figure.
End of explanation
"""
%%opts Layout [aspect_weight=1.0] Image.A [aspect=1.5] Image.C [aspect=0.5 sublabel_position=(-0.7, 0.85) xticks=3]
np.random.seed(42)
(hv.Image(np.random.rand(25, 25), group='A') +
hv.Image(np.random.rand(25, 25), group='B') +
hv.Image(np.random.rand(25, 25), group='C'))
"""
Explanation: When aspects vary across elements, there is no single valid arrangement suitable for presenting the elements together. For instance, in the above example, the widths are kept constant while the heights are varying due to the differences in the element aspect ratios. An alternative arrangement may be achieved by setting the aspect_weight plot option from 0 (the default value shown above) to 1.0:
End of explanation
"""
import numpy as np
from holoviews import Curve, ItemTable, Empty
sine_points = [(0.1*i, np.sin(0.1*i)) for i in range(100)]
cosine_points = [(0.1*i, np.cos(0.1*i)) for i in range(100)]
(hv.ItemTable([('A',1),('B',2)]) + hv.Curve(sine_points) + hv.Empty() + hv.Curve(cosine_points)).cols(2)
"""
Explanation: The aspect_weight parameter can take any value between 0 and 1 which will adjust how any Layout containing elements with non square-aspects are presented. Note that when there are multiple rows and columns and many elements with different aspect ratios, it is often necessary to explore the effect of changing this parameter to generate a suitable plot.
Using Empty as a placeholder in Layout <a id='Empty'></a>
In order to arrange elements within a Layout, it can sometimes be useful to introduce empty gaps. For this the Empty pseudo-element may be used as follows:
End of explanation
"""
%%opts VectorField (color='r') Image (cmap='gray')
x,y = np.mgrid[-10:10,-10:10] * 0.25
sine_rings = np.sin(x**2+y**2)*np.pi+np.pi
exp_falloff = 1/np.exp((x**2+y**2)/8)
vector_data = np.array([x.flatten()/5., # X positions
y.flatten()/5., # Y positions
sine_rings.flatten(), # Arrow angles
exp_falloff.flatten()]) # Arrow sizes
hv.Image(sine_rings) * hv.VectorField(vector_data.T)
"""
Explanation: The Empty pseudo-element contains no data, cannot be customized in any way, and is never associated with a sub-label. The reason Empty is called a pseudo-element is that it is only allowed to be used in Layout and cannot be used as an element in any other type of container.
Overlay <a id='Overlay'></a>
Overlays are often built using * as in the Introductory tutorial, but they can also be built by hand. Using vector_data from the VectorField Element example, we can overlay the vector field on top of an Image component (or any other component, though not all combinations will be useful or clear due to occlusion):
End of explanation
"""
frequencies = np.linspace(0.5,2.0,5)
phases = np.linspace(0, np.pi*2, 5)
x,y = np.mgrid[-50:51, -50:51] * 0.1
"""
Explanation: Parameter Spaces <a id='Parameter Spaces'></a>
HoloViews also supplies container classes useful for visualizing parameter spaces or phase spaces, i.e. large collections of results for various combinations of parameters.
In addition to the container types discussed here, the HeatMap Element is also useful for visualizing small two-dimensional parameter spaces that have a single value for each location in the space. See also the separate Lancet tool, which works well with HoloViews for launching and collating results from separate computational jobs covering large parameter spaces, which HoloViews can then analyze with ease.
Specifying arbitrary parameter spaces
First let us define some numpy arrays which we will use to define the types of parameter space below.
End of explanation
"""
def sine_array(phase, freq):
return np.sin(phase + (freq*x**2+freq*y**2))
matrices = {(p, f): hv.Image(sine_array(p, f), label='Sinusoid Ring', group='Amplitude')
for f in [0.5, 1.0, 1.5, 2.0] # Frequencies
for p in [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]} # Phases
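# Added aside (not in the original tutorial): the HeatMap element mentioned earlier can
# summarize a small two-dimensional parameter space when each (phase, frequency) pair maps
# to a single value (here, simply the mean of the corresponding array).
hv.HeatMap([(p, f, sine_array(p, f).mean())
            for f in [0.5, 1.0, 1.5, 2.0]
            for p in [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]])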
"""
Explanation: A parameter space of Image elements
End of explanation
"""
matrices[0,0.5] + matrices[np.pi,0.5]
"""
Explanation: To illustrate that matrices is a dictionary indexed by (phase, frequency) here are two of the dictionary elements side by side:
End of explanation
"""
def sine_curve(phase, freq, samples=102):
xvals = [0.1* i for i in range(samples)]
return [(x, np.sin(phase+freq*x)) for x in xvals]
curves = {(round(p,2), f): hv.Curve(sine_curve(p,f))
for f in [1,2,3,4,5] # Frequencies
for p in [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]} # Phases
"""
Explanation: A parameter space of Curve elements
End of explanation
"""
curves[0,1] + curves[3.14, 2] + curves[0,1] * curves[3.14, 2]
"""
Explanation: Here we display two of our curves and then overlay them together with * (which chooses new colors for each new curve according to a predefined color cycle that can be selected as a plot option):
End of explanation
"""
%%opts Image (cmap='gray')
hv.HoloMap(matrices, kdims=['phase', 'frequency'])
"""
Explanation: HoloMap <a id='HoloMap'></a>
A HoloMap is a very powerful multi-dimensional data structure that can hold a very large number of similar Element objects, e.g. those measured for different values in a parameter space, and then allows easy exploration, animation, and slicing of the parameter and value spaces. Usage of this type is covered extensively in the Exploring Data tutorial. Here we show how a HoloMap can be used to explore all of the different Image objects created for each combination of phase and frequency:
End of explanation
"""
%%output size=140
ring_space = hv.GridSpace(matrices, kdims=['phase', 'frequency'])
curve_space = hv.GridSpace(curves, kdims=['phase', 'frequency'])
ring_space + curve_space
"""
Explanation: GridSpace <a id='GridSpace'></a>
Whenever a HoloMap has more than one item to display, it will be visualized as an animation or using slider widgets as above, displaying one item (e.g. one Element) at any given time. If you have up to a two-dimensional parameter space, you can see all your data items at once using a GridSpace to lay out your data with labelled axes:
End of explanation
"""
from holoviews import NdLayout
hv.NdLayout(matrices, kdims=['phase', 'frequency'])[0:1.6, 0:2].cols(3)
"""
Explanation: Of course, each item in the grid can also be a combination of other plots, such as an Overlay or an NdOverlay (below).
NdLayout <a id='NdLayout'></a>
GridSpace is great when you have a two-dimensional parameter space, but fails to scale well beyond that. For higher-dimensional parameter spaces, you can use an NdLayout, where the varying key dimensions are shown in the titles of the elements:
End of explanation
"""
%%output size=150
four_curves = {(p, f): val for ((p,f), val) in curves.items() if p <=np.pi/2 and f<=2}
hv.NdOverlay(four_curves, kdims=['Phase', 'Frequency'])
"""
Explanation: NdOverlay <a id='NdOverlay'></a>
NdOverlay is to Overlay what NdLayout is to Layout, in other words it is a way of looking at a parameter space as an Overlay. This generally makes NdOverlay less useful than NdLayout, because some element types don't overlay nicely over each other (e.g. multiple Image elements just obscure one another). Also, though the NdOverlay is more compact, it is easy for an NdOverlay to present too much data at once.
Unlike a regular Overlay, the elements of an NdOverlay must always be of the same type.
To demonstrate this, we will overlay several of the curves from our phase space. To make sure the result is legible, we filter our parameter space down to four curves:
End of explanation
"""
%%output size=150
np.random.seed(10)
extents = (-3, -3, 3, 3)
hv.NdOverlay({1: hv.Points(np.random.normal(size=(50,2)), extents=extents),
2: hv.Points(np.random.normal(size=(50,2)), extents=extents),
3: hv.Points(np.random.normal(size=(50,2)), extents=extents)},
kdims=['Cluster'])
"""
Explanation: Because NdOverlay ensures all the contained elements are of the same type, it can now supply a useful legend. As with everything in HoloViews, overlaying is a very general concept, and it works with any other type that can be meaningfully overlaid. Here is another example using Points:
End of explanation
"""
|
fullmetalfelix/ML-CSC-tutorial
|
tSNE.ipynb
|
gpl-3.0
|
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy
import pickle
from dscribe.descriptors import MBTR
from visualise import view
"""
Explanation: t-distributed Stochastic Neighbour Embedding
t-SNE is a nonlinear dimensionality reduction technique for high-dimensional data.
More info in the usual place: https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding
End of explanation
"""
dataIn = numpy.genfromtxt('./data/wineInputs.txt', delimiter=',')
dataOut = numpy.genfromtxt('./data/wineOutputs.txt', delimiter=',')
# find indexes of wines for each class
idx1 = numpy.where(dataOut==1)
idx2 = numpy.where(dataOut==2)
idx3 = numpy.where(dataOut==3)
# compute the tSNE transformation of the inputs in 2 dimensions
comp = TSNE(n_components=2).fit_transform(dataIn)
# plot the resulting 2D points
plt.plot(comp[:,0],comp[:,1],'ro')
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
"""
Explanation: We are going to apply this technique to a database of wine samples. The inputs are 13 chemical descriptors; the output is the index of each wine's class (cheap, ok, good). In principle we do not know the output.
End of explanation
"""
plt.plot(comp[idx1,0],comp[idx1,1],'go')
plt.plot(comp[idx2,0],comp[idx2,1],'ro')
plt.plot(comp[idx3,0],comp[idx3,1],'bo')
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
"""
Explanation: The transform had no idea about the output classes, and yet three clusters of points can still be seen. We can overlay the knowledge of the correct classification to check whether the clusters correspond to what we know:
End of explanation
"""
import ase.io
# load the database
samples = ase.io.read("data/clusters.extxyz", index=':')
# samples is now a list of ASE Atoms objects, ready to use!
# the first 55 clusters are FCC, the last 55 are BCC
# define MBTR setup
mbtr = MBTR(
species=["Fe"],
periodic=False,
k2={
"geometry": {"function": "distance"},
"grid": { "min": 0, "max": 2, "sigma": 0.01, "n": 200 },
"weighting": {"function": "exp", "scale": 0.4, "cutoff": 1e-2}
},
k3={
"geometry": {"function": "cosine"},
"grid": { "min": -1.0, "max": 1.0, "sigma": 0.02, "n": 200 },
"weighting": {"function": "exp", "scale": 0.4, "cutoff": 1e-2}
},
flatten=True,
sparse=False,
)
# calculate MBTR descriptor for each sample - takes a few secs
mbtrs = mbtr.create(samples)
print(mbtrs.shape)
"""
Explanation: Exercises
1. Iron clusters
We have a bunch of Fe clusters and it is not easy to determine their crystal structure with conventional tools. Let's try using the MBTR descriptor and t-SNE on these clusters and check if we can distinguish between FCC and BCC phases.
End of explanation
"""
# ...
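# One possible solution sketch (an added example, not the original exercise answer), assuming
# `mbtrs` from the cell above and that, as stated earlier, the first 55 clusters are FCC and
# the last 55 are BCC.
proj = TSNE(n_components=2).fit_transform(mbtrs)
plt.plot(proj[:55, 0], proj[:55, 1], 'bo', label='FCC')
plt.plot(proj[55:, 0], proj[55:, 1], 'ro', label='BCC')
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.show()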
"""
Explanation: Plot the t-SNE projection of MBTR output and see if you can see the two classes of structures accurately
End of explanation
"""
# ...
"""
Explanation: Plot the original MBTR descriptors and see if the structural differences are visible there
End of explanation
"""
# ...
"""
Explanation: Try changing the MBTR and t-SNE parameters and see how the projection changes
End of explanation
"""
|
d00d/quantNotebooks
|
Notebooks/quantopian_research_public/notebooks/lectures/The_Dangers_of_Overfitting/notebook.ipynb
|
unlicense
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels import regression
from scipy import poly1d
x = np.arange(10)
y = 2*np.random.randn(10) + x**2
xs = np.linspace(-0.25, 9.25, 200)
lin = np.polyfit(x, y, 1)
quad = np.polyfit(x, y, 2)
many = np.polyfit(x, y, 9)
plt.scatter(x, y)
plt.plot(xs, poly1d(lin)(xs))
plt.plot(xs, poly1d(quad)(xs))
plt.plot(xs, poly1d(many)(xs))
plt.ylabel('Y')
plt.xlabel('X')
plt.legend(['Underfit', 'Good fit', 'Overfit']);
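# Added aside (not in the original lecture): quantify the overfitting by drawing a fresh sample
# from the same quadratic-plus-noise process, evaluated between the original x values, and
# comparing mean squared errors. The exact numbers depend on the random draws (no seed is set),
# but the degree-9 fit will typically look best in sample and worst out of sample.
x_new = np.arange(10) + 0.5
y_new = 2*np.random.randn(10) + x_new**2
for name, coeffs in [('degree 1', lin), ('degree 2', quad), ('degree 9', many)]:
    mse_in = np.mean((poly1d(coeffs)(x) - y)**2)
    mse_out = np.mean((poly1d(coeffs)(x_new) - y_new)**2)
    print('%s fit: in-sample MSE %.2f, out-of-sample MSE %.2f' % (name, mse_in, mse_out))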
"""
Explanation: Overfitting
By Evgenia "Jenny" Nitishinskaya and Delaney Granizo-Mackenzie. Algorithms by David Edwards.
Part of the Quantopian Lecture Series:
www.quantopian.com/lectures
github.com/quantopian/research_public
Notebook released under the Creative Commons Attribution 4.0 License.
What is overfitting?
When constructing a model, we tune both the parameters and the model by fitting to sample data. We then use the model and parameters to predict data we have not yet observed. We say a model is overfit when it is overly sensitive to noise and idiosyncrasies in the sample data, and therefore does not reflect the underlying data-generating process.
To understand why this happens, one has to consider the amount of noise present in any dataset. One can consider a set of data as $D_{T}$, the true underlying data that came from whatever process we are trying to model, plus $\epsilon$, some random noise. Because what we see is $D = D_{T} + \epsilon$, we might fit our model to predict the particular $\epsilon$ we observed almost perfectly, but not $D_{T}$.
This is problematic because we only care about fitting to the sample insofar as that gives an accurate fit to future data. The two broad causes of overfitting are:
* small sample size, so that noise and trend are not distinguishable
* choosing an overly complex model, so that it ends up contorting to fit the noise in the sample
Verbal Example: Too Many Rules (Complexity)
Let's say you have the following dataset:
| TV Channel | Room Lighting Intensity | Enjoyment |
|------------|-------------------------|-----------|
| 1 | 2 | 1 |
| 2 | 3 | 2 |
| 3 | 1 | 3 |
You are trying to predict enjoyment, so you create the following rules:
If TV Channel is 1 and Lighting Intensity is 2, then Enjoyment will be 1.
If TV Channel is 2 and Lighting Intensity is 3, then Enjoyment will be 2.
If TV Channel is 3 and Lighting Intensity is 1, then Enjoyment will be 3.
In all other cases predict an average enjoyment of 2.
This is a well-defined model for future data; however, in this case let's say your enjoyment is purely dependent on the TV channel and not on the lighting. Because we have a rule for each row in our dataset, our model is perfectly predictive in our historical data, but would perform poorly in real trials because we are overfitting to random noise in the lighting intensity data.
Generalizing this to stocks, if your model starts developing many specific rules based on specific past events, it is almost definitely overfitting. This is why black-box machine learning (neural networks, etc.) is so dangerous when not done correctly.
Example: Curve fitting
Overfitting is most easily seen when we look at polynomial regression. Below we construct a dataset which noisily follows a quadratic. The linear model is underfit: simple linear models aren't suitable for all situations, especially when we have reason to believe that the data is nonlinear. The quadratic curve has some error but fits the data well.
When we fit a ninth-degree polynomial to the data, the error is zero - a ninth-degree polynomial can be constructed to go through any 10 points - but, looking at the tails of the curve, we know that we can't expect it to accurately predict other samples from the same distribution. It fits the data perfectly, but that is because it also fits the noise perfectly, and the noise is not what we want to model. In this case we have selected a model that is too complex.
End of explanation
"""
# Load one year's worth of pricing data for five different assets
start = '2013-01-01'
end = '2014-01-01'
x1 = get_pricing('PEP', fields='price', start_date=start, end_date=end)
x2 = get_pricing('MCD', fields='price', start_date=start, end_date=end)
x3 = get_pricing('ATHN', fields='price', start_date=start, end_date=end)
x4 = get_pricing('DOW', fields='price', start_date=start, end_date=end)
y = get_pricing('PG', fields='price', start_date=start, end_date=end)
# Build a linear model using only x1 to explain y
slr = regression.linear_model.OLS(y, sm.add_constant(x1)).fit()
slr_prediction = slr.params[0] + slr.params[1]*x1
# Run multiple linear regression using x1, x2, x3, x4 to explain y
mlr = regression.linear_model.OLS(y, sm.add_constant(np.column_stack((x1,x2,x3,x4)))).fit()
mlr_prediction = mlr.params[0] + mlr.params[1]*x1 + mlr.params[2]*x2 + mlr.params[3]*x3 + mlr.params[4]*x4
# Compute adjusted R-squared for the two different models
print 'SLR R-squared:', slr.rsquared_adj
print 'SLR p-value:', slr.f_pvalue
print 'MLR R-squared:', mlr.rsquared_adj
print 'MLR p-value:', mlr.f_pvalue
# Plot y along with the two different predictions
y.plot()
slr_prediction.plot()
mlr_prediction.plot()
plt.ylabel('Price')
plt.xlabel('Date')
plt.legend(['PG', 'SLR', 'MLR']);
"""
Explanation: When working with real data, there is unlikely to ever be a situation where a ninth-degree polynomial is appropriate: our choice of function should reflect a belief about the underlying process, and real-world processes generally do not follow high-degree polynomial curves. This example is contrived, but it can be tempting to use a quadratic or cubic model just to decrease sample error.
Note: Model/Parameter Parsimony
Just as the most elegant physics models describe a tremendous amount of our world through a few equations, a good trading model should explain most of the data through a few rules. Any time you start to have a number of rules even close to the number of points in your data set, you can be sure you are overfitting. Since parameters constrain a model in the same way that rules do, they can be thought of as rules, and the same is true of them: fewer parameters are better, and it is better to explain 60% of the data with 2-3 parameters than 90% with 10.
Beware of the perfect fit
Because there is almost always noise present in real data, a perfect fit is almost always indicative of overfitting. It is almost impossible to know the percentage noise/signal in a given data set while you are developing the model, but use your common sense. Are the predictions surprisingly good? Then you're probably overfitting.
Example: Regression parameters
How do we know which variables to include in a model? If we're afraid of omitting something important, we might try different ones and include all the variables we can find that improve the fit. Below we regress one asset that is in the same sector as the asset whose price we're trying to predict, and three other unrelated ones. In our initial timeframe, we are able to fit the model more closely to the data when using multiple variables than when using just one.
End of explanation
"""
# Load the next year of pricing data
start = '2014-01-01'
end = '2015-01-01'
x1 = get_pricing('PEP', fields='price', start_date=start, end_date=end)
x2 = get_pricing('MCD', fields='price', start_date=start, end_date=end)
x3 = get_pricing('ATHN', fields='price', start_date=start, end_date=end)
x4 = get_pricing('DOW', fields='price', start_date=start, end_date=end)
y = get_pricing('PG', fields='price', start_date=start, end_date=end)
# Extend our model from before to the new time period
slr_prediction2 = slr.params[0] + slr.params[1]*x1
mlr_prediction2 = mlr.params[0] + mlr.params[1]*x1 + mlr.params[2]*x2 + mlr.params[3]*x3 + mlr.params[4]*x4
# Manually compute adjusted R-squared over the new time period
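# (adjusted R^2 = 1 - (SS_residual / SS_total) * (N - 1) / (N - p - 1), with p regressors)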
# Adjustment 1 is for the SLR model
p = 1
N = len(y)
adj1 = float(N - 1)/(N - p - 1)
# Now for MLR
p = 4
N = len(y)
adj2 = float(N - 1)/(N - p - 1)
SST = sum((y - np.mean(y))**2)
SSRs = sum((slr_prediction2 - y)**2)
print 'SLR R-squared:', 1 - adj1*SSRs/SST
SSRm = sum((mlr_prediction2 - y)**2)
print 'MLR R-squared:', 1 - adj2*SSRm/SST
# Plot y along with the two different predictions
y.plot()
slr_prediction2.plot()
mlr_prediction2.plot()
plt.ylabel('Price')
plt.xlabel('Date')
plt.legend(['PG', 'SLR', 'MLR']);
"""
Explanation: However, when we use the same estimated parameters to model a different time period, we find that the single-variable model fits worse, while the multiple-variable model is entirely useless. It seems that the relationships we found are not consistent and are particular to the original sample period.
End of explanation
"""
# Load the pricing data for a stock
start = '2011-01-01'
end = '2013-01-01'
pricing = get_pricing('MCD', fields='price', start_date=start, end_date=end)
# Compute rolling averages for various window lengths
mu_30d = pd.rolling_mean(pricing, window=30)
mu_60d = pd.rolling_mean(pricing, window=60)
mu_100d = pd.rolling_mean(pricing, window=100)
# Plot asset pricing data with rolling means from the 100th day, when all the means become available
plt.plot(pricing[100:], label='Asset')
plt.plot(mu_30d[100:], label='30d MA')
plt.plot(mu_60d[100:], label='60d MA')
plt.plot(mu_100d[100:], label='100d MA')
plt.xlabel('Day')
plt.ylabel('Price')
plt.legend();
"""
Explanation: If we wanted, we could scan our universe for variables that were correlated with the dependent variable, and construct an extremely overfitted model. However, in most cases the correlation will be spurious, and the relationship will not continue into the future.
Example: Rolling windows
One of the challenges in building a model that uses rolling parameter estimates, such as rolling mean or rolling beta, is choosing a window length. A longer window will take into account long-term trends and be less volatile, but it will also lag more when taking into account new observations. The choice of window length strongly affects the rolling parameter estimate and can change how we see and treat the data. Below we calculate the rolling averages of a stock price for different window lengths:
End of explanation
"""
# Trade using a simple mean-reversion strategy
def trade(stock, length):
# If window length is 0, algorithm doesn't make sense, so exit
if length == 0:
return 0
# Compute rolling mean and rolling standard deviation
mu = pd.rolling_mean(stock, window=length)
std = pd.rolling_std(stock, window=length)
# Compute the z-scores for each day using the historical data up to that day
zscores = (stock - mu)/std
# Simulate trading
# Start with no money and no positions
money = 0
count = 0
for i in range(len(stock)):
# Sell short if the z-score is > 1
if zscores[i] > 1:
money += stock[i]
count -= 1
# Buy long if the z-score is < 1
elif zscores[i] < -1:
money -= stock[i]
count += 1
# Clear positions if the z-score between -.5 and .5
elif abs(zscores[i]) < 0.5:
money += count*stock[i]
count = 0
return money
# Find the window length 0-254 that gives the highest returns using this strategy
length_scores = [trade(pricing, l) for l in range(255)]
best_length = np.argmax(length_scores)
print 'Best window length:', best_length
# Get pricing data for a different timeframe
start2 = '2013-01-01'
end2 = '2015-01-01'
pricing2 = get_pricing('MCD', fields='price', start_date=start2, end_date=end2)
# Find the returns during this period using what we think is the best window length
length_scores2 = [trade(pricing2, l) for l in range(255)]
print best_length, 'day window:', length_scores2[best_length]
# Find the best window length based on this dataset, and the returns using this window length
best_length2 = np.argmax(length_scores2)
print best_length2, 'day window:', length_scores2[best_length2]
"""
Explanation: If we pick the length based on which seems best - say, on how well our model or algorithm performs - we are overfitting. Below we have a simple trading algorithm which bets on the stock price reverting to the rolling mean (for more details, check out the mean reversion notebook). We use the performance of this algorithm to score window lengths and find the best one. However, when we consider a different timeframe, this window length is far from optimal. This is because our original choice was overfitted to the sample data.
End of explanation
"""
plt.plot(length_scores)
plt.plot(length_scores2)
plt.xlabel('Window length')
plt.ylabel('Score')
plt.legend(['2011-2013', '2013-2015']);
"""
Explanation: Clearly fitting to our sample data doesn't always give good results in the future. Just for fun, let's plot the length scores computed from the two different timeframes:
End of explanation
"""
|
xpharry/Udacity-DLFoudation
|
tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb
|
mit
|
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r')
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r')
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
"""
Explanation: Introduction
Hi, my name is Andrew Trask. I am currently a PhD student at the University of Oxford studying Deep Learning for Natural Language Processing. Natural Language Processing is the field that studies human language and today we’re going to be talking about Sentiment Classification, or the classification of whether or not a section of human-generated text is positive or negative (i.e., happy or sad). Deep Learning, as I’m sure you’re coming to understand, is a set of tools (neural networks) used to take what we “know” and predict what we “want to know”. In this case, we “know” a paragraph of text generated from a human, and we “want to know” whether or not it has positive or negative sentiment. Our goal is to build a neural network that can make this prediction.
What you will learn along the way - "Framing a Problem"
What this tutorial is really about is "framing a problem" so that a neural network can be successful in solving it. Sentiment is a great example because neural networks don't take raw text as input, they take numbers! We have to consider how to efficiently transform our text into numbers so that our network can learn a valuable underlying pattern. I can't stress enough how important this skillset will be to your career. Frameworks (like TensorFlow) will handle backpropagation, gradients, and error measures for you, but "framing the problem" is up to you, the scientist, and if it's not done correctly, your networks will spend forever searching for correlation between your two datasets (and they might never find it).
What You Should Already Know
I am assuming you already know about neural networks, forward and back-propagation, stochastic gradient descent, mean squared error, and train/test splits from previous lessons.
It Starts with a Dataset
Neural networks, by themselves, cannot do anything. All a neural network really does is search for direct or indirect correlation between two datasets. So, in order for a neural network to learn anything, we have to present it with two, meaningful datasets. The first dataset must represent what we “know” and the second dataset must represent what we “want to know”, or what we want the neural network to be able to tell us. As the network trains, it’s going to search for correlation between these two datasets, so that eventually it can take one and predict the other. Let me show you what I mean with our example sentiment dataset.
End of explanation
"""
reviews[0]
labels[0]
"""
Explanation: In the cell above, I have loaded two datasets. The first dataset "reviews" is a list of 25,000 movie reviews that people wrote about various movies. The second dataset is a list of whether or not each review is a “positive” review or “negative” review.
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: I want you to pretend that you’re a neural network for a moment. Consider a few examples from the two datasets below. Do you see any correlation between these two datasets?
End of explanation
"""
import numpy as np
from collections import Counter
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 10):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
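# Re-map each ratio onto a log scale centered at zero: words that appear mostly in positive
# reviews end up with values > 0, words that appear mostly in negative reviews with values < 0.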
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
"""
Explanation: Well, let’s consider several different granularities. At the paragraph level, no two paragraphs are the same, so there can be no “correlation” per se. You have to see two things occur at the same time more than once in order for it to be considered “correlation”. What about at the character level? I’m guessing the letter “b” is used just as much in positive reviews as it is in negative reviews. How about the word level? Ah, I think there's some correlation between the words in these reviews and whether or not the review is positive or negative.
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
"""
Explanation: Wow, there’s really something to this theory! As we can see, there are clearly terms in movie reviews that have correlation with our output labels. So, if we think there might be strong correlation between the words present in a particular review and the sentiment of that review, what should our network take as input and then predict? Let me put it a different way: If we think that there is correlation between the “vocabulary” of a particular review and the sentiment of that review, what should be the input and output to our neural network? The input should be the “vocabulary of the review” and the output should be whether or not the review is positive or negative!
Now that we have some idea that this task is possible (and where we want the network to find correlation), let’s try to train a neural network to predict sentiment based on the vocabulary of a movie review.
Transforming Text to Numbers
The next challenge is to transform our datasets into something that the neural network can read.
As I’m sure you’ve learned, neural networks are made up of layers of interconnected “neurons”. The first layer is where our input data “goes in” to the network. Any particular “input neuron” can take exactly two kinds of inputs, binary inputs and “real valued” inputs. Previously, you’ve been training networks on raw, continuous data, real valued inputs. However, now we’re modeling whether different input terms “exist” or “do not exist” in a movie review. When we model something that either “exists” or “does not exist” or when something is either “true” or “false”, we want to use “binary” inputs to our neural network. This use of binary values is called "one-hot encoding". Let me show you what I mean.
Example Predictions
End of explanation
"""
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
"""
Explanation: The Input
Let’s say our entire movie review corpus has 10,000 words. Given a single movie review ("This was a horrible, terrible movie"), we’re going to put a “1” in the input of our neural network for every word that exists in the review, and a 0 everywhere else. So, given our 10,000 words, a movie review with 6 words would have 6 neurons with a “1” and 9,994 neurons with a “0”. The picture above is a miniaturized version of this, displaying how we input a "1" for the words "horrible" and "terrible" while inputting a "0" for the word "excellent" because it was not present in the review.
The Output
In the same way, we want our network to either predict that the input is “positive” or “negative”. Now, our networks can’t write “positive” or “negative”, so we’re going to instead have another single neuron that represents “positive” when it is a “1” and “negative” when it is a “0”. In this way, our network can give us a number that we will interpret as “positive” or “negative”.
Big Picture
What we’re actually doing here is creating a “derivative dataset” from our movie reviews. Neural networks, after all, can’t read text. So, what we’re doing is identifying the “source of correlation” in our two datasets and creating a derivative dataset made up of numbers that preserve the patterns that we care about. In our input dataset, that pattern is the existence or non-existence of a particular word. In our output dataset, that pattern is whether a statement is positive or negative. Now we’ve converted our patterns into something our network can understand! Our network is going to look for correlation between the 1s and 0s in our input and the 1s and 0s in our output, and if it can do so it has learned to predict the sentiment of movie reviews. Now that our data is ready for the network, let’s start building the network.
Creating the Input Data
As we just learned above, in order for our neural network to predict on a movie review, we have to be able to create an input layer of 1s and 0s that correlates with the words present in a review. Let's start by creating a function that can take a review and generate this layer of 1s and 0s.
In order to create this function, we first must decide how many input neurons we need. The answer is quite simple. Since we want our network's input to be able to represent the presence or absence of any word in the vocabulary, we need one node per vocabulary term. So, our input layer size is the size of our vocabulary. Let's calculate that.
End of explanation
"""
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
"""
Explanation: And now we can initialize our (empty) input layer as a vector of 0s. We'll modify it later by putting "1"s in various positions.
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network.png')
"""
Explanation: And now we want to create a function that will set our layer_0 list to the correct sequence of 1s and 0s based on a single review. Now if you remember our picture before, you might have noticed something. Each word had a specific place in the input of our network.
End of explanation
"""
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
"""
Explanation: In order to create a function that can update our layer_0 variable based on a review, we have to decide which spots in our layer_0 vector (list of numbers) correlate with each word. Truth be told, it doesn't matter which ones we choose, only that we pick spots for each word and stick with them. Let's decide those positions now and store them in a python dictionary called "word2index".
End of explanation
"""
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] = 1
update_input_layer(reviews[0])
layer_0
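# Quick sanity check (added aside, not in the original walkthrough): the number of 1s in layer_0
# should match the number of distinct space-separated tokens in the first review.
print(int(layer_0.sum()), len(set(reviews[0].split(" "))))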
"""
Explanation: ...and now we can use this new "word2index" dictionary to populate our input layer with the right 1s in the right places.
End of explanation
"""
def get_target_for_label(label):
if(label == 'POSITIVE'):
return 1
else:
return 0
get_target_for_label(labels[0])
get_target_for_label(labels[1])
"""
Explanation: Creating the Target Data
And now we want to do the same thing for our target predictions
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network_2.png')
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
np.random.seed(1)
self.pre_process_data()
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000])
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
# evaluate the model after training
mlp.test(reviews[-1000:],labels[-1000:])
mlp.run("That movie was great")
"""
Explanation: Putting it all together in a Neural Network
End of explanation
"""
layer_0 = np.zeros(10)
layer_0
"""
Explanation: Making our Network Train and Run Faster
Even though this network is very trainable on a laptop, we can really get a lot more performance out of it, and doing so is all about understanding how the neural network is interacting with our data (again, "modeling the problem"). Let's take a moment to consider how layer_1 is generated. First, we're going to create a smaller layer_0 so that we can easily picture all the values in our notebook.
End of explanation
"""
layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5)
"""
Explanation: Now, let's set a few of the inputs to 1s, and create a sample weight matrix
End of explanation
"""
layer_1 = layer_0.dot(weights_0_1)
layer_1
"""
Explanation: So, given these pieces, layer_1 is created in the following way....
End of explanation
"""
Image(filename='sentiment_network_sparse.png')
"""
Explanation: layer_1 is generated by performing vector->matrix multiplication; however, most of our input neurons are turned off! Thus, there's actually a lot of computation being wasted. Consider the network below.
End of explanation
"""
Image(filename='sentiment_network_sparse_2.png')
"""
Explanation: First Inefficiency: "0" neurons waste computation
If you recall from previous lessons, each edge from one neuron to another represents a single value in our weights_0_1 matrix. When we forward propagate, we take our input neuron's value, multiply it by each weight attached to that neuron, and then sum all the resulting values in the next layer. So, in this case, if only "excellent" was turned on, then all of the multiplications coming out of "horrible" and "terrible" are wasted computation! All of the weights coming out of "horrible" and "terrible" are being multiplied by 0, thus having no effect on our values in layer_1.
End of explanation
"""
#inefficient thing we did before
layer_1 = layer_0.dot(weights_0_1)
layer_1
# new, less expensive lookup table version
layer_1 = weights_0_1[4] + weights_0_1[9]
layer_1
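# Added check (not in the original walkthrough): both versions produce exactly the same layer_1.
print(np.allclose(layer_0.dot(weights_0_1), weights_0_1[4] + weights_0_1[9]))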
"""
Explanation: Second Inefficiency: "1" neurons don't need to multiply!
When we're forward propagating, we multiply our input neuron's value by the weights attached to it. However, in this case, when the neuron is turned on, it's always turned on to exactly 1. So, there's no need for the multiplication; what if we skipped this step?
The Solution: Create layer_1 by adding the vectors for each word.
Instead of generating a huge layer_0 vector and then performing a full vector->matrix multiplication across our huge weights_0_1 matrix, we can simply sum the rows of weights_0_1 that correspond to the words in our review. The resulting value of layer_1 will be exactly the same as if we had performed a full matrix multiplication at a fraction of the computational cost. This is called a "lookup table" or an "embedding layer".
End of explanation
"""
import time
import sys
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
np.random.seed(1)
self.pre_process_data(reviews)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self,reviews):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
self.layer_1 = np.zeros((1,hidden_nodes))
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def train(self, training_reviews_raw, training_labels):
training_reviews = list()
for review in training_reviews_raw:
indices = set()
for word in review.split(" "):
if(word in self.word2index.keys()):
indices.add(self.word2index[word])
training_reviews.append(list(indices))
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
# Hidden layer
# layer_1 = self.layer_0.dot(self.weights_0_1)
self.layer_1 *= 0
for index in review:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# Update the weights
self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
for index in review:
self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
# Hidden layer
self.layer_1 *= 0
unique_indices = set()
for word in review.lower().split(" "):
if word in self.word2index.keys():
unique_indices.add(self.word2index[word])
for index in unique_indices:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: See how they generate exactly the same value? Let's update our new neural network to do this.
End of explanation
"""
# evaluate the model after training
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: And voilà! Our network learns 10x faster than before while making exactly the same predictions!
End of explanation
"""
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()
hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
"""
Explanation: Our network even tests over twice as fast as well!
Making Learning Faster & Easier by Reducing Noise
So at first this might seem like the same thing we did in the previous section. However, while the previous section was about looking for computational waste and trimming it out, this section is about looking for noise in our data and trimming it out. When we reduce the "noise" in our data, the neural network can identify correlation much faster and with greater accuracy. Whereas our technique will be simple, many recently developed state-of-the-art techniques (most notably attention and batch normalization) are all about reducing the amount of noise that your network has to filter through. The more obvious you can make the correlation to your neural network, the better.
Our network is looking for correlation between movie review vocabularies and output positive/negative labels. In order to do this, our network has to come to understand over 70,000 different words in our vocabulary! That's a ton of knowledge that the network has to learn!
This begs the question: are all the words in the vocabulary actually relevant to sentiment? A few pages ago, we counted how often words occurred in positive reviews relative to negative reviews and created a ratio. We could then sort words by this ratio and see the words with the most positive and negative affinity. If you remember, the output looked like this:
End of explanation
"""
frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
frequency_frequency[cnt] += 1
hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
"""
Explanation: In this graph "0" means that a word has no affinitity for either positive or negative. AS you can see, the vast majority of our words don't have that much direct affinity! So, our network is having to learn about lots of terms that are likely irrelevant to the final prediction. If we remove some of the most irrelevant words, our network will have fewer words that it has to learn about, allowing it to focus more on the words that matters.
Furthermore, check out this graph of simple word frequency
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,min_count = 10,polarity_cutoff = 0.1,hidden_nodes = 10, learning_rate = 0.1):
np.random.seed(1)
self.pre_process_data(reviews, polarity_cutoff, min_count)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self,reviews, polarity_cutoff,min_count):
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt >= 50):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
review_vocab = set()
for review in reviews:
for word in review.split(" "):
if(total_counts[word] > min_count):
if(word in pos_neg_ratios.keys()):
if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):
review_vocab.add(word)
else:
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
self.layer_1 = np.zeros((1,hidden_nodes))
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def train(self, training_reviews_raw, training_labels):
training_reviews = list()
for review in training_reviews_raw:
indices = set()
for word in review.split(" "):
if(word in self.word2index.keys()):
indices.add(self.word2index[word])
training_reviews.append(list(indices))
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
# Hidden layer
# layer_1 = self.layer_0.dot(self.weights_0_1)
self.layer_1 *= 0
for index in review:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# Update the weights
self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
for index in review:
self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step
if(layer_2 >= 0.5 and label == 'POSITIVE'):
correct_so_far += 1
if(layer_2 < 0.5 and label == 'NEGATIVE'):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
# Hidden layer
self.layer_1 *= 0
unique_indices = set()
for word in review.lower().split(" "):
if word in self.word2index.keys():
unique_indices.add(self.word2index[word])
for index in unique_indices:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
if(layer_2[0] >= 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
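# Added aside: with min_count and polarity_cutoff applied, the vocabulary the network has to
# learn about is much smaller than the 70,000+ word vocabulary used earlier.
print(len(mlp.review_vocab))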
"""
Explanation: As you can see, the vast majority of words in our corpus only happen once or twice. Unfortunately, this isn't enough for any of those words to be correlated with anything. Correlation requires seeing two things occur at the same time on multiple occasions so that you can identify a pattern. We should eliminate these very low frequency terms as well.
In the next network, we eliminate both low-frequency words (via a min_count parameter) and words with low positive/negative affiliation (via a polarity_cutoff parameter)
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: So, using these techniques, we are able to achieve a slightly higher testing score while training 2x faster than before. Furthermore, if we really crank up these thresholds (min_count and polarity_cutoff), we can get some pretty extreme speed with minimal loss in quality (if, for example, your business use case requires running very fast).
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
import matplotlib.colors as colors
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp.word2index.keys()):
words_to_visualize.append(word)
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp.weights_0_1[mlp.word2index[word]])
if(pos_neg_ratios[word] > 0):
colors_list.append("#"+colors.rgb2hex([0,min(255,pos_neg_ratios[word] * 1),0])[3:])
else:
colors_list.append("#000000")
# colors_list.append("#"+colors.rgb2hex([0,0,min(255,pos_neg_ratios[word] * 1)])[3:])
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
x2=words_top_ted_tsne[:,1],
names=words_to_visualize))
p.scatter(x="x1", y="x2", size=8, source=source,color=colors_list)
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
# p.add_layout(word_labels)
show(p)
# green indicates positive words, black indicates negative words
"""
Explanation: What's Going On in the Weights?
End of explanation
"""
|
fedjo/thesis
|
project/aat/object_detection/object_detection_tutorial.ipynb
|
apache-2.0
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
"""
Explanation: Object Detection Demo
Welcome to the object detection inference walkthrough! This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the installation instructions before you start.
Imports
End of explanation
"""
# This is needed to display the images.
%matplotlib inline
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
"""
Explanation: Env setup
End of explanation
"""
from utils import label_map_util
from utils import visualization_utils as vis_util
"""
Explanation: Object detection imports
Here are the imports from the object detection module.
End of explanation
"""
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that are used to add the correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
"""
Explanation: Model preparation
Variables
Any model exported using the export_inference_graph.py tool can be loaded here simply by changing PATH_TO_CKPT to point to a new .pb file.
By default we use an "SSD with Mobilenet" model here. See the detection model zoo for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
End of explanation
"""
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
"""
Explanation: Download Model
End of explanation
"""
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
"""
Explanation: Load a (frozen) Tensorflow model into memory.
End of explanation
"""
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
"""
Explanation: Loading label map
Label maps map indices to category names, so that when our convolutional network predicts 5, we know that this corresponds to airplane. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine.
End of explanation
"""
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
"""
Explanation: Helper code
End of explanation
"""
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
"""
Explanation: Detection
End of explanation
"""
|
jrg365/gpytorch
|
examples/02_Scalable_Exact_GPs/KISSGP_Regression.ipynb
|
mit
|
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
# Make plots inline
%matplotlib inline
"""
Explanation: Structured Kernel Interpolation (SKI/KISS-GP)
Overview
SKI (or KISS-GP) is a great way to scale a GP up to very large datasets (100,000+ data points).
Kernel interpolation for scalable structured Gaussian processes (KISS-GP) was introduced in this paper:
http://proceedings.mlr.press/v37/wilson15.pdf
SKI is asymptotically very fast (nearly linear), very precise (error decays cubically), and easy to use in GPyTorch!
As you will see in this tutorial, it's really easy to apply SKI to an existing model. All you have to do is wrap your kernel module with a GridInterpolationKernel.
End of explanation
"""
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * math.pi) + torch.randn(train_x.size()) * 0.2)
"""
Explanation: KISS-GP for 1D Data
Set up training data
We'll learn a simple sinusoid, but with lots of training data points. At 1000 points, this is where scalable methods start to become useful.
End of explanation
"""
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that. Here we are using a grid that has the same number of points as the training data (a ratio of 1.0). Performance can be sensitive to this parameter, so you may want to adjust it for your own problem on a validation set.
grid_size = gpytorch.utils.grid.choose_grid_size(train_x,1.0)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.RBFKernel(), grid_size=grid_size, num_dims=1
)
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
"""
Explanation: Set up the model
The model should be somewhat similar to the ExactGP model in the simple regression example.
The only difference: we're wrapping our kernel module in a GridInterpolationKernel. This signals to GPyTorch that you want to approximate this kernel matrix with SKI.
SKI has only one hyperparameter that you need to worry about: the grid size. For 1D functions, a good starting place is to use as many grid points as training points. (Don't worry - the grid points are really cheap to use!). You can use the gpytorch.utils.grid.choose_grid_size helper to get a good starting point.
If you want, you can also explicitly determine the grid bounds of the SKI approximation using the grid_bounds argument. However, it's easier if you don't use this argument - then GPyTorch automatically chooses the best bounds for you.
End of explanation
"""
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 1 if smoke_test else 30
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
"""
Explanation: Train the model hyperparameters
Even with 1000 points, this model still trains fast! SKI scales (essentially) linearly with the amount of data, whereas standard GP inference scales quadratically (in GPyTorch).
End of explanation
"""
# Put model & likelihood into eval mode
model.eval()
likelihood.eval()
# Initialize plot
f, ax = plt.subplots(1, 1, figsize=(4, 3))
# The gpytorch.settings.fast_pred_var flag activates LOVE (for fast variances)
# See https://arxiv.org/abs/1803.06058
with torch.no_grad(), gpytorch.settings.fast_pred_var():
test_x = torch.linspace(0, 1, 51)
prediction = likelihood(model(test_x))
mean = prediction.mean
# Get lower and upper predictive bounds
lower, upper = prediction.confidence_region()
# Plot the training data as black stars
def ax_plot():
if smoke_test: return # this is for running the notebook in our testing framework
ax.plot(train_x.detach().numpy(), train_y.detach().numpy(), 'k*')
# Plot predictive means as blue line
ax.plot(test_x.detach().numpy(), mean.detach().numpy(), 'b')
# Plot confidence bounds as lightly shaded region
ax.fill_between(test_x.detach().numpy(), lower.detach().numpy(), upper.detach().numpy(), alpha=0.5)
ax.set_ylim([-3, 3])
ax.legend(['Observed Data', 'Mean', 'Confidence'])
ax_plot()
"""
Explanation: Make predictions
SKI is especially well-suited for predictions. It can compute predictive means in constant time, and with LOVE enabled (see this notebook), predictive variances are also constant time.
End of explanation
"""
# We make an nxn grid of training points spaced every 1/(n-1) on [0,1]x[0,1]
n = 40
train_x = torch.zeros(pow(n, 2), 2)
for i in range(n):
for j in range(n):
train_x[i * n + j][0] = float(i) / (n-1)
train_x[i * n + j][1] = float(j) / (n-1)
# True function is sin( 2*pi*(x0+x1))
train_y = torch.sin((train_x[:, 0] + train_x[:, 1]) * (2 * math.pi)) + torch.randn_like(train_x[:, 0]).mul(0.01)
"""
Explanation: KISS-GP for 2D-4D Data
For 2-4D functions, SKI (or KISS-GP) can work very well out-of-the-box on larger datasets (100,000+ data points).
Kernel interpolation for scalable structured Gaussian processes (KISS-GP) was introduced in this paper:
http://proceedings.mlr.press/v37/wilson15.pdf
One thing to watch out for with multidimensional SKI: you can't use as fine-grained a grid. If you have a high-dimensional problem, you may want to try one of the other scalable regression methods.
Set up train data
Here we're learning a simple sin function - but in 2 dimensions
End of explanation
"""
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that
grid_size = gpytorch.utils.grid.choose_grid_size(train_x)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.RBFKernel(), grid_size=grid_size, num_dims=2
)
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
"""
Explanation: The model
As with the 1D case, applying SKI to a multidimensional kernel is as simple as wrapping that kernel with a GridInterpolationKernel. You'll want to be sure to set num_dims though!
SKI has only one hyperparameter that you need to worry about: the grid size. For 1D functions, a good starting place is to use as many grid points as training points. (Don't worry - the grid points are really cheap to use!). You can use the gpytorch.utils.grid.choose_grid_size helper to get a good starting point.
If you want, you can also explicitly determine the grid bounds of the SKI approximation using the grid_bounds argument. However, it's easier if you don't use this argument - then GPyTorch automatically chooses the best bounds for you.
End of explanation
"""
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
def train():
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
%time train()
"""
Explanation: Train the model hyperparameters
End of explanation
"""
# Set model and likelihood into evaluation mode
model.eval()
likelihood.eval()
# Generate nxn grid of test points spaced on a grid of size 1/(n-1) in [0,1]x[0,1]
n = 10
test_x = torch.zeros(int(pow(n, 2)), 2)
for i in range(n):
for j in range(n):
test_x[i * n + j][0] = float(i) / (n-1)
test_x[i * n + j][1] = float(j) / (n-1)
with torch.no_grad(), gpytorch.settings.fast_pred_var():
observed_pred = likelihood(model(test_x))
pred_labels = observed_pred.mean.view(n, n)
# Calculate absolute error
test_y_actual = torch.sin(((test_x[:, 0] + test_x[:, 1]) * (2 * math.pi))).view(n, n)
delta_y = torch.abs(pred_labels - test_y_actual).detach().numpy()
# Define a plotting function
def ax_plot(f, ax, y_labels, title):
if smoke_test: return # this is for running the notebook in our testing framework
im = ax.imshow(y_labels)
ax.set_title(title)
f.colorbar(im)
# Plot our predictive means
f, observed_ax = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax, pred_labels, 'Predicted Values (Likelihood)')
# Plot the true values
f, observed_ax2 = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax2, test_y_actual, 'Actual Values (Likelihood)')
# Plot the absolute errors
f, observed_ax3 = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax3, delta_y, 'Absolute Error Surface')
"""
Explanation: Make predictions with the model
End of explanation
"""
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that
# We're setting Kronecker structure to False because we're using an additive structure decomposition
grid_size = gpytorch.utils.grid.choose_grid_size(train_x, kronecker_structure=False)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.AdditiveStructureKernel(
gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.RBFKernel(), grid_size=128, num_dims=1
)
), num_dims=2
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
"""
Explanation: KISS-GP for higher dimensional data w/ Additive Structure
The method above won't scale to data with much more than ~4 dimensions, since the cost of creating the grid grows exponentially in the number of dimensions. Therefore, we'll need to make some additional approximations.
If the function you are modeling has additive structure across its dimensions, then SKI can be one of the most efficient methods for your problem.
To set this up, we'll wrap the GridInterpolationKernel used in the previous two models with one additional kernel: the AdditiveStructureKernel. The model will look something like this:
End of explanation
"""
|
nadvamir/deep-learning
|
transfer-learning/Transfer_Learning_Solution.ipynb
|
mit
|
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
"""
Explanation: Transfer Learning
Most of the time you won't want to train a whole convolutional network yourself. Training modern ConvNets on huge datasets like ImageNet takes weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using VGGNet trained on the ImageNet dataset as a feature extractor. Below is a diagram of the VGGNet architecture.
<img src="assets/cnnarchitecture.jpg" width=700px>
VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.
You can read more about transfer learning from the CS231n course notes.
Pretrained VGGNet
We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg.
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link.
End of explanation
"""
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
"""
Explanation: Flower power
Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the TensorFlow inception tutorial.
End of explanation
"""
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
"""
Explanation: ConvNet Codes
Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
Here we're using the vgg16 module from tensorflow_vgg. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from the source code):
```
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
```
So what we want are the values of the first fully connected layer, after being ReLUd (self.relu6). To build the network, we use
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
This creates the vgg object, then builds the graph with vgg.build(input_). Then to get the values from the layer,
feed_dict = {input_: images}
codes = sess.run(vgg.relu6, feed_dict=feed_dict)
End of explanation
"""
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
images = np.concatenate(batch)
feed_dict = {input_: images}
codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
"""
Explanation: Below I'm running images through the VGG network in batches.
End of explanation
"""
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
"""
Explanation: Building the Classifier
Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.
End of explanation
"""
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)
"""
Explanation: Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels.
End of explanation
"""
from sklearn.model_selection import StratifiedShuffleSplit
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
train_idx, val_idx = next(ss.split(codes, labels))
half_val_len = int(len(val_idx)/2)
val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
"""
Explanation: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn.
You can create the splitter like so:
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
Then split the data with
splitter = ss.split(x, y)
ss.split returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use next(splitter) to get the indices. Be sure to read the documentation and the user guide.
Exercise: Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.
End of explanation
"""
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
fc = tf.contrib.layers.fully_connected(inputs_, 256)
logits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1], activation_fn=None)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer().minimize(cost)
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: If you did it right, you should see these sizes for the training sets:
Train shapes (x, y): (2936, 4096) (2936, 5)
Validation shapes (x, y): (367, 4096) (367, 5)
Test shapes (x, y): (367, 4096) (367, 5)
Classifier layers
Once you have the convolutional codes, you just need to build a classifier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
Exercise: With the codes and labels loaded, build the classifier. Consider the codes as your inputs; each of them is a 4096-dimensional vector. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.
End of explanation
"""
def get_batches(x, y, n_batches=10):
""" Return a generator that yields batches from arrays x and y. """
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
"""
Explanation: Batches!
Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.
End of explanation
"""
epochs = 10
iteration = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in get_batches(train_x, train_y):
feed = {inputs_: x,
labels_: y}
loss, _ = sess.run([cost, optimizer], feed_dict=feed)
print("Epoch: {}/{}".format(e+1, epochs),
"Iteration: {}".format(iteration),
"Training loss: {:.5f}".format(loss))
iteration += 1
if iteration % 5 == 0:
feed = {inputs_: val_x,
labels_: val_y}
val_acc = sess.run(accuracy, feed_dict=feed)
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Validation Acc: {:.4f}".format(val_acc))
saver.save(sess, "checkpoints/flowers.ckpt")
"""
Explanation: Training
Here, we'll train the network.
Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help.
End of explanation
"""
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
"""
Explanation: Testing
Below you see the test accuracy. You can also see the predictions returned for images.
End of explanation
"""
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
"""
Explanation: Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
End of explanation
"""
|
Diyago/Machine-Learning-scripts
|
DEEP LEARNING/Pytorch from scratch/TODO/GAN/cycle-gan/CycleGAN_Exercise.ipynb
|
apache-2.0
|
# loading in and transforming data
import os
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# visualizing data
import matplotlib.pyplot as plt
import numpy as np
import warnings
%matplotlib inline
"""
Explanation: CycleGAN, Image-to-Image Translation
In this notebook, we're going to define and train a CycleGAN to read in an image from a set $X$ and transform it so that it looks as if it belongs in set $Y$. Specifically, we'll look at a set of images of Yosemite National Park taken either during the summer or the winter. The seasons are our two domains!
The objective will be to train generators that learn to transform an image from domain $X$ into an image that looks like it came from domain $Y$ (and vice versa).
Some examples of image data in both sets are pictured below.
<img src='notebook_images/XY_season_images.png' width=50% />
Unpaired Training Data
These images do not come with labels, but CycleGANs give us a way to learn the mapping between one image domain and another using an unsupervised approach. A CycleGAN is designed for image-to-image translation and it learns from unpaired training data. This means that in order to train a generator to translate images from domain $X$ to domain $Y$, we do not have to have exact correspondences between individual images in those domains. For example, in the paper that introduced CycleGANs, the authors are able to translate between images of horses and zebras, even though there are no images of a zebra in exactly the same position as a horse or with exactly the same background, etc. Thus, CycleGANs enable learning a mapping from one domain $X$ to another domain $Y$ without having to find perfectly-matched, training pairs!
<img src='notebook_images/horse2zebra.jpg' width=50% />
CycleGAN and Notebook Structure
A CycleGAN is made of two types of networks: discriminators, and generators. In this example, the discriminators are responsible for classifying images as real or fake (for both $X$ and $Y$ kinds of images). The generators are responsible for generating convincing, fake images for both kinds of images.
This notebook will detail the steps you should take to define and train such a CycleGAN.
You'll load in the image data using PyTorch's DataLoader class to efficiently read in images from a specified directory.
Then, you'll be tasked with defining the CycleGAN architecture according to provided specifications. You'll define the discriminator and the generator models.
You'll complete the training cycle by calculating the adversarial and cycle consistency losses for the generator and discriminator network and completing a number of training epochs. It's suggested that you enable GPU usage for training.
Finally, you'll evaluate your model by looking at the loss over time and looking at sample, generated images.
Load and Visualize the Data
We'll first load in and visualize the training data, importing the necessary libraries to do so.
If you are working locally, you'll need to download the data as a zip file by clicking here.
It may be named summer2winter-yosemite/ with a dash or an underscore, so take note, extract the data to your home directory and make sure the below image_dir matches. Then you can proceed with the following loading code.
End of explanation
"""
def get_data_loader(image_type, image_dir='summer2winter-yosemite',
image_size=128, batch_size=16, num_workers=0):
"""Returns training and test data loaders for a given image type, either 'summer' or 'winter'.
These images will be resized to 128x128x3, by default, converted into Tensors, and normalized.
"""
# resize and normalize the images
transform = transforms.Compose([transforms.Resize(image_size), # resize to 128x128
transforms.ToTensor()])
# get training and test directories
image_path = './' + image_dir
train_path = os.path.join(image_path, image_type)
test_path = os.path.join(image_path, 'test_{}'.format(image_type))
# define datasets using ImageFolder
train_dataset = datasets.ImageFolder(train_path, transform)
test_dataset = datasets.ImageFolder(test_path, transform)
# create and return DataLoaders
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return train_loader, test_loader
# Create train and test dataloaders for images from the two domains X and Y
# image_type = directory names for our data
dataloader_X, test_dataloader_X = get_data_loader(image_type='summer')
dataloader_Y, test_dataloader_Y = get_data_loader(image_type='winter')
"""
Explanation: DataLoaders
The get_data_loader function returns training and test DataLoaders that can load data efficiently and in specified batches. The function has the following parameters:
* image_type: summer or winter, the names of the directories where the X and Y images are stored
* image_dir: name of the main image directory, which holds all training and test images
* image_size: resized, square image dimension (all images will be resized to this dim)
* batch_size: number of images in one batch of data
The test data is strictly for feeding to our generators, later on, so we can visualize some generated samples on fixed, test data.
You can see that this function is also responsible for making sure our images are of the right, square size (128x128x3) and converted into Tensor image types.
It's suggested that you use the default values of these parameters.
Note: If you are trying this code on a different set of data, you may get better results with larger image_size and batch_size parameters. If you change the batch_size, make sure that you create complete batches in the training loop otherwise you may get an error when trying to save sample data.
End of explanation
"""
# helper imshow function
def imshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# get some images from X
dataiter = iter(dataloader_X)
# the "_" is a placeholder for no labels
images, _ = dataiter.next()
# show images
fig = plt.figure(figsize=(12, 8))
imshow(torchvision.utils.make_grid(images))
"""
Explanation: Display some Training Images
Below we provide a function imshow that reshape some given images and converts them to NumPy images so that they can be displayed by plt. This cell should display a grid that contains a batch of image data from set $X$.
End of explanation
"""
# get some images from Y
dataiter = iter(dataloader_Y)
images, _ = dataiter.next()
# show images
fig = plt.figure(figsize=(12,8))
imshow(torchvision.utils.make_grid(images))
"""
Explanation: Next, let's visualize a batch of images from set $Y$.
End of explanation
"""
# current range
img = images[0]
print('Min: ', img.min())
print('Max: ', img.max())
# helper scale function
def scale(x, feature_range=(-1, 1)):
''' Scale takes in an image x and returns that image, scaled
with a feature_range of pixel values from -1 to 1.
    This function assumes that the input x is already scaled from 0-1.'''
# scale from 0-1 to feature_range
min, max = feature_range
x = x * (max - min) + min
return x
# scaled range
scaled_img = scale(img)
print('Scaled min: ', scaled_img.min())
print('Scaled max: ', scaled_img.max())
"""
Explanation: Pre-processing: scaling from -1 to 1
We need to do a bit of pre-processing; we know that the output of our tanh activated generator will contain pixel values in a range from -1 to 1, and so, we need to rescale our training images to a range of -1 to 1. (Right now, they are in a range from 0-1.)
End of explanation
"""
import torch.nn as nn
import torch.nn.functional as F
# helper conv function
def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
"""Creates a convolutional layer, with optional batch normalization.
"""
layers = []
conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
layers.append(conv_layer)
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels))
return nn.Sequential(*layers)
"""
Explanation: Define the Model
A CycleGAN is made of two discriminator and two generator networks.
Discriminators
The discriminators, $D_X$ and $D_Y$, in this CycleGAN are convolutional neural networks that see an image and attempt to classify it as real or fake. In this case, real is indicated by an output close to 1 and fake as close to 0. The discriminators have the following architecture:
<img src='notebook_images/discriminator_layers.png' width=80% />
This network sees a 128x128x3 image, and passes it through 5 convolutional layers that downsample the image by a factor of 2. The first four convolutional layers have a BatchNorm and ReLu activation function applied to their output, and the last acts as a classification layer that outputs one value.
Convolutional Helper Function
To define the discriminators, you're expected to use the provided conv function, which creates a convolutional layer + an optional batch norm layer.
End of explanation
"""
class Discriminator(nn.Module):
def __init__(self, conv_dim=64):
super(Discriminator, self).__init__()
# Define all convolutional layers
# Should accept an RGB image as input and output a single value
def forward(self, x):
# define feedforward behavior
return x
"""
Explanation: Define the Discriminator Architecture
Your task is to fill in the __init__ function with the specified 5 layer conv net architecture. Both $D_X$ and $D_Y$ have the same architecture, so we only need to define one class, and later instantiate two discriminators.
It's recommended that you use a kernel size of 4x4 and use that to determine the correct stride and padding size for each layer. This Stanford resource may also help in determining stride and padding sizes.
Define your convolutional layers in __init__
Then fill in the forward behavior of the network
The forward function defines how an input image moves through the discriminator, and the most important thing is to pass it through your convolutional layers in order, with a ReLu activation function applied to all but the last layer.
You should not apply a sigmoid activation function to the output, here, and that is because we are planning on using a squared error loss for training. And you can read more about this loss function, later in the notebook.
End of explanation
"""
# residual block class
class ResidualBlock(nn.Module):
"""Defines a residual block.
This adds an input x to a convolutional layer (applied to x) with the same size input and output.
These blocks allow a model to learn an effective transformation from one domain to another.
"""
def __init__(self, conv_dim):
super(ResidualBlock, self).__init__()
# conv_dim = number of inputs
# define two convolutional layers + batch normalization that will act as our residual function, F(x)
# layers should have the same shape input as output; I suggest a kernel_size of 3
def forward(self, x):
# apply a ReLu activation the outputs of the first layer
# return a summed output, x + resnet_block(x)
return x
"""
Explanation: Generators
The generators, G_XtoY and G_YtoX (sometimes called F), are made of an encoder, a conv net that is responsible for turning an image into a smaller feature representation, and a decoder, a transpose_conv net that is responsible for turning that representation into an transformed image. These generators, one from XtoY and one from YtoX, have the following architecture:
<img src='notebook_images/cyclegan_generator_ex.png' width=90% />
This network sees a 128x128x3 image, compresses it into a feature representation as it goes through three convolutional layers and reaches a series of residual blocks. It goes through a few (typically 6 or more) of these residual blocks, then it goes through three transpose convolutional layers (sometimes called de-conv layers) which upsample the output of the resnet blocks and create a new image!
Note that most of the convolutional and transpose-convolutional layers have BatchNorm and ReLu functions applied to their outputs with the exception of the final transpose convolutional layer, which has a tanh activation function applied to the output. Also, the residual blocks are made of convolutional and batch normalization layers, which we'll go over in more detail, next.
Residual Block Class
To define the generators, you're expected to define a ResidualBlock class which will help you connect the encoder and decoder portions of the generators. You might be wondering, what exactly is a Resnet block? It may sound familiar from something like ResNet50 for image classification, pictured below.
<img src='notebook_images/resnet_50.png' width=90%/>
ResNet blocks rely on connecting the output of one layer with the input of an earlier layer. The motivation for this structure is as follows: very deep neural networks can be difficult to train. Deeper networks are more likely to have vanishing or exploding gradients and, therefore, have trouble reaching convergence; batch normalization helps with this a bit. However, during training, we often see that deep networks respond with a kind of training degradation. Essentially, the training accuracy stops improving and gets saturated at some point during training. In the worst cases, deep models would see their training accuracy actually worsen over time!
One solution to this problem is to use Resnet blocks that allow us to learn so-called residual functions as they are applied to layer inputs. You can read more about this proposed architecture in the paper, Deep Residual Learning for Image Recognition by Kaiming He et. al, and the below image is from that paper.
<img src='notebook_images/resnet_block.png' width=40%/>
Residual Functions
Usually, when we create a deep learning model, the model (several layers with activations applied) is responsible for learning a mapping, M, from an input x to an output y.
M(x) = y (Equation 1)
Instead of learning a direct mapping from x to y, we can instead define a residual function
F(x) = M(x) - x
This looks at the difference between a mapping applied to x and the original input, x. F(x) is, typically, two convolutional layers + normalization layer and a ReLu in between. These convolutional layers should have the same number of inputs as outputs. This mapping can then be written as the following; a function of the residual function and the input x. The addition step creates a kind of loop that connects the input x to the output, y:
M(x) = F(x) + x (Equation 2) or
y = F(x) + x (Equation 3)
Optimizing a Residual Function
The idea is that it is easier to optimize this residual function F(x) than it is to optimize the original mapping M(x). Consider an example; what if we want y = x?
From our first, direct mapping equation, Equation 1, we could set M(x) = x but it is easier to solve the residual equation F(x) = 0, which, when plugged in to Equation 3, yields y = x.
Defining the ResidualBlock Class
To define the ResidualBlock class, we'll define residual functions (a series of layers), apply them to an input x and add them to that same input. This is defined just like any other neural network, with an __init__ function and the addition step in the forward function.
In our case, you'll want to define the residual block as:
* Two convolutional layers with the same size input and output
* Batch normalization applied to the outputs of the convolutional layers
* A ReLu function on the output of the first convolutional layer
Then, in the forward function, add the input x to this residual block. Feel free to use the helper conv function from above to create this block.
End of explanation
"""
# helper deconv function
def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
"""Creates a transpose convolutional layer, with optional batch normalization.
"""
layers = []
# append transpose conv layer
layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))
# optional batch norm layer
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels))
return nn.Sequential(*layers)
"""
Explanation: Transpose Convolutional Helper Function
To define the generators, you're expected to use the above conv function, ResidualBlock class, and the below deconv helper function, which creates a transpose convolutional layer + an optional batchnorm layer.
End of explanation
"""
class CycleGenerator(nn.Module):
def __init__(self, conv_dim=64, n_res_blocks=6):
super(CycleGenerator, self).__init__()
# 1. Define the encoder part of the generator
# 2. Define the resnet part of the generator
# 3. Define the decoder part of the generator
def forward(self, x):
"""Given an image x, returns a transformed image."""
# define feedforward behavior, applying activations as necessary
return x
"""
Explanation: Define the Generator Architecture
Complete the __init__ function with the specified 3 layer encoder convolutional net, a series of residual blocks (the number of which is given by n_res_blocks), and then a 3 layer decoder transpose convolutional net.
Then complete the forward function to define the forward behavior of the generators. Recall that the last layer has a tanh activation function.
Both $G_{XtoY}$ and $G_{YtoX}$ have the same architecture, so we only need to define one class, and later instantiate two generators.
End of explanation
"""
def create_model(g_conv_dim=64, d_conv_dim=64, n_res_blocks=6):
"""Builds the generators and discriminators."""
# Instantiate generators
    G_XtoY = CycleGenerator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)
    G_YtoX = CycleGenerator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)
    # Instantiate discriminators
    D_X = Discriminator(conv_dim=d_conv_dim)
    D_Y = Discriminator(conv_dim=d_conv_dim)
# move models to GPU, if available
if torch.cuda.is_available():
device = torch.device("cuda:0")
G_XtoY.to(device)
G_YtoX.to(device)
D_X.to(device)
D_Y.to(device)
print('Models moved to GPU.')
else:
print('Only CPU available.')
return G_XtoY, G_YtoX, D_X, D_Y
# call the function to get models
G_XtoY, G_YtoX, D_X, D_Y = create_model()
"""
Explanation: Create the complete network
Using the classes you defined earlier, you can define the discriminators and generators necessary to create a complete CycleGAN. The given parameters should work for training.
First, create two discriminators, one for checking if $X$ sample images are real, and one for checking if $Y$ sample images are real. Then the generators. Instantiate two of them, one for transforming images from domain $X$ into domain $Y$ and one for transforming images from domain $Y$ back into domain $X$.
End of explanation
"""
# helper function for printing the model architecture
def print_models(G_XtoY, G_YtoX, D_X, D_Y):
"""Prints model information for the generators and discriminators.
"""
print(" G_XtoY ")
print("-----------------------------------------------")
print(G_XtoY)
print()
print(" G_YtoX ")
print("-----------------------------------------------")
print(G_YtoX)
print()
print(" D_X ")
print("-----------------------------------------------")
print(D_X)
print()
print(" D_Y ")
print("-----------------------------------------------")
print(D_Y)
print()
# print all of the models
print_models(G_XtoY, G_YtoX, D_X, D_Y)
"""
Explanation: Check that you've implemented this correctly
The function create_model should return the two generator and two discriminator networks. After you've defined these discriminator and generator components, it's good practice to check your work. The easiest way to do this is to print out your model architecture and read through it to make sure the parameters are what you expected. The next cell will print out their architectures.
End of explanation
"""
def real_mse_loss(D_out):
    # how close is the produced output from being "real"? (target = 1)
    return torch.mean((D_out - 1)**2)

def fake_mse_loss(D_out):
    # how close is the produced output from being "fake"? (target = 0)
    return torch.mean(D_out**2)

def cycle_consistency_loss(real_im, reconstructed_im, lambda_weight):
    # calculate reconstruction loss (mean absolute error) and return the weighted loss
    return lambda_weight * torch.mean(torch.abs(real_im - reconstructed_im))
"""
Explanation: Discriminator and Generator Losses
Computing the discriminator and the generator losses are key to getting a CycleGAN to train.
<img src='notebook_images/CycleGAN_loss.png' width=90% height=90% />
Image from original paper by Jun-Yan Zhu et. al.
The CycleGAN contains two mapping functions $G: X \rightarrow Y$ and $F: Y \rightarrow X$, and associated adversarial discriminators $D_Y$ and $D_X$. (a) $D_Y$ encourages $G$ to translate $X$ into outputs indistinguishable from domain $Y$, and vice versa for $D_X$ and $F$.
To further regularize the mappings, we introduce two cycle consistency losses that capture the intuition that if
we translate from one domain to the other and back again we should arrive at where we started. (b) Forward cycle-consistency loss and (c) backward cycle-consistency loss.
Least Squares GANs
We've seen that regular GANs treat the discriminator as a classifier with the sigmoid cross entropy loss function. However, this loss function may lead to the vanishing gradients problem during the learning process. To overcome such a problem, we'll use a least squares loss function for the discriminator. This structure is also referred to as a least squares GAN or LSGAN, and you can read the original paper on LSGANs, here. The authors show that LSGANs are able to generate higher quality images than regular GANs and that this loss type is a bit more stable during training!
Discriminator Losses
The discriminator losses will be mean squared errors between the output of the discriminator, given an image, and the target value, 0 or 1, depending on whether it should classify that image as fake or real. For example, for a real image, x, we can train $D_X$ by looking at how close it is to recognizing and image x as real using the mean squared error:
out_x = D_X(x)
real_err = torch.mean((out_x-1)**2)
Generator Losses
Calculating the generator losses will look somewhat similar to calculating the discriminator loss; there will still be steps in which you generate fake images that look like they belong to the set of $X$ images but are based on real images in set $Y$, and vice versa. You'll compute the "real loss" on those generated images by looking at the output of the discriminator as it's applied to these fake images; this time, your generator aims to make the discriminator classify these fake images as real images.
Cycle Consistency Loss
In addition to the adversarial losses, the generator loss terms will also include the cycle consistency loss. This loss is a measure of how good a reconstructed image is, when compared to an original image.
Say you have a fake, generated image, x_hat, and a real image, y. You can get a reconstructed y_hat by applying G_XtoY(x_hat) = y_hat and then check to see if this reconstruction y_hat and the original image y match. For this, we recommend calculating the L1 loss, which is an absolute difference, between reconstructed and real images. You may also choose to multiply this loss by some weight value lambda_weight to convey its importance.
<img src='notebook_images/reconstruction_error.png' width=40% height=40% />
The total generator loss will be the sum of the generator losses and the forward and backward cycle consistency losses.
Define Loss Functions
To help us calculate the discriminator and generator losses during training, let's define some helpful loss functions. Here, we'll define three.
1. real_mse_loss that looks at the output of a discriminator and returns the error based on how close that output is to being classified as real. This should be a mean squared error.
2. fake_mse_loss that looks at the output of a discriminator and returns the error based on how close that output is to being classified as fake. This should be a mean squared error.
3. cycle_consistency_loss that looks at a set of real image and a set of reconstructed/generated images, and returns the mean absolute error between them. This has a lambda_weight parameter that will weight the mean absolute error in a batch.
It's recommended that you take a look at the original CycleGAN paper to get a starting value for lambda_weight.
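For reference, a minimal sketch of what these three helpers might look like, assuming torch is already imported as in the rest of the notebook (the definitions in the exercise cell may differ slightly):
def real_mse_loss(D_out):
    # how close is the discriminator output to the "real" target of 1?
    return torch.mean((D_out - 1)**2)
def fake_mse_loss(D_out):
    # how close is the discriminator output to the "fake" target of 0?
    return torch.mean(D_out**2)
def cycle_consistency_loss(real_im, reconstructed_im, lambda_weight):
    # weighted mean absolute error between a real image and its reconstruction
    return lambda_weight * torch.mean(torch.abs(real_im - reconstructed_im))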
End of explanation
"""
import torch.optim as optim
# hyperparams for Adam optimizers
lr=
beta1=
beta2=
g_params = list(G_XtoY.parameters()) + list(G_YtoX.parameters()) # Get generator parameters
# Create optimizers for the generators and discriminators
g_optimizer = optim.Adam(g_params, lr, [beta1, beta2])
d_x_optimizer = optim.Adam(D_X.parameters(), lr, [beta1, beta2])
d_y_optimizer = optim.Adam(D_Y.parameters(), lr, [beta1, beta2])
"""
Explanation: Define the Optimizers
Next, let's define how this model will update its weights. This, like the GANs you may have seen before, uses Adam optimizers for the discriminator and generator. It's again recommended that you take a look at the original CycleGAN paper to get starting hyperparameter values.
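If you just want values known to work reasonably well, CycleGAN-style implementations commonly use the following (an assumption worth checking against the paper before relying on it):
lr = 0.0002
beta1 = 0.5
beta2 = 0.999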
End of explanation
"""
# import save code
from helpers import save_samples, checkpoint
# train the network
def training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y,
n_epochs=1000):
print_every=10
# keep track of losses over time
losses = []
test_iter_X = iter(test_dataloader_X)
test_iter_Y = iter(test_dataloader_Y)
# Get some fixed data from domains X and Y for sampling. These are images that are held
# constant throughout training, that allow us to inspect the model's performance.
fixed_X = test_iter_X.next()[0]
fixed_Y = test_iter_Y.next()[0]
fixed_X = scale(fixed_X) # make sure to scale to a range -1 to 1
fixed_Y = scale(fixed_Y)
# batches per epoch
iter_X = iter(dataloader_X)
iter_Y = iter(dataloader_Y)
batches_per_epoch = min(len(iter_X), len(iter_Y))
for epoch in range(1, n_epochs+1):
# Reset iterators for each epoch
if epoch % batches_per_epoch == 0:
iter_X = iter(dataloader_X)
iter_Y = iter(dataloader_Y)
images_X, _ = iter_X.next()
images_X = scale(images_X) # make sure to scale to a range -1 to 1
images_Y, _ = iter_Y.next()
images_Y = scale(images_Y)
# move images to GPU if available (otherwise stay on CPU)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
images_X = images_X.to(device)
images_Y = images_Y.to(device)
# ============================================
# TRAIN THE DISCRIMINATORS
# ============================================
## First: D_X, real and fake loss components ##
# 1. Compute the discriminator losses on real images
# 2. Generate fake images that look like domain X based on real images in domain Y
# 3. Compute the fake loss for D_X
# 4. Compute the total loss and perform backprop
d_x_loss =
## Second: D_Y, real and fake loss components ##
d_y_loss =
# =========================================
# TRAIN THE GENERATORS
# =========================================
## First: generate fake X images and reconstructed Y images ##
# 1. Generate fake images that look like domain X based on real images in domain Y
# 2. Compute the generator loss based on domain X
# 3. Create a reconstructed y
# 4. Compute the cycle consistency loss (the reconstruction loss)
## Second: generate fake Y images and reconstructed X images ##
# 5. Add up all generator and reconstructed losses and perform backprop
g_total_loss =
# Print the log info
if epoch % print_every == 0:
# append real and fake discriminator losses and the generator loss
losses.append((d_x_loss.item(), d_y_loss.item(), g_total_loss.item()))
print('Epoch [{:5d}/{:5d}] | d_X_loss: {:6.4f} | d_Y_loss: {:6.4f} | g_total_loss: {:6.4f}'.format(
epoch, n_epochs, d_x_loss.item(), d_y_loss.item(), g_total_loss.item()))
sample_every=100
# Save the generated samples
if epoch % sample_every == 0:
G_YtoX.eval() # set generators to eval mode for sample generation
G_XtoY.eval()
save_samples(epoch, fixed_Y, fixed_X, G_YtoX, G_XtoY, batch_size=16)
G_YtoX.train()
G_XtoY.train()
# uncomment these lines, if you want to save your model
# checkpoint_every=1000
# # Save the model parameters
# if epoch % checkpoint_every == 0:
# checkpoint(epoch, G_XtoY, G_YtoX, D_X, D_Y)
return losses
n_epochs = 1000 # keep this small when testing if a model first works, then increase it to >=1000
losses = training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, n_epochs=n_epochs)
"""
Explanation: Training a CycleGAN
When a CycleGAN sees one batch of real images from sets $X$ and $Y$, it trains by performing the following steps:
Training the Discriminators
1. Compute the discriminator $D_X$ loss on real images
2. Generate fake images that look like domain $X$ based on real images in domain $Y$
3. Compute the fake loss for $D_X$
4. Compute the total loss and perform backpropagation and $D_X$ optimization
5. Repeat steps 1-4 only with $D_Y$ and your domains switched!
Training the Generators
1. Generate fake images that look like domain $X$ based on real images in domain $Y$
2. Compute the generator loss based on how $D_X$ responds to fake $X$
3. Generate reconstructed $\hat{Y}$ images based on the fake $X$ images generated in step 1
4. Compute the cycle consistency loss by comparing the reconstructions with real $Y$ images
5. Repeat steps 1-4 only swapping domains
6. Add up all the generator and reconstruction losses and perform backpropagation + optimization
<img src='notebook_images/cycle_consistency_ex.png' width=70% />
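As a concrete illustration of discriminator steps 1-4 above, a minimal sketch of the $D_X$ update (assuming the loss helpers and optimizers defined earlier in the notebook; your exercise code may be organized differently) could look like:
d_x_optimizer.zero_grad()
out_x = D_X(images_X)                      # 1. loss on real X images
d_x_real_loss = real_mse_loss(out_x)
fake_X = G_YtoX(images_Y)                  # 2. fake X images from real Y images
out_fake_x = D_X(fake_X)                   # 3. fake loss for D_X
d_x_fake_loss = fake_mse_loss(out_fake_x)
d_x_loss = d_x_real_loss + d_x_fake_loss   # 4. total loss, backprop, and optimizer step
d_x_loss.backward()
d_x_optimizer.step()
The $D_Y$ update and the generator update follow the same pattern with the domains, networks, and optimizers swapped.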
Saving Your Progress
A CycleGAN repeats its training process, alternating between training the discriminators and the generators, for a specified number of training iterations. You've been given code that saves example images generated by the CycleGAN after a certain number of training iterations. Along with looking at the losses, these example generations should give you an idea of how well your network has trained.
Below, you may choose to keep all default parameters; your only task is to calculate the appropriate losses and complete the training cycle.
End of explanation
"""
fig, ax = plt.subplots(figsize=(12,8))
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator, X', alpha=0.5)
plt.plot(losses.T[1], label='Discriminator, Y', alpha=0.5)
plt.plot(losses.T[2], label='Generators', alpha=0.5)
plt.title("Training Losses")
plt.legend()
"""
Explanation: Tips on Training and Loss Patterns
A lot of experimentation goes into finding the best hyperparameters such that the generators and discriminators don't overpower each other. It's often a good starting point to look at existing papers to find what has worked in previous experiments; I'd recommend this DCGAN paper in addition to the original CycleGAN paper to see what worked for them. Then, you can try your own experiments based on that foundation.
Discriminator Losses
When you display the generator and discriminator losses you should see that there is always some discriminator loss; recall that we are trying to design a model that can generate good "fake" images. So, the ideal discriminator will not be able to tell the difference between real and fake images and, as such, will always have some loss. You should also see that $D_X$ and $D_Y$ are roughly at the same loss levels; if they are not, this indicates that your training is favoring one type of discriminator over the other, and you may need to look at biases in your models or data.
Generator Loss
The generator's loss should start significantly higher than the discriminator losses because it accounts for the losses of both generators and the weighted reconstruction errors. You should see this loss decrease a lot at the start of training because the initial generated images are often far off from being good fakes. After some time it may level off; this is normal since the generator and discriminator are both improving as they train. If you see that the loss is jumping around a lot over time, you may want to try decreasing your learning rates or weighting your cycle consistency loss a little more or less heavily.
End of explanation
"""
import matplotlib.image as mpimg
# helper visualization code
def view_samples(iteration, sample_dir='samples_cyclegan'):
# samples are named by iteration
path_XtoY = os.path.join(sample_dir, 'sample-{:06d}-X-Y.png'.format(iteration))
path_YtoX = os.path.join(sample_dir, 'sample-{:06d}-Y-X.png'.format(iteration))
# read in those samples
    try:
        x2y = mpimg.imread(path_XtoY)
        y2x = mpimg.imread(path_YtoX)
    except FileNotFoundError:
        # bail out early so the plotting code below doesn't hit undefined variables
        print('Invalid number of iterations.')
        return
fig, (ax1, ax2) = plt.subplots(figsize=(18,20), nrows=2, ncols=1, sharey=True, sharex=True)
ax1.imshow(x2y)
ax1.set_title('X to Y')
ax2.imshow(y2x)
ax2.set_title('Y to X')
# view samples at iteration 100
view_samples(100, 'samples_cyclegan')
# view samples at iteration 1000
view_samples(1000, 'samples_cyclegan')
"""
Explanation: Evaluate the Result!
As you trained this model, you may have chosen to sample and save the results of your generated images after a certain number of training iterations. This gives you a way to see whether or not your Generators are creating good fake images. For example, the image below depicts real images in the $Y$ set, and the corresponding generated images during different points in the training process. You can see that the generator starts out creating very noisy fake images, but begins to converge to better representations as it trains (though not perfectly).
<img src='notebook_images/sample-004000-summer2winter.png' width=50% />
Below, you've been given a helper function for displaying generated samples based on the passed in training iteration.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/mohc/cmip6/models/sandbox-3/toplevel.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-3', 'toplevel')
"""
Explanation: ES-DOC CMIP6 Model Properties - Toplevel
MIP Era: CMIP6
Institute: MOHC
Source ID: SANDBOX-3
Sub-Topics: Radiative Forcings.
Properties: 85 (42 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:15
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Flux Correction
3. Key Properties --> Genealogy
4. Key Properties --> Software Properties
5. Key Properties --> Coupling
6. Key Properties --> Tuning Applied
7. Key Properties --> Conservation --> Heat
8. Key Properties --> Conservation --> Fresh Water
9. Key Properties --> Conservation --> Salt
10. Key Properties --> Conservation --> Momentum
11. Radiative Forcings
12. Radiative Forcings --> Greenhouse Gases --> CO2
13. Radiative Forcings --> Greenhouse Gases --> CH4
14. Radiative Forcings --> Greenhouse Gases --> N2O
15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
17. Radiative Forcings --> Greenhouse Gases --> CFC
18. Radiative Forcings --> Aerosols --> SO4
19. Radiative Forcings --> Aerosols --> Black Carbon
20. Radiative Forcings --> Aerosols --> Organic Carbon
21. Radiative Forcings --> Aerosols --> Nitrate
22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
24. Radiative Forcings --> Aerosols --> Dust
25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
27. Radiative Forcings --> Aerosols --> Sea Salt
28. Radiative Forcings --> Other --> Land Use
29. Radiative Forcings --> Other --> Solar
1. Key Properties
Key properties of the model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Top level overview of coupled model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of coupled model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Flux Correction
Flux correction properties of the model
2.1. Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how flux corrections are applied in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Genealogy
Genealogy and history of the model
3.1. Year Released
Is Required: TRUE Type: STRING Cardinality: 1.1
Year the model was released
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. CMIP3 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP3 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. CMIP5 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP5 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Previous Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Previously known as
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of model
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.4. Components Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.5. Coupler
Is Required: FALSE Type: ENUM Cardinality: 0.1
Overarching coupling framework for model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Coupling
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of coupling in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.2. Atmosphere Double Flux
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.3. Atmosphere Fluxes Calculation Grid
Is Required: FALSE Type: ENUM Cardinality: 0.1
Where are the air-sea fluxes calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Atmosphere Relative Winds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics/diagnostics of the global mean state used in tuning model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics/diagnostics used in tuning model/component (such as 20th century)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.5. Energy Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. Fresh Water Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Conservation --> Heat
Global heat conservation properties of the model
7.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.6. Land Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the land/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation --> Fresh Water
Global fresh water conservation properties of the model
8.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh_water is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh water is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Runoff
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how runoff is distributed and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Iceberg Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how iceberg calving is modeled and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Endoreic Basins
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how endoreic basins (no ocean access) are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Snow Accumulation
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how snow accumulation over land and over sea-ice is treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Key Properties --> Conservation --> Salt
Global salt conservation properties of the model
9.1. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how salt is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Key Properties --> Conservation --> Momentum
Global momentum conservation properties of the model
10.1. Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how momentum is conserved in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Radiative Forcings
Radiative forcings of the model for historical and scenario simulations (aka Table 12.1 of IPCC AR5)
11.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative forcings (GHG and aerosols) implementation in model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Carbon dioxide forcing
12.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Methane forcing
13.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Nitrous oxide forcing
14.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Tropospheric ozone forcing
15.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Stratospheric ozone forcing
16.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Ozone-depleting and non-ozone-depleting fluorinated gases forcing
17.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Equivalence Concentration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Details of any equivalence concentrations used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiative Forcings --> Aerosols --> SO4
SO4 aerosol forcing
18.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon
Black carbon aerosol forcing
19.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Organic carbon aerosol forcing
20.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate
Nitrate forcing
21.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Cloud albedo effect forcing (RFaci)
22.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Cloud lifetime effect forcing (ERFaci)
23.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.3. RFaci From Sulfate Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative forcing from aerosol cloud interactions from sulfate aerosol only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiative Forcings --> Aerosols --> Dust
Dust forcing
24.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Tropospheric volcanic forcing
25.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Stratospheric volcanic forcing
26.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt
Sea salt forcing
27.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiative Forcings --> Other --> Land Use
Land use forcing
28.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28.2. Crop Change Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Land use change represented via crop change only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 29. Radiative Forcings --> Other --> Solar
Solar forcing
29.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How solar forcing is provided
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
|
manipopopo/tensorflow
|
tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
|
apache-2.0
|
from __future__ import absolute_import, division, print_function
# Import TensorFlow >= 1.10 and enable eager execution
import tensorflow as tf
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import time
print(tf.__version__)
"""
Explanation: Copyright 2018 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License").
Neural Machine Translation with Attention
<table class="tfo-notebook-buttons" align="left"><td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
This notebook trains a sequence to sequence (seq2seq) model for Spanish to English translation using tf.keras and eager execution. This is an advanced example that assumes some knowledge of sequence to sequence models.
After training the model in this notebook, you will be able to input a Spanish sentence, such as "¿todavia estan en casa?", and return the English translation: "are you still at home?"
The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting. This shows which parts of the input sentence have the model's attention while translating:
<img src="https://tensorflow.org/images/spanish-english.png" alt="spanish-english attention plot">
Note: This example takes approximately 10 minutes to run on a single P100 GPU.
End of explanation
"""
# Download the file
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
# Converts the unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# creating a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# adding a start and an end token to the sentence
  # so that the model knows when to start and stop predicting.
w = '<start> ' + w + ' <end>'
return w
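# A quick sanity check of the preprocessing above (illustrative addition, not part
# of the original notebook): this should print
# '<start> may i borrow this book ? <end>'
print(preprocess_sentence("May I borrow this book?"))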
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return word_pairs
# This class creates a word -> index mapping (e.g., "dad" -> 5) and vice-versa
# (e.g., 5 -> "dad") for each language.
class LanguageIndex():
def __init__(self, lang):
self.lang = lang
self.word2idx = {}
self.idx2word = {}
self.vocab = set()
self.create_index()
def create_index(self):
for phrase in self.lang:
self.vocab.update(phrase.split(' '))
self.vocab = sorted(self.vocab)
self.word2idx['<pad>'] = 0
for index, word in enumerate(self.vocab):
self.word2idx[word] = index + 1
for word, index in self.word2idx.items():
self.idx2word[index] = word
def max_length(tensor):
return max(len(t) for t in tensor)
def load_dataset(path, num_examples):
# creating cleaned input, output pairs
pairs = create_dataset(path, num_examples)
# index language using the class defined above
inp_lang = LanguageIndex(sp for en, sp in pairs)
targ_lang = LanguageIndex(en for en, sp in pairs)
# Vectorize the input and target languages
# Spanish sentences
input_tensor = [[inp_lang.word2idx[s] for s in sp.split(' ')] for en, sp in pairs]
# English sentences
target_tensor = [[targ_lang.word2idx[s] for s in en.split(' ')] for en, sp in pairs]
# Calculate max_length of input and output tensor
# Here, we'll set those to the longest sentence in the dataset
max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)
# Padding the input and output tensor to the maximum length
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
maxlen=max_length_inp,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
maxlen=max_length_tar,
padding='post')
return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
"""
Explanation: Download and prepare the dataset
We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:
May I borrow this book? ¿Puedo tomar prestado este libro?
There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data:
Add a start and end token to each sentence.
Clean the sentences by removing special characters.
Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
Pad each sentence to a maximum length.
End of explanation
"""
# Try experimenting with the size of that dataset
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_dataset(path_to_file, num_examples)
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)
"""
Explanation: Limit the size of the dataset to experiment faster (optional)
Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
End of explanation
"""
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
N_BATCH = BUFFER_SIZE//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word2idx)
vocab_tar_size = len(targ_lang.word2idx)
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE))
"""
Explanation: Create a tf.data dataset
End of explanation
"""
def gru(units):
# If you have a GPU, we recommend using CuDNNGRU (it provides roughly a 3x speedup over GRU);
# the code below selects it automatically.
if tf.test.is_gpu_available():
return tf.keras.layers.CuDNNGRU(units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.enc_units)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.dec_units)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.W1 = tf.keras.layers.Dense(self.dec_units)
self.W2 = tf.keras.layers.Dense(self.dec_units)
self.V = tf.keras.layers.Dense(1)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# score shape == (batch_size, max_length, hidden_size)
score = tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))
# attention_weights shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
attention_weights = tf.nn.softmax(self.V(score), axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * enc_output
context_vector = tf.reduce_sum(context_vector, axis=1)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * max_length, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size * max_length, vocab)
x = self.fc(output)
return x, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
"""
Explanation: Write the encoder and decoder model
Here, we'll implement an encoder-decoder model with attention, which you can read about in the TensorFlow Neural Machine Translation (seq2seq) tutorial. This example uses a more recent set of APIs. This notebook implements the attention equations from the seq2seq tutorial. The following diagram shows that each input word is assigned a weight by the attention mechanism, which is then used by the decoder to predict the next word in the sentence.
<img src="https://www.tensorflow.org/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
The input is put through an encoder model which gives us the encoder output of shape (batch_size, max_length, hidden_size) and the encoder hidden state of shape (batch_size, hidden_size).
Here are the equations that are implemented:
<img src="https://www.tensorflow.org/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
<img src="https://www.tensorflow.org/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
We're using Bahdanau attention. Let's decide on notation before writing the simplified form:
FC = Fully connected (dense) layer
EO = Encoder output
H = hidden state
X = input to the decoder
And the pseudo-code:
score = FC(tanh(FC(EO) + FC(H)))
attention weights = softmax(score, axis = 1). Softmax by default is applied on the last axis but here we want to apply it on the 1st axis, since the shape of score is (batch_size, max_length, hidden_size). Max_length is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
context vector = sum(attention weights * EO, axis = 1). Same reason as above for choosing axis as 1.
embedding output = The input to the decoder X is passed through an embedding layer.
merged vector = concat(embedding output, context vector)
This merged vector is then given to the GRU
The shapes of all the vectors at each step have been specified in the comments in the code:
End of explanation
"""
optimizer = tf.train.AdamOptimizer()
def loss_function(real, pred):
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
"""
Explanation: Define the optimizer and the loss function
End of explanation
"""
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
"""
Explanation: Checkpoints (Object-based saving)
End of explanation
"""
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
total_loss += batch_loss
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if epoch % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / N_BATCH))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
"""
Explanation: Training
Pass the input through the encoder, which returns the encoder output and the encoder hidden state.
The encoder output, the encoder hidden state, and the decoder input (which is the start token) are passed to the decoder.
The decoder returns the predictions and the decoder hidden state.
The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
Use teacher forcing to decide the next input to the decoder.
Teacher forcing is the technique where the target word is passed as the next input to the decoder.
The final step is to calculate the gradients, apply them to the optimizer, and backpropagate.
End of explanation
"""
def evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word2idx[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
# storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy()
result += targ_lang.idx2word[predicted_id] + ' '
if targ_lang.idx2word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
plt.show()
def translate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
result, sentence, attention_plot = evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
print('Input: {}'.format(sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
"""
Explanation: Translate
The evaluate function is similar to the training loop, except we don't use teacher forcing here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.
Stop predicting when the model predicts the end token.
And store the attention weights for every time step.
Note: The encoder output is calculated only once for one input.
End of explanation
"""
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate('hace mucho frio aqui.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
translate('esta es mi vida.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
translate('¿todavia estan en casa?', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# wrong translation
translate('trata de averiguarlo.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
"""
Explanation: Restore the latest checkpoint and test
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.18/_downloads/f51d54a1c1f3584f45318492102672d3/plot_creating_data_structures.ipynb
|
bsd-3-clause
|
import mne
import numpy as np
"""
Explanation: Creating MNE's data structures from scratch
MNE provides mechanisms for creating various core objects directly from
NumPy arrays.
End of explanation
"""
# Create some dummy metadata
n_channels = 32
sampling_rate = 200
info = mne.create_info(n_channels, sampling_rate)
print(info)
"""
Explanation: Creating :class:~mne.Info objects
<div class="alert alert-info"><h4>Note</h4><p>for full documentation on the :class:`~mne.Info` object, see
`tut-info-class`. See also `ex-array-classes`.</p></div>
Normally, :class:mne.Info objects are created by the various
data import functions <ch_convert>.
However, if you wish to create one from scratch, you can use the
:func:mne.create_info function to initialize the minimally required
fields. Further fields can be assigned later as one would with a regular
dictionary.
The following creates the absolute minimum info structure:
End of explanation
"""
# Names for each channel
channel_names = ['MEG1', 'MEG2', 'Cz', 'Pz', 'EOG']
# The type (mag, grad, eeg, eog, misc, ...) of each channel
channel_types = ['grad', 'grad', 'eeg', 'eeg', 'eog']
# The sampling rate of the recording
sfreq = 1000 # in Hertz
# The EEG channels use the standard naming strategy.
# By supplying the 'montage' parameter, approximate locations
# will be added for them
montage = 'standard_1005'
# Initialize required fields
info = mne.create_info(channel_names, sfreq, channel_types, montage)
# Add some more information
info['description'] = 'My custom dataset'
info['bads'] = ['Pz'] # Names of bad channels
print(info)
"""
Explanation: You can also supply more extensive metadata:
End of explanation
"""
# Generate some random data
data = np.random.randn(5, 1000)
# Initialize an info structure
info = mne.create_info(
ch_names=['MEG1', 'MEG2', 'EEG1', 'EEG2', 'EOG'],
ch_types=['grad', 'grad', 'eeg', 'eeg', 'eog'],
sfreq=100
)
custom_raw = mne.io.RawArray(data, info)
print(custom_raw)
"""
Explanation: <div class="alert alert-info"><h4>Note</h4><p>When assigning new values to the fields of an
:class:`mne.Info` object, it is important that the
fields are consistent:
- The length of the channel information field `chs` must be
`nchan`.
- The length of the `ch_names` field must be `nchan`.
- The `ch_names` field should be consistent with the `name` field
of the channel information contained in `chs`.</p></div>
Creating :class:~mne.io.Raw objects
To create a :class:mne.io.Raw object from scratch, you can use the
:class:mne.io.RawArray class, which implements raw data that is backed by a
numpy array. The correct units for the data are:
V: eeg, eog, seeg, emg, ecg, bio, ecog
T: mag
T/m: grad
M: hbo, hbr
Am: dipole
AU: misc
The :class:mne.io.RawArray constructor simply takes the data matrix and
:class:mne.Info object:
End of explanation
"""
# Generate some random data: 10 epochs, 5 channels, 2 seconds per epoch
sfreq = 100
data = np.random.randn(10, 5, sfreq * 2)
# Initialize an info structure
info = mne.create_info(
ch_names=['MEG1', 'MEG2', 'EEG1', 'EEG2', 'EOG'],
ch_types=['grad', 'grad', 'eeg', 'eeg', 'eog'],
sfreq=sfreq
)
"""
Explanation: Creating :class:~mne.Epochs objects
To create an :class:mne.Epochs object from scratch, you can use the
:class:mne.EpochsArray class, which uses a numpy array directly without
wrapping a raw object. The array must be of shape (n_epochs, n_chans,
n_times). The proper units of measure are listed above.
End of explanation
"""
# Create an event matrix: 10 events with alternating event codes
events = np.array([
[0, 0, 1],
[1, 0, 2],
[2, 0, 1],
[3, 0, 2],
[4, 0, 1],
[5, 0, 2],
[6, 0, 1],
[7, 0, 2],
[8, 0, 1],
[9, 0, 2],
])
"""
Explanation: It is necessary to supply an "events" array in order to create an Epochs
object. This is of shape (n_events, 3), where the first column is the sample
number (time) of the event, the second column indicates the value from which
the transition is made (only used when the new value is bigger than the
old one), and the third column is the new event value.
End of explanation
"""
event_id = dict(smiling=1, frowning=2)
"""
Explanation: More information about the event codes: subject was either smiling or
frowning
End of explanation
"""
# Trials were cut from -0.1 to 1.0 seconds
tmin = -0.1
"""
Explanation: Finally, we must specify the beginning of an epoch (the end will be inferred
from the sampling frequency and n_samples)
End of explanation
"""
custom_epochs = mne.EpochsArray(data, info, events, tmin, event_id)
print(custom_epochs)
# We can treat the epochs object as we would any other
_ = custom_epochs['smiling'].average().plot(time_unit='s')
"""
Explanation: Now we can create the :class:mne.EpochsArray object
End of explanation
"""
# The averaged data
data_evoked = data.mean(0)
# The number of epochs that were averaged
nave = data.shape[0]
# A comment to describe the evoked data (usually the condition name)
comment = "Smiley faces"
# Create the Evoked object
evoked_array = mne.EvokedArray(data_evoked, info, tmin,
comment=comment, nave=nave)
print(evoked_array)
_ = evoked_array.plot(time_unit='s')
"""
Explanation: Creating :class:~mne.Evoked Objects
If you already have data that is collapsed across trials, you may also
directly create an evoked array. Its constructor accepts an array of
shape (n_chans, n_times) in addition to some bookkeeping parameters.
The proper units of measure for the data are listed above.
End of explanation
"""
|
tbarrongh/cosc-learning-labs
|
src/notebook/03_interface_configuration.ipynb
|
apache-2.0
|
help('learning_lab.03_interface_configuration')
"""
Explanation: COSC Learning Lab
03_interface_configuration.py
Related Scripts:
* 03_interface_names.py
* 03_interface_properties.py
* 03_interface_configuration_update.py
Table of Contents
Table of Contents
Documentation
Implementation
Execution
HTTP
Documentation
End of explanation
"""
from importlib import import_module
script = import_module('learning_lab.03_interface_configuration')
from inspect import getsource
print(getsource(script.main))
print(getsource(script.demonstrate))
"""
Explanation: Implementation
End of explanation
"""
run ../learning_lab/03_interface_configuration.py
"""
Explanation: Execution
End of explanation
"""
from basics.odl_http import http_history
from basics.http import http_history_to_html
from IPython.core.display import HTML
HTML(http_history_to_html(http_history()))
"""
Explanation: HTTP
End of explanation
"""
|
nwjs/chromium.src
|
third_party/tensorflow-text/src/docs/tutorials/text_generation.ipynb
|
bsd-3-clause
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
import numpy as np
import os
import time
"""
Explanation: Text generation with an RNN
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/text/tutorials/text_generation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/text/blob/master/docs/tutorials/text_generation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/text/blob/master/docs/tutorials/text_generation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/text/docs/tutorials/text_generation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial demonstrates how to generate text using a character-based RNN. You will work with a dataset of Shakespeare's writing from Andrej Karpathy's The Unreasonable Effectiveness of Recurrent Neural Networks. Given a sequence of characters from this data ("Shakespear"), train a model to predict the next character in the sequence ("e"). Longer sequences of text can be generated by calling the model repeatedly.
Note: Enable GPU acceleration to execute this notebook faster. In Colab: Runtime > Change runtime type > Hardware accelerator > GPU.
This tutorial includes runnable code implemented using tf.keras and eager execution. The following is the sample output when the model in this tutorial was trained for 30 epochs and started with the prompt "Q":
<pre>
QUEENE:
I had thought thou hadst a Roman; for the oracle,
Thus by All bids the man against the word,
Which are so weak of care, by old care done;
Your children were in your holy love,
And the precipitation through the bleeding throne.
BISHOP OF ELY:
Marry, and will, my lord, to weep in such a one were prettiest;
Yet now I was adopted heir
Of the world's lamentable day,
To watch the next way with his father with his face?
ESCALUS:
The cause why then we are all resolved more sons.
VOLUMNIA:
O, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, it is no sin it should be dead,
And love and pale as any will to that word.
QUEEN ELIZABETH:
But how long have I heard the soul for this world,
And show his hands of life be proved to stand.
PETRUCHIO:
I say he look'd on, if I must be content
To stay him from the fatal of our country's bliss.
His lordship pluck'd from this sentence then for prey,
And then let us twain, being the moon,
were she such a case as fills m
</pre>
While some of the sentences are grammatical, most do not make sense. The model has not learned the meaning of words, but consider:
The model is character-based. When training started, the model did not know how to spell an English word, or that words were even a unit of text.
The structure of the output resembles a play—blocks of text generally begin with a speaker name, in all capital letters similar to the dataset.
As demonstrated below, the model is trained on small batches of text (100 characters each), and is still able to generate a longer sequence of text with coherent structure.
Setup
Import TensorFlow and other libraries
End of explanation
"""
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
"""
Explanation: Download the Shakespeare dataset
Change the following line to run this code on your own data.
End of explanation
"""
# Read, then decode for py2 compat.
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# length of text is the number of characters in it
print(f'Length of text: {len(text)} characters')
# Take a look at the first 250 characters in text
print(text[:250])
# The unique characters in the file
vocab = sorted(set(text))
print(f'{len(vocab)} unique characters')
"""
Explanation: Read the data
First, look in the text:
End of explanation
"""
example_texts = ['abcdefg', 'xyz']
chars = tf.strings.unicode_split(example_texts, input_encoding='UTF-8')
chars
"""
Explanation: Process the text
Vectorize the text
Before training, you need to convert the strings to a numerical representation.
The preprocessing.StringLookup layer can convert each character into a numeric ID. It just needs the text to be split into tokens first.
End of explanation
"""
ids_from_chars = preprocessing.StringLookup(
vocabulary=list(vocab), mask_token=None)
"""
Explanation: Now create the preprocessing.StringLookup layer:
End of explanation
"""
ids = ids_from_chars(chars)
ids
"""
Explanation: It converts from tokens to character IDs:
End of explanation
"""
chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(
vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None)
"""
Explanation: Since the goal of this tutorial is to generate text, it will also be important to invert this representation and recover human-readable strings from it. For this you can use preprocessing.StringLookup(..., invert=True).
Note: Here instead of passing the original vocabulary generated with sorted(set(text)) use the get_vocabulary() method of the preprocessing.StringLookup layer so that the [UNK] token is set the same way.
End of explanation
"""
chars = chars_from_ids(ids)
chars
"""
Explanation: This layer recovers the characters from the vectors of IDs, and returns them as a tf.RaggedTensor of characters:
End of explanation
"""
tf.strings.reduce_join(chars, axis=-1).numpy()
def text_from_ids(ids):
return tf.strings.reduce_join(chars_from_ids(ids), axis=-1)
"""
Explanation: You can use tf.strings.reduce_join to join the characters back into strings.
End of explanation
"""
all_ids = ids_from_chars(tf.strings.unicode_split(text, 'UTF-8'))
all_ids
ids_dataset = tf.data.Dataset.from_tensor_slices(all_ids)
for ids in ids_dataset.take(10):
print(chars_from_ids(ids).numpy().decode('utf-8'))
seq_length = 100
examples_per_epoch = len(text)//(seq_length+1)
"""
Explanation: The prediction task
Given a character, or a sequence of characters, what is the most probable next character? This is the task you're training the model to perform. The input to the model will be a sequence of characters, and you train the model to predict the output—the following character at each time step.
Since RNNs maintain an internal state that depends on the previously seen elements, the question becomes: given all the characters computed up to this moment, what is the next character?
Create training examples and targets
Next divide the text into example sequences. Each input sequence will contain seq_length characters from the text.
For each input sequence, the corresponding targets contain the same length of text, except shifted one character to the right.
So break the text into chunks of seq_length+1. For example, say seq_length is 4 and our text is "Hello". The input sequence would be "Hell", and the target sequence "ello".
To do this first use the tf.data.Dataset.from_tensor_slices function to convert the text vector into a stream of character indices.
End of explanation
"""
sequences = ids_dataset.batch(seq_length+1, drop_remainder=True)
for seq in sequences.take(1):
print(chars_from_ids(seq))
"""
Explanation: The batch method lets you easily convert these individual characters to sequences of the desired size.
End of explanation
"""
for seq in sequences.take(5):
print(text_from_ids(seq).numpy())
"""
Explanation: It's easier to see what this is doing if you join the tokens back into strings:
End of explanation
"""
def split_input_target(sequence):
input_text = sequence[:-1]
target_text = sequence[1:]
return input_text, target_text
split_input_target(list("Tensorflow"))
dataset = sequences.map(split_input_target)
for input_example, target_example in dataset.take(1):
print("Input :", text_from_ids(input_example).numpy())
print("Target:", text_from_ids(target_example).numpy())
"""
Explanation: For training you'll need a dataset of (input, label) pairs, where input and
label are sequences. At each time step the input is the current character and the label is the next character.
Here's a function that takes a sequence as input, duplicates, and shifts it to align the input and label for each timestep:
End of explanation
"""
# Batch size
BATCH_SIZE = 64
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
dataset = (
dataset
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE))
dataset
"""
Explanation: Create training batches
You used tf.data to split the text into manageable sequences. But before feeding this data into the model, you need to shuffle the data and pack it into batches.
End of explanation
"""
# Length of the vocabulary in chars
vocab_size = len(vocab)
# The embedding dimension
embedding_dim = 256
# Number of RNN units
rnn_units = 1024
class MyModel(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, rnn_units):
super().__init__(self)
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(rnn_units,
return_sequences=True,
return_state=True)
self.dense = tf.keras.layers.Dense(vocab_size)
def call(self, inputs, states=None, return_state=False, training=False):
x = inputs
x = self.embedding(x, training=training)
if states is None:
states = self.gru.get_initial_state(x)
x, states = self.gru(x, initial_state=states, training=training)
x = self.dense(x, training=training)
if return_state:
return x, states
else:
return x
model = MyModel(
# Be sure the vocabulary size matches the `StringLookup` layers.
vocab_size=len(ids_from_chars.get_vocabulary()),
embedding_dim=embedding_dim,
rnn_units=rnn_units)
"""
Explanation: Build The Model
This section defines the model as a keras.Model subclass (For details see Making new Layers and Models via subclassing).
This model has three layers:
tf.keras.layers.Embedding: The input layer. A trainable lookup table that will map each character-ID to a vector with embedding_dim dimensions;
tf.keras.layers.GRU: A type of RNN with size units=rnn_units (You can also use an LSTM layer here.)
tf.keras.layers.Dense: The output layer, with vocab_size outputs. It outputs one logit for each character in the vocabulary. These are the log-likelihood of each character according to the model.
End of explanation
"""
for input_example_batch, target_example_batch in dataset.take(1):
example_batch_predictions = model(input_example_batch)
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
"""
Explanation: For each character the model looks up the embedding, runs the GRU one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-likelihood of the next character:
Note: For training you could use a keras.Sequential model here. To generate text later you'll need to manage the RNN's internal state. It's simpler to include the state input and output options upfront, than it is to rearrange the model architecture later. For more details see the Keras RNN guide.
Try the model
Now run the model to see that it behaves as expected.
First check the shape of the output:
End of explanation
"""
model.summary()
"""
Explanation: In the above example the sequence length of the input is 100 but the model can be run on inputs of any length:
End of explanation
"""
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices, axis=-1).numpy()
"""
Explanation: To get actual predictions from the model you need to sample from the output distribution to obtain character indices. This distribution is defined by the logits over the character vocabulary.
Note: It is important to sample from this distribution, as taking the argmax of the distribution can easily get the model stuck in a loop (a small comparison sketch follows after this cell).
Try it for the first example in the batch:
End of explanation
"""
sampled_indices
"""
Explanation: This gives us, at each timestep, a prediction of the next character index:
End of explanation
"""
print("Input:\n", text_from_ids(input_example_batch[0]).numpy())
print()
print("Next Char Predictions:\n", text_from_ids(sampled_indices).numpy())
"""
Explanation: Decode these to see the text predicted by this untrained model:
End of explanation
"""
loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
mean_loss = example_batch_loss.numpy().mean()
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("Mean loss: ", mean_loss)
"""
Explanation: Train the model
At this point the problem can be treated as a standard classification problem. Given the previous RNN state and the input at this time step, predict the class of the next character.
Attach an optimizer, and a loss function
The standard tf.keras.losses.sparse_categorical_crossentropy loss function works in this case because it is applied across the last dimension of the predictions.
Because your model returns logits, you need to set the from_logits flag.
End of explanation
"""
tf.exp(mean_loss).numpy()
"""
Explanation: A newly initialized model shouldn't be too sure of itself; the output logits should all have similar magnitudes. To confirm this you can check that the exponential of the mean loss is approximately equal to the vocabulary size. A much higher loss means the model is sure of its wrong answers, and is badly initialized:
End of explanation
"""
model.compile(optimizer='adam', loss=loss)
"""
Explanation: Configure the training procedure using the tf.keras.Model.compile method. Use tf.keras.optimizers.Adam with default arguments and the loss function.
End of explanation
"""
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
"""
Explanation: Configure checkpoints
Use a tf.keras.callbacks.ModelCheckpoint to ensure that checkpoints are saved during training:
End of explanation
"""
EPOCHS = 20
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
"""
Explanation: Execute the training
To keep training time reasonable, train for a modest number of epochs (the code above uses 20). In Colab, set the runtime to GPU for faster training.
End of explanation
"""
class OneStep(tf.keras.Model):
def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0):
super().__init__()
self.temperature = temperature
self.model = model
self.chars_from_ids = chars_from_ids
self.ids_from_chars = ids_from_chars
# Create a mask to prevent "[UNK]" from being generated.
skip_ids = self.ids_from_chars(['[UNK]'])[:, None]
sparse_mask = tf.SparseTensor(
# Put a -inf at each bad index.
values=[-float('inf')]*len(skip_ids),
indices=skip_ids,
# Match the shape to the vocabulary
dense_shape=[len(ids_from_chars.get_vocabulary())])
self.prediction_mask = tf.sparse.to_dense(sparse_mask)
@tf.function
def generate_one_step(self, inputs, states=None):
# Convert strings to token IDs.
input_chars = tf.strings.unicode_split(inputs, 'UTF-8')
input_ids = self.ids_from_chars(input_chars).to_tensor()
# Run the model.
# predicted_logits.shape is [batch, char, next_char_logits]
predicted_logits, states = self.model(inputs=input_ids, states=states,
return_state=True)
# Only use the last prediction.
predicted_logits = predicted_logits[:, -1, :]
predicted_logits = predicted_logits/self.temperature
# Apply the prediction mask: prevent "[UNK]" from being generated.
predicted_logits = predicted_logits + self.prediction_mask
# Sample the output logits to generate token IDs.
predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
predicted_ids = tf.squeeze(predicted_ids, axis=-1)
# Convert from token ids to characters
predicted_chars = self.chars_from_ids(predicted_ids)
# Return the characters and model state.
return predicted_chars, states
one_step_model = OneStep(model, chars_from_ids, ids_from_chars)
"""
Explanation: Generate text
The simplest way to generate text with this model is to run it in a loop, and keep track of the model's internal state as you execute it.
Each time you call the model you pass in some text and an internal state. The model returns a prediction for the next character and its new state. Pass the prediction and state back in to continue generating text.
The following makes a single step prediction:
End of explanation
"""
start = time.time()
states = None
next_char = tf.constant(['ROMEO:'])
result = [next_char]
for n in range(1000):
next_char, states = one_step_model.generate_one_step(next_char, states=states)
result.append(next_char)
result = tf.strings.join(result)
end = time.time()
print(result[0].numpy().decode('utf-8'), '\n\n' + '_'*80)
print('\nRun time:', end - start)
"""
Explanation: Run it in a loop to generate some text. Looking at the generated text, you'll see the model knows when to capitalize, makes paragraphs, and imitates a Shakespeare-like writing vocabulary. With the small number of training epochs, it has not yet learned to form coherent sentences.
End of explanation
"""
start = time.time()
states = None
next_char = tf.constant(['ROMEO:', 'ROMEO:', 'ROMEO:', 'ROMEO:', 'ROMEO:'])
result = [next_char]
for n in range(1000):
next_char, states = one_step_model.generate_one_step(next_char, states=states)
result.append(next_char)
result = tf.strings.join(result)
end = time.time()
print(result, '\n\n' + '_'*80)
print('\nRun time:', end - start)
"""
Explanation: The easiest thing you can do to improve the results is to train it for longer (try EPOCHS = 30).
You can also experiment with a different start string, try adding another RNN layer to improve the model's accuracy, or adjust the temperature parameter to generate more or less random predictions.
If you want the model to generate text faster the easiest thing you can do is batch the text generation. In the example below the model generates 5 outputs in about the same time it took to generate 1 above.
End of explanation
"""
tf.saved_model.save(one_step_model, 'one_step')
one_step_reloaded = tf.saved_model.load('one_step')
states = None
next_char = tf.constant(['ROMEO:'])
result = [next_char]
for n in range(100):
next_char, states = one_step_reloaded.generate_one_step(next_char, states=states)
result.append(next_char)
print(tf.strings.join(result)[0].numpy().decode("utf-8"))
"""
Explanation: Export the generator
This single-step model can easily be saved and restored, allowing you to use it anywhere a tf.saved_model is accepted.
End of explanation
"""
class CustomTraining(MyModel):
@tf.function
def train_step(self, inputs):
inputs, labels = inputs
with tf.GradientTape() as tape:
predictions = self(inputs, training=True)
loss = self.loss(labels, predictions)
grads = tape.gradient(loss, model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, model.trainable_variables))
return {'loss': loss}
"""
Explanation: Advanced: Customized Training
The above training procedure is simple, but does not give you much control.
It uses teacher-forcing which prevents bad predictions from being fed back to the model, so the model never learns to recover from mistakes.
So now that you've seen how to run the model manually, next you'll implement the training loop. This gives a starting point if, for example, you want to implement curriculum learning to help stabilize the model's open-loop output.
The most important part of a custom training loop is the train step function.
Use tf.GradientTape to track the gradients. You can learn more about this approach by reading the eager execution guide.
The basic procedure is:
Execute the model and calculate the loss under a tf.GradientTape.
Calculate the updates and apply them to the model using the optimizer.
End of explanation
"""
model = CustomTraining(
vocab_size=len(ids_from_chars.get_vocabulary()),
embedding_dim=embedding_dim,
rnn_units=rnn_units)
model.compile(optimizer = tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
model.fit(dataset, epochs=1)
"""
Explanation: The above implementation of the train_step method follows Keras' train_step conventions. This is optional, but it allows you to change the behavior of the train step and still use keras' Model.compile and Model.fit methods.
End of explanation
"""
EPOCHS = 10
mean = tf.metrics.Mean()
for epoch in range(EPOCHS):
start = time.time()
mean.reset_states()
for (batch_n, (inp, target)) in enumerate(dataset):
logs = model.train_step([inp, target])
mean.update_state(logs['loss'])
if batch_n % 50 == 0:
template = f"Epoch {epoch+1} Batch {batch_n} Loss {logs['loss']:.4f}"
print(template)
# saving (checkpoint) the model every 5 epochs
if (epoch + 1) % 5 == 0:
model.save_weights(checkpoint_prefix.format(epoch=epoch))
print()
print(f'Epoch {epoch+1} Loss: {mean.result().numpy():.4f}')
print(f'Time taken for 1 epoch {time.time() - start:.2f} sec')
print("_"*80)
model.save_weights(checkpoint_prefix.format(epoch=epoch))
"""
Explanation: Or if you need more control, you can write your own complete custom training loop:
End of explanation
"""
|
planetlabs/notebooks
|
jupyter-notebooks/proserve-interactive-trainings/data-API.ipynb
|
apache-2.0
|
import os
import json
import requests
PLANET_API_KEY = os.getenv('PL_API_KEY')
# Setup Planet Data API base URL
URL = "https://api.planet.com/data/v1"
# Setup the session
session = requests.Session()
# Authenticate
session.auth = (PLANET_API_KEY, "")
res = session.get(URL)
res.status_code
# Helper function to print formatted JSON using the json module
def p(data):
print(json.dumps(data, indent=2))
"""
Explanation: Setup
Locally store your Planet API key and start a session. Create a function to print JSON objects.
End of explanation
"""
# Setup the stats URL
stats_url = "{}/stats".format(URL)
"""
Explanation: Stats
Here you will perform a statistics search of Planet's database while getting familiar with the various filtering options.
End of explanation
"""
date_filter = {
"type": "DateRangeFilter", # Type of filter -> Date Range
"field_name": "acquired", # The field to filter on: "acquired" -> Date on which the "image was taken"
"config": {
"gte": "2000-01-01T00:00:00.000Z", # "gte" -> Greater than or equal to
}
}
"""
Explanation: Set up the date filter to a time of your choice.
End of explanation
"""
geometry = {
"type": "GeometryFilter",
"field_name": "geometry",
"config": {
"type": "Point",
"coordinates": [
0,
0
]
}
}
"""
Explanation: Pick a coordinate over your AOI. You can select it from Google Maps.
End of explanation
"""
# Setup Cloud Filter
cloud_filter = {
"type": "RangeFilter",
"field_name": "cloud_cover",
"config": {
"lt": 0.0,
"gt": 0.0
}
}
"""
Explanation: Set up a cloud filter. Remember that lt stands for "less than" and gt stands for "greater than" (an illustrative example configuration is sketched after this cell).
End of explanation
"""
and_filter = {
"type": "AndFilter",
"config": [geometry, date_filter, cloud_filter]
}
p(and_filter)
"""
Explanation: Now join all of the filters together.
End of explanation
"""
item_types = ["PSScene4Band"]
# Setup the request
request = {
"item_types" : item_types,
"interval" : "year",
"filter" : and_filter
}
# Send the POST request to the API stats endpoint
res=session.post(stats_url, json=request)
# Print response
p(res.json())
"""
Explanation: Search the database to see how many results fall in this category. Insert the Item Type of your choice.
End of explanation
"""
#Send a request to the item's asset url in order to activate it for download
#This step might take some time
asset_activated = False
while asset_activated == False:
res = session.get(assets_url)
assets = res.json()
asset_status = image["status"]
if asset_status == 'active':
asset_activated = True
print("Asset is active and ready to download")
p(image)
# Get the links for the item and find out what asset types are available
assets_url = feature["_links"]["assets"]
res = session.get(assets_url)
assets = res.json()
print(assets.keys())
# Setup the quick search endpoint url
# Create a request
quick_url = "{}/quick-search".format(URL)
item_types = ["PSScene4Band"]
request = {
"item_types" : item_types,
"filter" : and_filter
}
# Print the assets location endpoint for download
# Clicking on this url will download the image
location_url = image["location"]
print(location_url)
# Send the POST request to the API quick search endpoint
# Select the first feature from the search results and print its ID
# print the result
res = session.post(quick_url, json=request)
geojson = res.json()
feature = geojson["features"][0]
p(feature["id"])
# Pick an asset type
# Send a request to the activation url to activate the item
image = assets["analytic"]
activation_url = image["_links"]["activate"]
res = session.get(activation_url)
p(res.status_code)
"""
Explanation: Congratulations, you have performed your first statistics search using the Data API!
Quick Search
Here you will perform a search for specific image ID's, using the search criterias you defined above, in order to Download them!
All the code you need is below in small chunks; however, they are in the wrong order! Re-order them correctly to download your image.
End of explanation
"""
|
whitead/numerical_stats
|
unit_9/lectures/lecture_2.ipynb
|
gpl-3.0
|
import random
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, pi, erf
import scipy.stats
import numpy.linalg
"""
Explanation: Linear Algebra in NumPy
Unit 9, Lecture 2
Numerical Methods and Statistics
Prof. Andrew White, March 30, 2020
End of explanation
"""
matrix = [ [4,3], [6, 2] ]
print('As Python list:')
print(matrix)
np_matrix = np.array(matrix)
print('The shape of the array:', np.shape(np_matrix))
print('The numpy matrix/array:')
print(np_matrix)
"""
Explanation: Working with Matrices in Numpy
We saw earlier in the class how to create numpy matrices. Let's review that and learn about explicit initialization
Explicit Initialization
You can explicitly set the values in your matrix by first creating a list and then converting it into a numpy array
End of explanation
"""
np_matrix_2 = np.array([
[ 4, 3],
[ 1, 2],
[-1, 4],
[ 4, 2]
])
print(np_matrix_2)
"""
Explanation: You can use multiple lines in python to specify your list. This can make the formatting cleaner
End of explanation
"""
np_matrix_3 = np.zeros( (2, 10) )
print(np_matrix_3)
np_matrix_3[:, 1] = 2
print(np_matrix_3)
np_matrix_3[0, :] = -1
print(np_matrix_3)
np_matrix_3[1, 6] = 43
print(np_matrix_3)
rows, columns = np.shape(np_matrix_3) #get the number of rows and columns
for i in range(columns): #Do a for loop over columns
np_matrix_3[1, i] = i ** 2 #Set the value of the 2nd row, ith column to be i^2
print(np_matrix_3)
"""
Explanation: Create and Set
You can also create an array and then set the elements.
End of explanation
"""
np_matrix_1 = np.random.random( (2, 4) ) #create a random 2 x 4 array
np_matrix_2 = np.random.random( (4, 1) ) #create a random 4 x 1 array
print(np_matrix_1.dot(np_matrix_2))
"""
Explanation: Linear Algebra
The linear algebra routines for python are in the numpy.linalg library. See here
Matrix Multiplication
Matrix multiplication is done with the dot method. Let's compare that with *
End of explanation
"""
print(np_matrix_1 @ np_matrix_2)
"""
Explanation: So, dot correctly gives us a 2x1 matrix as expected for the two shapes
Using the special @ character:
End of explanation
"""
print(np_matrix_1 * np_matrix_2)
"""
Explanation: The element-by-element multiplication, *, doesn't work here because the two arrays' shapes cannot be broadcast together.
End of explanation
"""
print(np_matrix_1.dot(np_matrix_2))
print(np.dot(np_matrix_1, np_matrix_2))
"""
Explanation: Method vs Function
Instead of using dot as a method (it comes after a .), you can use the dot function as well. Let's see an example:
End of explanation
"""
import numpy.linalg as linalg
matrix = [ [1, 0], [0, 0] ]
np_matrix = np.array(matrix)
print(linalg.matrix_rank(np_matrix))
"""
Explanation: Matrix Rank
The rank of a matrix can be found with singular value decomposition. In numpy, we can do this simply with a call to linalg.matrix_rank (a short SVD check is sketched after this cell).
End of explanation
"""
#Enter the data as lists
a_matrix = [[3, 2, 1],
[2,-1,0],
[1,1,-2]]
b_matrix = [5, 4, 12]
#convert them to numpy arrays/matrices
np_a_matrix = np.array(a_matrix)
np_b_matrix = np.array(b_matrix).transpose()
#Solve the problem
np_a_inv = linalg.inv(np_a_matrix)
np_x_matrix = np_a_inv @ np_b_matrix
#print the solution
print(np_x_matrix)
#check to make sure the answer works
print(np_a_matrix @ np_x_matrix)
"""
Explanation: Matrix Inverse
The inverse of a matrix can be found using the linalg.inv command. Consider the following system of equations:
$$\begin{array}{lr}
3 x + 2 y + z & = 5\\
2 x - y & = 4 \\
x + y - 2z & = 12 \\
\end{array}$$
We can encode it as a matrix equation:
$$\left[\begin{array}{lcr}
3 & 2 & 1\\
2 & -1 & 0\\
1 & 1 & -2\\
\end{array}\right]
\left[\begin{array}{l}
x\\
y\\
z\\
\end{array}\right]
=
\left[\begin{array}{l}
5\\
4\\
12\\
\end{array}\right]$$
$$\mathbf{A}\mathbf{x} = \mathbf{b}$$
$$\mathbf{A}^{-1}\mathbf{b} = \mathbf{x}$$
End of explanation
"""
x = 1
for i in range(10):
x = x - (x**2 - 612) / (2 * x)
print(i, x)
"""
Explanation: Computation cost for inverse
Computing a matrix inverse can be VERY expensive for large matrices. Do not exceed about 500 x 500 matrices. For solving linear systems, prefer linalg.solve, as sketched above.
Eigenvectors/Eigenvalues
Before trying to understand what an eigenvector is, let's try to understand their analogue, a stationary point.
A stationary point (also called a fixed point) of a function $f(x)$ is an $x$ such that:
$$x = f(x)$$
Consider this function:
$$f(x) = x - \frac{x^2 - 612}{2x}$$
If we found a stationary point, that would mean that
$$x = x - \frac{x^2 - 612}{2x} $$
or
$$ x^2 = 612 $$
More generally, you can find a square root of $A$ by finding a stationary point to:
$$f(x) = x - \frac{x^2 - A}{2x} $$
In this case, you can find the stationary point by just iterating $x_{i+1} = f(x_i)$ until the value stops changing.
End of explanation
"""
A = np.array([[3,1], [1,3]])
e_values, e_vectors = np.linalg.eig(A)
print(e_vectors)
print(e_values)
"""
Explanation: Eigenvectors/Eigenvalues
Matrices are analogues of functions. They take in a vector and return a vector.
$$\mathbf{A}\mathbf{x} = \mathbf{y}$$
Just like stationary points, there is sometimes a special vector which has this property:
$$\mathbf{A}\mathbf{x} = \mathbf{x}$$
Such a vector is called an eigenvector. It turns out that such a vector rarely exists. If we instead allow a scalar, we can find a whole bunch like this:
$$\mathbf{A}\mathbf{v} =\lambda\mathbf{v}$$
These are like the stationary points above, except we are getting back our input times a constant. That means it's a particular direction that is unchanged, not the value.
Finding Eigenvectors/Eigenvalues
Eigenvalues/eigenvectors can be found easily as well in Python, including for complex numbers and sparse matrices. The command linalg.eigh will return only the real eigenvalues/eigenvectors. That assumes your matrix is Hermitian, meaning it is symmetric (if your matrix is real). Use eig to get general, possibly complex eigenvalues. Here's an easy example:
Let's consider this matrix:
$$
A = \left[\begin{array}{lr}
3 & 1\\
1 & 3\\
\end{array}\right]$$
Imagine it as a geometry operator. It takes in a 2D vector and morphs it into another 2D vector.
$$\vec{x} = [1, 0]$$
$$A \,\vec{x}^T = [3, 1]^T$$
Now, is there a particular direction that $\mathbf{A}$ leaves unchanged?
End of explanation
"""
v1 = e_vectors[:,0]
v2 = e_vectors[:,1]
A @ v1
"""
Explanation: So that means $v_1 = [0.7, 0.7]$ and $v_2 = [-0.7, 0.7]$. Let's find out:
End of explanation
"""
A = np.random.normal(size=(3,3))
e_values, e_vectors = linalg.eig(A)
print(e_values)
print(e_vectors)
"""
Explanation: Yes, that is the same direction! And notice that it's 4 times as much as the input vector, which is what the eigenvalue is telling us.
A random matrix will almost never be Hermitian, so look out for complex numbers. In engineering, your matrices will commonly be Hermitian.
End of explanation
"""
|
quantopian/research_public
|
notebooks/lectures/Introduction_to_Futures/notebook.ipynb
|
apache-2.0
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Introduction to Futures Contracts
by Maxwell Margenot and Delaney Mackenzie
Part of the Quantopian Lecture Series:
www.quantopian.com/lectures
github.com/quantopian/research_public
Futures contracts are derivatives and they are fundamentally different from equities, so it is important to understand what they are and how they work. In this lecture we will detail the basic unit of a futures contract, the forward contract, specifics on the valuation of futures contracts, and some things to keep in mind when handling futures. Our goal here is to cover what makes futures tick before we get into performing any sort of statistical analysis of them.
End of explanation
"""
# K is the delivery price agreed upon in the contract
K = 50
# Here we look at various different values that S_T can have
S_T = np.linspace(0, 100, 200)
# Calculate the long and short payoffs
long_payoff = S_T - K
short_payoff = K - S_T
"""
Explanation: Derivatives
We have stated that a futures contract is a derivative, so let's be clear on what a derivative is. A derivative is a financial instrument whose value is dependent on the value of an underlying asset. This can be a complex relationship or it can be something very simple. Derivatives have been constructed for a variety of different purposes in order to make more and more intricate bets about the markets. They provide new ways to express your expectations of how the markets will move and are used to great effect in conjunction with more conventional investments. Large amounts of resources are devoted to the construction and pricing of exotic derivatives, though that is beyond the scope of this lecture.
A futures contract is a standardized version of one of the simplest possible derivatives, the forward contract. Let's look at how forward contracts behave to give us a little more background on futures.
Forward Contracts
A futures contract at its heart is based on a derivative called a forward contract. This is an agreement between two parties to pay a delivery price, $K$, for an asset at some predetermined point in the future. Forward contracts are basic over the counter (OTC) derivatives, typically used for hedging. They are used for neutralizing risk by locking the price for an asset, obligating both sides of the contract to follow through.
Entering into a long position on a forward contract entails agreeing to buy the underlying asset, while entering into a short position entails agreeing to sell the underlying. Let's say that the price of the good is denoted by $S_i$, indexed with time, with $T$ being the maturity date of the forward contract. Then the payoff of a long position in a forward contract is:
$$ S_T - K $$
And the payoff of a short position in a forward contract is:
$$ K - S_T$$
Where $S_T$ is the value of the underlying at maturity and $K$ is the value agreed upon for the underlying at maturity. The specific value of $K$ is negotiated between the parties entering into a forward contract together so it can vary quite a bit, depending on the relevant parties.
The payoff of a derivative is simply the realized cash value at the end of its life. This settlement can take place with either the delivery and exchange of actual goods or a simple cash settlement. As we can see in the following graphs, a forward contract has a linear payoff.
End of explanation
"""
plt.plot(S_T, long_payoff)
plt.axhline(0, color='black', alpha=0.3)
plt.axvline(0, color='black', alpha=0.3)
plt.xlim(0, 100)
plt.ylim(-100, 100)
plt.axvline(K, linestyle='dashed', color='r', label='K')
plt.ylabel('Payoff')
plt.xlabel('$S_T$')
plt.title('Payoff of a Long Forward Contract')
plt.legend();
"""
Explanation: This is the long side payoff:
End of explanation
"""
plt.plot(S_T, short_payoff);
plt.axhline(0, color='black', alpha=0.3)
plt.axvline(0, color='black', alpha=0.3)
plt.xlim(0, 100)
plt.ylim(-100, 100)
plt.axvline(K, linestyle='dashed', color='r', label='K')
plt.ylabel('Payoff')
plt.xlabel('$S_T$')
plt.title('Payoff of a Short Forward Contract')
plt.legend();
"""
Explanation: And this is the short side payoff:
End of explanation
"""
contract = symbols('CNH17')
futures_position_value = get_pricing(contract, start_date = '2017-01-19', end_date = '2017-02-15', fields = 'price')
futures_position_value.name = futures_position_value.name.symbol
futures_position_value.plot()
plt.title('Corn Futures Price')
plt.xlabel('Date')
plt.ylabel('Price');
"""
Explanation: For a long position on a forward contract, you benefit if the price at expiry is greater than the delivery price, while the opposite holds with a short position. However, even if you do not make a profit on your position there can be advantages. A forward contract locks in a price for a transaction, removing any uncertainty that you may have about a sale or purchase in the future. This is advantageous in cases where you know what you will need at some point in the future (or have a good idea of what you will need due to your models).
Hedging with a forward contract serves to help remove any sort of uncertainty about the price that you will pay (or be paid) for a good. If you are a producer, you can easily protect yourself against falling prices by using a short position in a forward contract with a delivery price that you find amenable. Similarly, if you are a buyer, you can easily protect yourself with a long position.
Say that you need a certain quantity of copper for circuit board production in May. You could wait until May to purchase the appropriate amount, but you will be at the mercy of the spot market, the market where assets are traded for immediate delivery. Depending on your risk model and the cost of housing large amounts of copper, it may be more reasonable to enter into a forward contract for delivery in May with a distributor today. This way you are more prepared to meet your production demands.
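As a minimal numeric sketch of this lock-in effect (the delivery price, quantity and spot-price scenarios below are assumptions for illustration, not market data), note that whatever the spot price turns out to be, the cost of buying at spot net of the long forward payoff always equals the delivery price:

```python
# Hypothetical copper hedge; all numbers are assumed for illustration.
K = 4.00            # agreed delivery price in $/lb
quantity = 10000    # pounds of copper needed in May

for S_T in [3.50, 4.00, 4.50]:              # possible spot prices at maturity
    forward_payoff = (S_T - K) * quantity   # payoff of the long forward position
    spot_cost = S_T * quantity              # cost of buying the copper at spot
    net_cost = spot_cost - forward_payoff   # always equals K * quantity
    print('spot %.2f -> net cost %.2f' % (S_T, net_cost))
```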
Forward Contract Issues
Of course, we seldom know exactly when we will need an asset. In addition, entering into a private agreement with another party exposes you to counterparty risk, the risk that one or the other party in a transaction will renege on a deal. How the contract is actually settled is also up to the two parties, whether it be with an exchange of assets or a simple cash reconciliation. Forward contracts leave the delivery date, delivery method, and quantity up for debate. Their OTC nature gives a large degree of customizability, but directly contributes to the aforementioned issues and a lack of liquidity. It is unlikely that another party will be willing to take on an agreement that is highly customized to someone else's terms. There are definitely merits for the various possibilities for hedging that forward contracts provide, however, which is where futures contracts come in.
How is a Futures Contract Different?
Futures contracts are forward contracts that have been standardized for trade on an exchange. A single futures contract is for a set amount of the underlying with agreed-upon settlement, delivery date, and terms. On top of this, the exchange acts as an intermediary, virtually eliminating counterparty risk. However, this isn't to say that all futures contracts are standardized across the entire futures market. Futures for a given asset are standardized, so the terms of corn futures may differ from the terms of pork bellies futures.
Another quirk of futures contracts is that they are settled daily with a margin account at a broker held by the holder of the futures contract. Each day, the change in price of the underlying is reflected in an increase or a decrease in the amount of money in the margin account. This process is called "marking to market".
Marking to Market and Margin Accounts
DISCLAIMER: Margin is not currently modeled automatically on Quantopian. You should restrict your total position allocations manually.
Entering into a futures trade entails putting up a certain amount of cash. This amount will vary depending on the terms of the contract and is called your initial margin. This cash goes into a margin account held with the broker you are doing your trading with. Each day, the value of the futures contract position is marked to market. This means that any change in the futures price over the course of the day is reflected by a change in the margin account balance proportional to the number of contracts that you hold positions in.
You can withdraw any excess in the account over the initial margin if you so choose, but it is important to be mindful of keeping cash available to the broker, above the line of the maintenance margin. The maintenance margin is again determined by the terms of the contract. If the balance in your margin account falls below the maintenance margin, the broker will issue a margin call. To comply, you must top up the account with cash up to the initial margin again. If you choose not to or fail to meet the margin call, your position in the contract is closed.
Example: Corn Futures
Let's say that we want to get five corn futures contracts. Each corn contract is standardized for $5000$ bushels of corn (Around $127$ metric tons!) and corn is quoted in cents per bushel. Let's also say that our initial margin is $\$990$ per contract when we enter a position and our maintenance margin is $\$900$ per contract.
We can look at how this example would play out with actual numbers. Let's pull a small section of pricing data for a corn contract and imagine that we entered into a position and held it until maturity.
End of explanation
"""
initial_margin = 990
maintenance_margin = 900
contract_count = 5
"""
Explanation: The plot shows some significant decreases in price over the chosen time period, which should be reflected by drops in the margin account.
End of explanation
"""
# We hit two margin calls over this time period
margin_account_changes = futures_position_value.diff()*contract.multiplier*contract_count
margin_account_changes[0] = initial_margin*contract_count
margin_account_balance = margin_account_changes.cumsum()
margin_account_balance.name = 'Margin Account Balance'
# First margin call
margin_call_idx = np.where(margin_account_balance < maintenance_margin*contract_count)[0][0]
margin_deposit = initial_margin*contract_count - margin_account_balance[margin_call_idx]
margin_account_balance[margin_call_idx+1:] = margin_account_balance[margin_call_idx+1:] + margin_deposit
# Second margin call
second_margin_call_idx = np.where(margin_account_balance < maintenance_margin*contract_count)[0][1]
second_margin_deposit = initial_margin*contract_count - margin_account_balance[second_margin_call_idx]
margin_account_balance[second_margin_call_idx+1:] = margin_account_balance[second_margin_call_idx+1:] + second_margin_deposit
(futures_position_value*contract.multiplier).plot()
margin_account_balance.plot()
plt.axvline(margin_account_balance.index[margin_call_idx], color='r', linestyle='--')
plt.axvline(margin_account_balance.index[second_margin_call_idx], color='r', linestyle='--')
plt.axhline(maintenance_margin*contract_count, color='r', linestyle='--')
plt.title('Overall Value of a Futures Contract with the Margin Account Balance')
plt.xlabel('Date')
plt.ylabel('Value')
plt.legend();
"""
Explanation: Here we calculate when a margin call would occur as the futures price and margin account balance change.
End of explanation
"""
contracts = symbols(['ESH17', 'NGH17'])
volume_comparison = get_pricing(contracts, start_date = '2016-12-01', end_date = '2017-04-01', fields = 'volume')
volume_comparison.plot()
plt.title('Volume of S&P 500 E-Mini and Natural Gas Contracts for March Delivery')
plt.xlabel('Date')
plt.ylabel('Volume');
print volume_comparison.max()
"""
Explanation: The jump in the margin account balance that occurs after each vertical dotted line is the point at which we meet the margin call, increasing the margin account balance to our initial margin once more. Note that the lagged response to the second theoretical margin call in this example is due to a weekend. Notice how small perturbations in the futures price lead to large changes in the margin account balance. This is a consequence of the inherent leverage.
Financial vs. Commodity Futures
You can enter into futures contracts on many different types of assets. These range from actual, physical goods such as corn or wheat to more abstract assets, such as some multiplier times a stock market index. Futures contracts based on physical goods are generally called commodity futures, while those based on financial instruments are called financial futures. These can be further broken down into categories based on the general class of commodity or financial instrument.
In general, financial futures are more liquid than commodity futures. Let's compare the volume of two contracts deliverable in March 2017, one on the S&P 500 Index and the other on natural gas.
End of explanation
"""
cls = symbols(['CLF16', 'CLG16', 'CLH16'])
contract_volume = get_pricing(cls, start_date='2015-10-01', end_date='2016-04-01', fields='volume')
contract_volume.plot()
plt.title('Volume of Contracts with Different Expiry')
plt.xlabel('Date')
plt.ylabel('Volume');
"""
Explanation: The S&P 500 E-Mini contract has a value based on 50 units of the value of the S&P 500 Index. This financial future has a significant advantage in liquidity compared to natural gas for the same expiry. It likely helps that the S&P 500 E-Mini is cash-settled, while the natural gas contract requires arrangements to be made for transportation and storage of fuel, but the main takeaway here is that there are a lot more people trying to trade financial futures.
Delivery and Naming
Different futures contracts will differ on the available delivery months. Some contracts have delivery every month, while some only have delivery a few times a year. The naming conventions for a given futures contract include the delivery month and year for the specific contract that they refer to. The month codes are standardized and well-documented, but the specific symbol that refers to the underlying varies depending on the broker. For an overview of the contract names that we use on Quantopian, please refer to the Futures API Introduction.
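The delivery month letters themselves follow a standard exchange convention, so a symbol such as CLF16 can be read as crude oil (CL) for January (F) 2016 delivery. Here is a minimal sketch of a decoder (the helper function is ours for illustration and is not part of the Quantopian API):

```python
# Standard futures delivery month codes (exchange convention); decode_contract is illustrative.
MONTH_CODES = {
    'F': 'January',   'G': 'February', 'H': 'March',    'J': 'April',
    'K': 'May',       'M': 'June',     'N': 'July',     'Q': 'August',
    'U': 'September', 'V': 'October',  'X': 'November', 'Z': 'December',
}

def decode_contract(symbol):
    # e.g. 'CLF16' -> ('CL', 'January', '16')
    root, month_code, year = symbol[:-3], symbol[-3], symbol[-2:]
    return root, MONTH_CODES[month_code], year

print(decode_contract('CLF16'))
```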
The delivery terms of a futures contract are listed in the contract specifications for that underlying asset. With commodity futures, this often includes terms for the physical delivery of, for example, 1000 barrels of oil. This will vary between assets. Some contracts, particularly financials, allow for cash settlement, making it easier to deliver.
Closing a Futures Position
In order to close a futures position, you simply take up an opposite position in the same contract. The broker will see that you have two opposite positions in the same asset so you are flat, effectively closing the account's exposure. As this requires actually being able to open the opposing position, care needs to be taken to do this in a timely manner as futures have varying liquidity as they approach expiry. The majority of volume for a given contract tends to take place during this same period of time, but there is a chance that liquidity may drop and you will be unable to close your futures positions, resulting in you taking delivery.
The delivery date calendar varies from underlying to underlying and from month to month, which means that you have to take proper care to make sure you unwind your positions in a timely manner.
Here we plot the volume of futures contracts on "Light Sweet Crude Oil" with January, February, and March delivery.
End of explanation
"""
cl_january_contract = symbols('CLF16')
print cl_january_contract.expiration_date
"""
Explanation: As one contract fades out of the spotlight, the contract for the next month fades in. It is common practice to roll over positions in contracts, closing the previous month's positions and opening up equivalent positions in the next set of contracts. Note that when you create a futures object, you can access the expiration_date attribute to see when the contract will stop trading.
End of explanation
"""
es_march_contract = symbols('ESH17')
print es_march_contract.expiration_date
"""
Explanation: The expiration date for this crude oil contract is in December, but the delivery does not occur until January. This time lag between expiration and delivery varies for different underlyings. For example, the S&P 500 E-Mini contract, a financial future, has an expiration date in the same month as its delivery.
End of explanation
"""
assets = ['SPY', 'ESH16']
prices = get_pricing(assets, start_date = '2015-01-01', end_date = '2016-04-15', fields = 'price')
prices.columns = map(lambda x: x.symbol, prices.columns)
prices['ESH16'].plot()
(10*prices['SPY']).plot()
plt.legend()
plt.title('Price of a S&P 500 E-Mini Contract vs SPY')
plt.xlabel('Date')
plt.ylabel('Price');
"""
Explanation: Spot Prices and Futures Prices
An important feature of futures markets is that as a contract approaches its expiry, its futures price will converge to the spot price. To show this, we will examine how SPY and a S&P 500 E-Mini contract move against each other. SPY tracks the S&P 500 Index, which is the underlying for the S&P 500 E-Mini contract. If we plot ten times the price of the ETF (the value is scaled down from the actual index), then ideally the difference between them should go to 0 as we approach the expiry of the contract.
End of explanation
"""
X = (10*prices['SPY'][:'2016-03-15'] - prices['ESH16'][:'2016-03-15'])**2
X.plot()
plt.title('MSE of SPY and ESH16')
plt.xlabel('Date')
plt.ylabel('MSE');
"""
Explanation: Looking at a plot of the prices does not tell us very much, unfortunately. It looks like the values might be getting closer, but we cannot quite tell. Let's look instead at the mean squared error between the ETF and futures prices.
End of explanation
"""
contracts = symbols(['CLF17', 'CLG17', 'CLH17', 'CLJ17'])
prices = get_pricing(contracts, start_date='2016-11-01', end_date='2016-12-15', fields='price')
prices.columns = map(lambda x: x.symbol, prices.columns)
prices.plot();
"""
Explanation: This indeed seems to corroborate the point that futures prices approach the spot at expiry. And this makes sense. If we are close to expiry, there should be little difference between the price of acquiring a commodity or asset now and the price at the expiry date.
Connection Between Spot and Futures Prices
There are several ways to theoretically model futures prices, just as there are many models to model equity prices. A very basic model of futures prices and spot prices connects them through a parameter called the cost of carry. The cost of carry acts as a discount factor for futures prices, such that
$$ F(t, T) = S(t)\times (1 + c)^{T - t} $$
where $F(t, T)$ is the futures price at time $t$ for maturity $T$, $S(t)$ is the spot price at time $t$, and $c$ is the cost of carry (here assumed to be constant). With continuous compounding, this relationship becomes:
$$ F(t, T) = S(t)e^{c(T - t)} $$
This is a naive representation of the relationship in that it relies on a constant rate as well as a few other factors. Depending on the underlying asset, the cost of carry may be composed of several different things. For example, for a physical commodity, it may incorporate storage costs and the convenience yield for immediate access through the spot market, while for some financial commodities it may only encompass the risk free rate.
The cost of carry on futures can be thought of similarly to dividends on stocks. When considering futures prices of a single underlying through several different maturities, adjustments must be made to account for the cost of carry when switching to a new maturity.
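As a quick worked example of the continuous-compounding relationship (the spot price and cost of carry below are assumed values, not market data), a constant positive cost of carry makes the model futures price rise with time to maturity:

```python
# Evaluate F(t, T) = S(t) * exp(c * (T - t)) for a few maturities; inputs are assumptions.
import numpy as np

S_t = 50.0   # assumed spot price
c = 0.01     # assumed constant cost of carry per month

for months in [1, 2, 3, 4]:
    F = S_t * np.exp(c * months)
    print('%d month(s) to maturity -> F = %.2f' % (months, F))
```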
The further out we are from expiry, the more the cost of carry impacts the price. Here is a plot of the prices of contracts on light sweet crude for January, February, March, and April 2017 delivery. The further out the contract is from expiry, the higher the price.
End of explanation
"""
# A toy example to show Contango
N = 100 # Days to expiry of futures contract
cost_of_carry = 0.01
spot_price = pd.Series(np.ones(N), name = "Spot Price")
futures_price = pd.Series(np.ones(N), name = "Futures Price")
spot_price[0] = 20
futures_price[0] = spot_price[0]*np.exp(cost_of_carry*N)
for n in range(1, N):
spot_price[n] = spot_price[n-1]*(1 + np.random.normal(0, 0.05))
futures_price[n] = spot_price[n]*np.exp(cost_of_carry*(N - n))
spot_price.plot()
futures_price.plot()
plt.legend()
plt.title('Contango')
plt.xlabel('Time')
plt.ylabel('Price');
"""
Explanation: Contango and Backwardation
Often in futures markets we expect the futures price to be above the spot price. In this case, we can infer that participants in the market are willing to pay a premium for avoiding storage costs and the like. We call the difference between the futures price and the spot price the basis. A higher futures price than spot price indicates a positive basis, a situation which we call contango. With our cost of carry model, a positive cost of carry indicates contango.
End of explanation
"""
# A toy example to show Backwardation
N = 100 # Days to expiry of futures contract
cost_of_carry = -0.01
spot_price = pd.Series(np.ones(N), name = "Spot Price")
futures_price = pd.Series(np.ones(N), name = "Futures Price")
spot_price[0] = 20
futures_price[0] = spot_price[0]*np.exp(cost_of_carry*N)
for n in range(1, N):
spot_price[n] = spot_price[n-1]*(1 + np.random.normal(0, 0.05))
futures_price[n] = spot_price[n]*np.exp(cost_of_carry*(N - n))
spot_price.plot()
futures_price.plot()
plt.legend()
plt.title('Backwardation')
plt.xlabel('Time')
plt.ylabel('Price');
"""
Explanation: Backwardation occurs when the spot price is above the futures price and we have a negative basis. What this means is that it is cheaper to buy something right now than it would be to lock down for the future. This equates to a negative cost of carry.
End of explanation
"""
|
gpotter2/scapy
|
doc/notebooks/Scapy in 15 minutes.ipynb
|
gpl-2.0
|
send(IP(dst="1.2.3.4")/TCP(dport=502, options=[("MSS", 0)]))
"""
Explanation: Scapy in 15 minutes (or longer)
Guillaume Valadon & Pierre Lalet
Scapy is a powerful Python-based interactive packet manipulation program and library. It can be used to forge or decode packets for a wide number of protocols, send them on the wire, capture them, match requests and replies, and much more.
This iPython notebook provides a short tour of the main Scapy features. It assumes that you are familiar with networking terminology. All examples were built using the development version from https://github.com/secdev/scapy, and tested on Linux. They should also work on OS X and other BSD systems.
The current documentation is available on http://scapy.readthedocs.io/ !
Scapy eases network packet manipulation, and allows you to forge complicated packets to perform advanced tests. As a teaser, let's have a look at two examples that are difficult to express without Scapy:
1_ Sending a TCP segment with maximum segment size set to 0 to a specific port is an interesting test to perform against embedded TCP stacks. It can be achieved with the following one-liner:
End of explanation
"""
ans = sr([IP(dst="8.8.8.8", ttl=(1, 8), options=IPOption_RR())/ICMP(seq=RandShort()), IP(dst="8.8.8.8", ttl=(1, 8), options=IPOption_Traceroute())/ICMP(seq=RandShort()), IP(dst="8.8.8.8", ttl=(1, 8))/ICMP(seq=RandShort())], verbose=False, timeout=3)[0]
ans.make_table(lambda x, y: (", ".join(z.summary() for z in x[IP].options) or '-', x[IP].ttl, y.sprintf("%IP.src% %ICMP.type%")))
"""
Explanation: 2_ Advanced firewalking using IP options is sometimes useful to perform network enumeration. Here is a more complicated one-liner:
End of explanation
"""
from scapy.all import *
"""
Explanation: Now that we've got your attention, let's start the tutorial !
Quick setup
The easiest way to try Scapy is to clone the github repository, then launch the run_scapy script as root. The following examples can be pasted at the Scapy prompt. There is no need to install any external Python modules.
```shell
git clone https://github.com/secdev/scapy --depth=1
sudo ./run_scapy
Welcome to Scapy (2.4.0)
```
Note: iPython users must import scapy as follows
End of explanation
"""
packet = IP()/TCP()
Ether()/packet
"""
Explanation: First steps
With Scapy, each network layer is a Python class.
The '/' operator is used to bind layers together. Let's put a TCP segment on top of IP and assign it to the packet variable, then stack it on top of Ethernet.
End of explanation
"""
>>> ls(IP, verbose=True)
version : BitField (4 bits) = (4)
ihl : BitField (4 bits) = (None)
tos : XByteField = (0)
len : ShortField = (None)
id : ShortField = (1)
flags : FlagsField (3 bits) = (0)
MF, DF, evil
frag : BitField (13 bits) = (0)
ttl : ByteField = (64)
proto : ByteEnumField = (0)
chksum : XShortField = (None)
src : SourceIPField (Emph) = (None)
dst : DestIPField (Emph) = (None)
options : PacketListField = ([])
"""
Explanation: This last output displays the packet summary. Here, Scapy automatically filled the Ethernet type as well as the IP protocol field.
Protocol fields can be listed using the ls() function:
End of explanation
"""
p = Ether()/IP(dst="www.secdev.org")/TCP()
p.summary()
"""
Explanation: Let's create a new packet to a specific IP destination. With Scapy, each protocol field can be specified. As shown in the ls() output, the interesting field is dst.
Scapy packets are objects with some useful methods, such as summary().
End of explanation
"""
print(p.dst) # first layer that has a dst field, here Ether
print(p[IP].src) # explicitly access the src field of the IP layer
# sprintf() is a useful method to display fields
print(p.sprintf("%Ether.src% > %Ether.dst%\n%IP.src% > %IP.dst%"))
"""
Explanation: There are not many differences with the previous example. However, Scapy used the specific destination to perform some magic tricks !
Using internal mechanisms (such as DNS resolution, routing table and ARP resolution), Scapy has automatically set fields necessary to send the packet. These fields can of course be accessed and displayed.
End of explanation
"""
print(p.sprintf("%TCP.flags% %TCP.dport%"))
"""
Explanation: Scapy uses default values that work most of the time. For example, TCP() is a SYN segment to port 80.
End of explanation
"""
[p for p in IP(ttl=(1,5))/ICMP()]
"""
Explanation: Moreover, Scapy has implicit packets. For example, they are useful to make the TTL field value vary from 1 to 5 to mimic traceroute.
End of explanation
"""
p = sr1(IP(dst="8.8.8.8")/UDP()/DNS(qd=DNSQR()))
p[DNS].an
"""
Explanation: Sending and receiving
Currently, you know how to build packets with Scapy. The next step is to send them over the network !
The sr1() function sends a packet and returns the corresponding answer. srp1() does the same for layer two packets, i.e. Ethernet. If you are only interested in sending packets send() is your friend.
As an example, we can use the DNS protocol to get www.example.com IPv4 address.
End of explanation
"""
r, u = srp(Ether()/IP(dst="8.8.8.8", ttl=(5,10))/UDP()/DNS(rd=1, qd=DNSQR(qname="www.example.com")))
r, u
"""
Explanation: Another alternative is the sr() function, which returns both the answered and unanswered packets. Like srp1(), the srp() function is its layer 2 counterpart; we use it here because the packet starts at the Ethernet layer.
End of explanation
"""
# Access the first tuple
print(r[0][0].summary()) # the packet sent
print(r[0][1].summary()) # the answer received
# Access the ICMP layer. Scapy received a time-exceeded error message
r[0][1][ICMP]
"""
Explanation: sr() sent a list of packets, and returns two variables, here r and u, where:
1. r is a list of results (i.e tuples of the packet sent and its answer)
2. u is a list of unanswered packets
End of explanation
"""
wrpcap("scapy.pcap", r)
pcap_p = rdpcap("scapy.pcap")
pcap_p[0]
"""
Explanation: With Scapy, lists of packets, such as r or u, can be easily written to, or read from, PCAP files.
End of explanation
"""
s = sniff(count=2)
s
"""
Explanation: Sniffing the network is as straightforward as sending and receiving packets. The sniff() function returns a list of Scapy packets, that can be manipulated as previously described.
End of explanation
"""
sniff(count=2, prn=lambda p: p.summary())
"""
Explanation: sniff() has many arguments. The prn one accepts a function name that will be called on received packets. Using the lambda keyword, Scapy could be used to mimic the tshark command behavior.
End of explanation
"""
import socket
sck = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # create an UDP socket
sck.connect(("8.8.8.8", 53)) # connect to 8.8.8.8 on 53/UDP
# Create the StreamSocket and gives the class used to decode the answer
ssck = StreamSocket(sck)
ssck.basecls = DNS
# Send the DNS query
ssck.sr1(DNS(rd=1, qd=DNSQR(qname="www.example.com")))
"""
Explanation: Alternatively, Scapy can use OS sockets to send and receive packets. The following example assigns a UDP socket to a Scapy StreamSocket, which is then used to query the www.example.com IPv4 address.
Unlike other Scapy sockets, StreamSockets do not require root privileges.
End of explanation
"""
ans, unans = srloop(IP(dst=["8.8.8.8", "8.8.4.4"])/ICMP(), inter=.1, timeout=.1, count=100, verbose=False)
"""
Explanation: Visualization
Parts of the following examples require the matplotlib module.
With srloop(), we can send 100 ICMP packets to 8.8.8.8 and 8.8.4.4.
End of explanation
"""
%matplotlib inline
ans.multiplot(lambda x, y: (y[IP].src, (y.time, y[IP].id)), plot_xy=True)
"""
Explanation: Then we can use the results to plot the IP id values.
End of explanation
"""
pkt = IP() / UDP() / DNS(qd=DNSQR())
print(repr(raw(pkt)))
"""
Explanation: The raw() constructor can be used to "build" the packet's bytes as they would be sent on the wire.
End of explanation
"""
print(pkt.summary())
"""
Explanation: Since some people cannot read this representation, Scapy can:
- give a summary for a packet
End of explanation
"""
hexdump(pkt)
"""
Explanation: "hexdump" the packet's bytes
End of explanation
"""
pkt.show()
"""
Explanation: dump the packet, layer by layer, with the values for each field
End of explanation
"""
pkt.canvas_dump()
"""
Explanation: render a pretty and handy dissection of the packet
End of explanation
"""
ans, unans = traceroute('www.secdev.org', maxttl=15)
"""
Explanation: Scapy has a traceroute() function, which basically runs sr(IP(ttl=(1..30))/TCP()) and creates a TracerouteResult object, which is a specific subclass of SndRcvList().
End of explanation
"""
ans.world_trace()
"""
Explanation: The result can be plotted with .world_trace() (this requires GeoIP module and data, from MaxMind)
End of explanation
"""
ans = sr(IP(dst=["scanme.nmap.org", "nmap.org"])/TCP(dport=[22, 80, 443, 31337]), timeout=3, verbose=False)[0]
ans.extend(sr(IP(dst=["scanme.nmap.org", "nmap.org"])/UDP(dport=53)/DNS(qd=DNSQR()), timeout=3, verbose=False)[0])
ans.make_table(lambda x, y: (x[IP].dst, x.sprintf('%IP.proto%/{TCP:%r,TCP.dport%}{UDP:%r,UDP.dport%}'), y.sprintf('{TCP:%TCP.flags%}{ICMP:%ICMP.type%}')))
"""
Explanation: The PacketList.make_table() function can be very helpful. Here is a simple "port scanner":
End of explanation
"""
class DNSTCP(Packet):
name = "DNS over TCP"
fields_desc = [ FieldLenField("len", None, fmt="!H", length_of="dns"),
PacketLenField("dns", 0, DNS, length_from=lambda p: p.len)]
# This method tells Scapy that the next packet must be decoded with DNSTCP
def guess_payload_class(self, payload):
return DNSTCP
"""
Explanation: Implementing a new protocol
Scapy can be easily extended to support new protocols.
The following example defines DNS over TCP. The DNSTCP class inherits from Packet and defines two fields: the length, and the real DNS message. The length_of and length_from arguments link the len and dns fields together. Scapy will be able to automatically compute the len value.
End of explanation
"""
# Build then decode a DNS message over TCP
DNSTCP(raw(DNSTCP(dns=DNS())))
"""
Explanation: This new packet definition can be directly used to build a DNS message over TCP.
End of explanation
"""
import socket
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create an TCP socket
sck.connect(("8.8.8.8", 53)) # connect to 8.8.8.8 on 53/TCP
# Create the StreamSocket and gives the class used to decode the answer
ssck = StreamSocket(sck)
ssck.basecls = DNSTCP
# Send the DNS query
ssck.sr1(DNSTCP(dns=DNS(rd=1, qd=DNSQR(qname="www.example.com"))))
"""
Explanation: Modifying the previous StreamSocket example to use TCP allows us to use the new DNSTCP layer easily.
End of explanation
"""
from scapy.all import *
import argparse
parser = argparse.ArgumentParser(description="A simple ping6")
parser.add_argument("ipv6_address", help="An IPv6 address")
args = parser.parse_args()
print(sr1(IPv6(dst=args.ipv6_address)/ICMPv6EchoRequest(), verbose=0).summary())
"""
Explanation: Scapy as a module
So far, Scapy was only used from the command line. It is also a Python module that can be used to build specific network tools, such as ping6.py:
End of explanation
"""
# Specify the Wi-Fi monitor interface
#conf.iface = "mon0" # uncomment to test
# Create an answering machine
class ProbeRequest_am(AnsweringMachine):
function_name = "pram"
# The fake mac of the fake access point
mac = "00:11:22:33:44:55"
def is_request(self, pkt):
return Dot11ProbeReq in pkt
def make_reply(self, req):
rep = RadioTap()
# Note: depending on your Wi-Fi card, you might need a different header than RadioTap()
rep /= Dot11(addr1=req.addr2, addr2=self.mac, addr3=self.mac, ID=RandShort(), SC=RandShort())
rep /= Dot11ProbeResp(cap="ESS", timestamp=time.time())
rep /= Dot11Elt(ID="SSID",info="Scapy !")
rep /= Dot11Elt(ID="Rates",info=b'\x82\x84\x0b\x16\x96')
rep /= Dot11Elt(ID="DSset",info=chr(10))
return rep
# Start the answering machine
#ProbeRequest_am()() # uncomment to test
"""
Explanation: Answering machines
A lot of attack scenarios look the same: you want to wait for a specific packet, then send an answer to trigger the attack.
To this end, Scapy provides the AnsweringMachine object. Two methods are especially useful:
1. is_request(): return True if the pkt is the expected request
2. make_reply(): return the packet that must be sent
The following example uses Scapy Wi-Fi capabilities to pretend that a "Scapy !" access point exists.
Note: your Wi-Fi interface must be set to monitor mode !
End of explanation
"""
from scapy.all import *
import nfqueue, socket
def scapy_cb(i, payload):
s = payload.get_data() # get and parse the packet
p = IP(s)
# Check if the packet is an ICMP Echo Request to 8.8.8.8
if p.dst == "8.8.8.8" and ICMP in p:
# Delete checksums to force Scapy to compute them
del(p[IP].chksum, p[ICMP].chksum)
# Set the ICMP sequence number to 0
p[ICMP].seq = 0
# Let the modified packet go through
ret = payload.set_verdict_modified(nfqueue.NF_ACCEPT, raw(p), len(p))
else:
# Accept all packets
payload.set_verdict(nfqueue.NF_ACCEPT)
# Get an NFQUEUE handler
q = nfqueue.queue()
# Set the function that will be call on each received packet
q.set_callback(scapy_cb)
# Open the queue & start parsing packets
q.fast_open(2807, socket.AF_INET)
q.try_run()
"""
Explanation: Cheap Man-in-the-middle with NFQUEUE
NFQUEUE is an iptables target that can be used to transfer packets to a userland process. As an nfqueue module is available in Python, you can take advantage of this Linux feature to perform Scapy-based MiTM.
This example intercepts ICMP Echo Request messages sent to 8.8.8.8 with the ping command and modifies their sequence numbers. In order to pass packets to Scapy, the following iptables command puts packets into NFQUEUE #2807:
$ sudo iptables -I OUTPUT --destination 8.8.8.8 -p icmp -o eth0 -j NFQUEUE --queue-num 2807
End of explanation
"""
class TCPScanner(Automaton):
@ATMT.state(initial=1)
def BEGIN(self):
pass
@ATMT.state()
def SYN(self):
print("-> SYN")
@ATMT.state()
def SYN_ACK(self):
print("<- SYN/ACK")
raise self.END()
@ATMT.state()
def RST(self):
print("<- RST")
raise self.END()
@ATMT.state()
def ERROR(self):
print("!! ERROR")
raise self.END()
@ATMT.state(final=1)
def END(self):
pass
@ATMT.condition(BEGIN)
def condition_BEGIN(self):
raise self.SYN()
@ATMT.condition(SYN)
def condition_SYN(self):
if random.randint(0, 1):
raise self.SYN_ACK()
else:
raise self.RST()
@ATMT.timeout(SYN, 1)
def timeout_SYN(self):
raise self.ERROR()
TCPScanner().run()
TCPScanner().run()
"""
Explanation: Automaton
When more logic is needed, Scapy provides a clever abstraction to define an automaton. In a nutshell, you need to define an object that inherits from Automaton, and implement specific methods:
- states: using the @ATMT.state decorator. They usually do nothing
- conditions: using the @ATMT.condition and @ATMT.receive_condition decorators. They describe how to go from one state to another
actions: using the @ATMT.action decorator. They describe what to do, like sending a packet back, when changing state
The following example does nothing more than trying to mimic a TCP scanner:
End of explanation
"""
# Instantiate the blocks
clf = CLIFeeder()
ijs = InjectSink("enx3495db043a28")
# Plug blocks together
clf > ijs
# Create and start the engine
pe = PipeEngine(clf)
pe.start()
"""
Explanation: Pipes
Pipes are an advanced Scapy feature aimed at sniffing, modifying and printing packets. The API provides several building blocks. All of them have high entries and exits (>>) as well as low ones (>).
For example, the CLIFeeder is used to send messages from the Python command line to a low exit. It can be combined with the InjectSink, which reads messages on its low entry and injects them into the specified network interface. These blocks can be combined as follows:
End of explanation
"""
clf.send("Hello Scapy !")
"""
Explanation: Packets can be sent using the following command at the prompt:
End of explanation
"""
|
magwenelab/mini-term-2016
|
ode-modeling1.ipynb
|
cc0-1.0
|
# import statements to make numeric and plotting functions available
%matplotlib inline
from numpy import *
from matplotlib.pyplot import *
## define your function in this cell
def hill_activating(X, B, K, n):
f = (B * X**n)/(K**n + X**n)
return f
## generate a plot using your hill_activating function defined above
# setup paramters for our simulation
B = 5
K = 10
x = linspace(0,90,200) # generate 200 evenly spaced points between 0 and 90
y = hill_activating(x, B, K, 4)    # hill fxn with K=10, B=5, n=4
y2 = hill_activating(x, B, 20, 4)  # hill fxn with K=20, B=5, n=4
y4 = hill_activating(x, 10, K, 4)  # hill fxn with K=10, B=10, n=4
y8 = hill_activating(x, B, K, 8)   # hill fxn with K=10, B=5, n=8
plot(x, y, label='K=10, B=5, n=4')
plot(x, y2, label='K=20, B=5, n=4')
plot(x, y4, label='K=10,B=10,n=4')
# plot(x, y8, label='n=8')
xlabel('Concentration of X')
ylabel('Promoter activity')
legend(loc='best')
ylim(0, 11)
pass
"""
Explanation: Modeling Gene Networks Using Ordinary Differential Equations
Author: Paul M. Magwene
<br>
Date: February 2016
To gain some intuition for how systems biologists build mathematical models of gene networks we're going to use computer simulations to explore the dynamical behavior of simple transcriptional networks.
In each of our simulations we will keep track of the concentration of different genes of interest as they change over time. The basic approach we will use to calculate changes in the quantity of different molecules is differential equations, which are simply a way of describing the instantaneous change in a quantity of interest.
All of our differential equations will be of this form:
\begin{eqnarray}
\frac{dY}{dt} = \mbox{rate of production} - \mbox{rate of decay}
\end{eqnarray}
To state this in words -- how the amount of gene $Y$ changes over time is a function of two things: 1) a growth term which represents the rate at which the gene is being transcribed and translated; and 2) a decay term which gives the rate at which $Y$ transcripts and protein are being degraded.
In general we will assume that the "rate of production" is a function of the concentration of the genes that regulate $Y$ (i.e. its inputs in the transcriptional network), while the "rate of decay" is proportional to the amount of $Y$ that is present. So the above formula will take the following structure:
$$
\frac{dY}{dt} = f(X_1, X_2, \ldots) - \alpha Y
$$
The $f(X_1, X_2, \ldots)$ term represents the growth term and is a function of the transcription factors that regulate $Y$. The term $\alpha Y$ represents the rate at which $Y$ is being broken down or diluted. Notice that the decay rate is proportional to the amount of $Y$ that is present. If $\frac{dY}{dt}$ is positive then the concentration of gene $Y$ is increasing, if $\frac{dY}{dt}$ is negative the concentration of $Y$ is decreasing, and if $\frac{dY}{dt} = 0$ then $Y$ is at steady state.
Modeling the rate of production with the Hill Function
An appropriate approach for modeling the rate of production of a protein, $Y$, as a function of its inputs, $X_1, X_2, \ldots$, is with the "Hill function". The Hill function for a single transcriptional activator is:
$$
f(X) = \frac{\beta X^n}{K^n + X^n}
$$
$X$ represents the concentration of a transcriptional activator and $f(X)$ represents the combined transcription and translation of the gene $Y$ that is regulated by $X$.
Modeling transcriptional activation
Write a Python function to represent transcriptional activation based on the Hill function given above:
End of explanation
"""
## generate curves for different n here
# setup paramters for our simulation
B = 5
K = 10
x = linspace(0,30,200) # generate 200 evenly spaced points between 0 and 30
y1 = hill_activating(x, B, K, 1) # hill fxn with n = 1
y2 = hill_activating(x, B, K, 2) # hill fxn with n = 2
y4 = hill_activating(x, B, K, 4) # hill fxn with n = 4
y8 = hill_activating(x, B, K, 8) # hill fxn with n = 8
plot(x, y1, label='n=1')
plot(x, y2, label='n=2')
plot(x, y4, label='n=4')
plot(x, y8, label='n=8')
xlabel('Concentration of X')
ylabel('Rate of production of Y')
legend(loc='best')
ylim(0, 6)
pass
"""
Explanation: <h2> <font color="firebrick">In class exercise</font> </h2>
Following the example above, generate plots for the Hill function where $n = {1, 2, 4, 8}$. Note that you can generate multiple curves in the same figure by repeatedly calling the plot function.
End of explanation
"""
## define your repressive hill function in this cell
def hill_repressing(X, B, K, n):
return B/(1 + (X/K)**n)
## generate a plot using your hill_activating function defined above
## For X values range from 0 to 30
B = 5
K = 10
x = linspace(0,30,200)
plot(x, hill_repressing(x, B, K, 1), label='n=1')
plot(x, hill_repressing(x, B, K, 2), label='n=2')
plot(x, hill_repressing(x, B, K, 4), label='n=4')
plot(x, hill_repressing(x, B, K, 8), label='n=8')
xlabel('Conc. of X')
ylabel('Rate of production of Y')
legend(loc='best')
ylim(0, 6)
pass
"""
Explanation: Transcriptional repression
If rather than stimulating the production of $Y$, $X$ "represses" $Y$, we can write the corresponding Hill function as:
$$
f(X) = \frac{\beta}{1 + (X/K)^n}
$$
Remember that both of these Hill functions (activating and repressing) describe the production of $Y$ as a function of the levels of $X$, not the temporal dynamics of $Y$ which we'll look at after developing a few more ideas.
Modeling transcriptional repression
Write a function to represent transcriptional repression, using the repressive Hill function given above:
End of explanation
"""
## write your logic approximation functions here
def logic_activating(X, B, K):
if X > K:
return B
else:
return 0
def logic_repressing(X, B, K):
if X < K:
return B
else:
return 0
"""
Explanation: <h2> <font color='firebrick'> Interactive exploration of the Hill function </font> </h2>
Download the file hill-fxn.py and pnsim.py from the course website. Run this application from your terminal with the command: python hill-fxn.py.
Run the hill-fxn.py script from your terminal by typing python hill-fxn.py.
There are three sliders at the bottom of the application window. You can drag the blue regions of these sliders left or right to change the indicated parameter values. The exact values of each parameter are shown to the right of the sliders. As you drag the sliders the plot will update to show you what the Hill function looks like for the combination of parameters you have currently specified.
Also note there is a dashed vertical line in the plot window. When you move your mouse over the plot window this line will follow your position. As you do so, x- and y-plot values in the lower left of the application window will update to show you the exact position your mouse is pointing to in the plot. The dashed line and the plot readout are useful for reading values off the plot.
<h3> <font color='firebrick'> Homework 1: Use the `hill-fxn.py` script to answer the following questions </font> </h3>
Vary the parameter $n$ over the range 1 to 10.
a) Describe what happens to the shape of the plot.
b) How does changing $n$ change the maximum (or asymptotic maximum) promoter activity ($V_{max}$)?
c) At what value of activator concentration is half of the maximum promoter activity reached?
Vary the parameter $\beta$. How does changing $\beta$ change:
a) the shape of the plot?
b) the maximum promoter activity?
c) the activator concentration corresponding to half-maximal promoter activity?
Vary the parameter $K$. How does changing $K$ change:
a) the shape of the plot?
b) the maximum promoter activity?
c) the activator concentration corresponding to half-maximal promoter activity?
Download and run the script hill-fxn-wlogic.py -- This is like the previous hill-fxn.py script except it now includes a set of buttons for toggling the logic approximation on and off. As before, vary the parameters $n$, $\beta$ and $K$.
a) When is the logic approximation a good approximation to the Hill function?
Simplifying Models using Logic Approximations
To simplify analysis it's often convenient to approximate step-like sigmoidal functions like those produced by the Hill equation with functions using logic approximations.
We'll assume that when the transcription factor, $X$, is above a threshold, $K$, then gene $Y$ is transcribed at a rate, $\beta$. When $X$ is below the threshold, $K$, gene $Y$ is not transcribed. To represent this situation, we can rewrite the formula for $Y$ as:
$$
f(X) = \beta\ \Theta(X > K)
$$
where the function $\Theta$ is zero if the statement inside the parentheses is false or one if the statement is true. An alternate way to write this is:
$$
f(X) =
\begin{cases}
\beta, &\text{if $X > K$;} \
0, &\text{otherwise.}
\end{cases}
$$
When $X$ is a repressor we can write:
$$
f(X) = \beta\ \Theta(X < K)
$$
Python functions for the logic approximation
Write Python functions to represent the logic approximations for activation and repression as given above:
End of explanation
"""
## generate plots using your hill_activating and logic_activating functions defined above
## For X values range from 0 to 30
B = 5
K = 10
n = 4
x = linspace(0, 30, 200)
plot(x, hill_activating(x, B, K, n), label='Hill function, n=4')
logicx = [logic_activating(i, B, K) for i in x]
plot(x, logicx, label='logic approximation')
xlabel('Concentration of X')
ylabel('Promoter activity')
ylim(-0.1, 5.5)
legend(loc='best')
pass
"""
Explanation: Generate a plot comparing the logic approximation to the Hill function for the activating case:
End of explanation
"""
## write a function to represent the simple differential equation above
def dYdt(B,a,Y):
return B - a*Y  # constant production at rate B minus first-order decay at rate a
## generate a plot using your dY function defined above
## Evaluated over 200 time units
Y = [0] # initial value of Y
B = 0.2
a = 0.05
nsteps = 200
for i in range(nsteps):
deltay = dYdt(B, a, Y[-1])
ynew = Y[-1] + deltay
Y.append(ynew)
plot(Y)
ylim(0, 4.5)
xlabel('Time units')
ylabel('Concentration of Y')
pass
"""
Explanation: Multi-dimensional Input Functions
What if a gene needs two or more activator proteins to be transcribed? We can describe the amount of $Z$ transcribed as a function of active forms of $X$ and $Y$ with a function like:
$$
f(X,Y) = \beta\ \Theta(X > K_x \land Y > K_y)
$$
The above equation describes "AND" logic (i.e. both X and Y have to be above their threshold levels, $K_x$ and $K_y$, for Z to be transcribed). In a similar manner we can define "OR" logic:
$$
f(X,Y) = \beta\ \Theta(X > K_x \lor Y > K_y)
$$
A SUM function would be defined like this:
$$
f(X,Y) = \beta_x \Theta(X > K_x) + \beta_y \Theta (Y > K_y)
$$
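A minimal Python sketch of these multi-input approximations is given below (the function names are ours, chosen to parallel the single-input logic functions defined earlier): AND returns $\beta$ only when both inputs exceed their thresholds, OR when either does, and SUM adds the two independent contributions.

```python
## Sketch of multi-input logic approximations (function names are illustrative)
def logic_and(X, Y, B, Kx, Ky):
    return B if (X > Kx and Y > Ky) else 0

def logic_or(X, Y, B, Kx, Ky):
    return B if (X > Kx or Y > Ky) else 0

def logic_sum(X, Y, Bx, By, Kx, Ky):
    return Bx * (X > Kx) + By * (Y > Ky)

# quick check: X above its threshold, Y below its threshold
print(logic_and(15, 5, B=5, Kx=10, Ky=10))         # 0
print(logic_or(15, 5, B=5, Kx=10, Ky=10))          # 5
print(logic_sum(15, 5, Bx=5, By=3, Kx=10, Ky=10))  # 5
```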
Modeling changes in network components over time
Up until this point we've been considering how the rate of production of a protein $Y$ changes with the concentration of a transcriptional activator/repressor that regulates $Y$. Now we want to turn to the question of how the absolute amount of $Y$ changes over time.
As we discussed at the beginning of this notebook, how the amount of $Y$ changes over time is a function of two things: 1) a growth term which represents the rate of production of $Y$; and 2) a decay term which gives the rate at which $Y$ is degraded. A differential equation describing this is as follows:
$$
\frac{dY}{dt} = f(X_1, X_2, \ldots) - \alpha Y
$$
The $f(X_1, X_2, \ldots)$ term represents the growth term and is a function of the transcription factors that regulate $Y$. We've already seen a couple of ways to model the rate of producting -- using the Hill function or its logic approximation. For the sake of simplicity we'll use the logic approximation to model the growth term. For example, in the case $Y$ is regulated by a single input we might use $f(X) = \beta \theta(X > K_1)$. For the equivalent function where $Y$ was regulated by two transcription factor, $X_1$ and $X_2$, and both are required to be above the respective threshold, we could use the function $f(X_1, X_2) = \beta \theta (X_1 > K_1 \land X_2 > K_2)$.
The second term, $\alpha Y$ represents the rate at which $Y$ is being broken down or diluted. Notice that the decay rate is a proportional to the amount of $Y$ that is present.
Change in concentration under constant activation
Now let's explore a simple model of regulation for the two gene network, $X \longrightarrow Y$. Here we assume that at time 0 the activator, $X$, rises above the threshold, $K$, necessary to induce transcription of $Y$ at the rate $\beta$. $X$ remains above this threshold for the entire simulation. Therefore, we can write $dY/dt$ as:
$$
\frac{dY}{dt} = \beta - \alpha Y
$$
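Before simulating numerically, note that this particular equation can be solved exactly: starting from $Y(0) = 0$ the solution is
$$
Y(t) = \frac{\beta}{\alpha}\left(1 - e^{-\alpha t}\right)
$$
so $Y$ rises toward a steady state of $Y_{st} = \beta/\alpha$. With the parameter values used in the simulation below ($\beta = 0.2$, $\alpha = 0.05$) this predicts a steady state of $0.2/0.05 = 4$, which gives a handy sanity check on the numerical results.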
Write a Python function to represent the change in $Y$ in a given time increment, under this assumption of constant activation:
End of explanation
"""
# setup pulse of X
# off (0) for first 50 steps, on for next 100 steps, off again for last 100 steps
X = [0]*50 + [1]*100 + [0]*100
Y = [0]
B = 0.2
K = 0.5
a = 0.05
nsteps = 250
for i in range(1, nsteps):
xnow = X[i]
growth = logic_activating(xnow, B, K)
decay = a*Y[-1]
deltay = growth - decay
ynew = Y[-1] + deltay
Y.append(ynew)
plot(X, color='red', linestyle='dashed', label="X")
plot(Y, color='blue', label="Y")
ylim(0, 4.5)
xlabel('Time units')
ylabel('Concentration')
legend(loc="best")
pass
"""
Explanation: Toggling the activator X
In the preceding example the activator $X$ was on at the beginning of the simulation and just stayed on. Let's see what happens when $X$ has pulsatile dynamics. This would be akin to toggling $X$ on then off, and asking what happens to $Y$.
End of explanation
"""
|
wanderer2/pymc3
|
docs/source/notebooks/GLM-robust-with-outlier-detection.ipynb
|
apache-2.0
|
%matplotlib inline
%qtconsole --colors=linux
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import optimize
import pymc3 as pm
import theano as thno
import theano.tensor as T
# configure some basic options
sns.set(style="darkgrid", palette="muted")
pd.set_option('display.notebook_repr_html', True)
plt.rcParams['figure.figsize'] = 12, 8
np.random.seed(0)
"""
Explanation: GLM: Robust Regression with Outlier Detection
A minimal reproducible example of Robust Regression with Outlier Detection using the Hogg 2010 Signal vs Noise method.
This is a complementary approach to the Student-T robust regression as illustrated in Thomas Wiecki's notebook in the PyMC3 documentation; that approach is also compared here.
This model returns a robust estimate of linear coefficients and an indication of which datapoints (if any) are outliers.
The likelihood evaluation is essentially a copy of eqn 17 in "Data analysis recipes: Fitting a model to data" - Hogg 2010.
The model is adapted specifically from Jake Vanderplas' implementation (3rd model tested).
The dataset is tiny and hardcoded into this Notebook. It contains errors in both the x and y, but we will deal here with only errors in y.
Note:
Python 3.4 project using latest available PyMC3
Developed using ContinuumIO Anaconda distribution on a Macbook Pro 3GHz i7, 16GB RAM, OSX 10.10.5.
During development I've found that 3 data points are always indicated as outliers, but the remaining ordering of datapoints by decreasing outlier-hood is slightly unstable between runs: the posterior surface appears to have a small number of solutions with similar probability.
Finally, if runs become unstable or Theano throws weird errors, try clearing the cache $> theano-cache clear and rerunning the notebook.
Package Requirements (shown as a conda-env YAML):
```
$> less conda_env_pymc3_examples.yml
name: pymc3_examples
channels:
- defaults
dependencies:
- python=3.4
- ipython
- ipython-notebook
- ipython-qtconsole
- numpy
- scipy
- matplotlib
- pandas
- seaborn
- patsy
- pip
$> conda env create --file conda_env_pymc3_examples.yml
$> source activate pymc3_examples
$> pip install --process-dependency-links git+https://github.com/pymc-devs/pymc3
```
Setup
End of explanation
"""
#### cut & pasted directly from the fetch_hogg2010test() function
## identical to the original dataset as hardcoded in the Hogg 2010 paper
dfhogg = pd.DataFrame(np.array([[1, 201, 592, 61, 9, -0.84],
[2, 244, 401, 25, 4, 0.31],
[3, 47, 583, 38, 11, 0.64],
[4, 287, 402, 15, 7, -0.27],
[5, 203, 495, 21, 5, -0.33],
[6, 58, 173, 15, 9, 0.67],
[7, 210, 479, 27, 4, -0.02],
[8, 202, 504, 14, 4, -0.05],
[9, 198, 510, 30, 11, -0.84],
[10, 158, 416, 16, 7, -0.69],
[11, 165, 393, 14, 5, 0.30],
[12, 201, 442, 25, 5, -0.46],
[13, 157, 317, 52, 5, -0.03],
[14, 131, 311, 16, 6, 0.50],
[15, 166, 400, 34, 6, 0.73],
[16, 160, 337, 31, 5, -0.52],
[17, 186, 423, 42, 9, 0.90],
[18, 125, 334, 26, 8, 0.40],
[19, 218, 533, 16, 6, -0.78],
[20, 146, 344, 22, 5, -0.56]]),
columns=['id','x','y','sigma_y','sigma_x','rho_xy'])
## for convenience zero-base the 'id' and use as index
dfhogg['id'] = dfhogg['id'] - 1
dfhogg.set_index('id', inplace=True)
## standardize (mean center and divide by 1 sd)
dfhoggs = (dfhogg[['x','y']] - dfhogg[['x','y']].mean(0)) / dfhogg[['x','y']].std(0)
dfhoggs['sigma_y'] = dfhogg['sigma_y'] / dfhogg['y'].std(0)
dfhoggs['sigma_x'] = dfhogg['sigma_x'] / dfhogg['x'].std(0)
## create xlims ylims for plotting
xlims = (dfhoggs['x'].min() - np.ptp(dfhoggs['x'])/5
,dfhoggs['x'].max() + np.ptp(dfhoggs['x'])/5)
ylims = (dfhoggs['y'].min() - np.ptp(dfhoggs['y'])/5
,dfhoggs['y'].max() + np.ptp(dfhoggs['y'])/5)
## scatterplot the standardized data
g = sns.FacetGrid(dfhoggs, size=8)
_ = g.map(plt.errorbar, 'x', 'y', 'sigma_y', 'sigma_x', marker="o", ls='')
_ = g.axes[0][0].set_ylim(ylims)
_ = g.axes[0][0].set_xlim(xlims)
plt.subplots_adjust(top=0.92)
_ = g.fig.suptitle('Scatterplot of Hogg 2010 dataset after standardization', fontsize=16)
"""
Explanation: Load and Prepare Data
We'll use the Hogg 2010 data available at https://github.com/astroML/astroML/blob/master/astroML/datasets/hogg2010test.py
It's a very small dataset so for convenience, it's hardcoded below
End of explanation
"""
with pm.Model() as mdl_ols:
## Define weakly informative Normal priors to give Ridge regression
b0 = pm.Normal('b0_intercept', mu=0, sd=100)
b1 = pm.Normal('b1_slope', mu=0, sd=100)
## Define linear model
yest = b0 + b1 * dfhoggs['x']
## Use y error from dataset, convert into theano variable
sigma_y = thno.shared(np.asarray(dfhoggs['sigma_y'],
dtype=thno.config.floatX), name='sigma_y')
## Define Normal likelihood
likelihood = pm.Normal('likelihood', mu=yest, sd=sigma_y, observed=dfhoggs['y'])
"""
Explanation: Observe:
Even judging just by eye, you can see these datapoints mostly fall on / around a straight line with positive gradient
It looks like a few of the datapoints may be outliers from such a line
Create Conventional OLS Model
The linear model is really simple and conventional:
$$\bf{y} = \beta^{T} \bf{X} + \bf{\sigma}$$
where:
$\beta$ = coefs = ${1, \beta_{j \in X_{j}}}$
$\sigma$ = the measured error in $y$ in the dataset sigma_y
Define model
NOTE:
+ We're using a simple linear OLS model with Normally distributed priors so that it behaves like a ridge regression
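In other words, a zero-mean Normal prior on each coefficient contributes a term of the form $-\beta_{j}^{2} / (2\sigma_{prior}^{2})$ to the log posterior, which is exactly an L2 (ridge) penalty with strength $1/(2\sigma_{prior}^{2})$; with sd=100 these priors are very weak, so the shrinkage here is mild.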
End of explanation
"""
with mdl_ols:
## find MAP using Powell, seems to be more robust
start_MAP = pm.find_MAP(fmin=optimize.fmin_powell, disp=True)
## take samples
traces_ols = pm.sample(2000, start=start_MAP, step=pm.NUTS(), progressbar=True)
"""
Explanation: Sample
End of explanation
"""
_ = pm.traceplot(traces_ols[-1000:], figsize=(12,len(traces_ols.varnames)*1.5),
lines={k: v['mean'] for k, v in pm.df_summary(traces_ols[-1000:]).iterrows()})
"""
Explanation: View Traces
NOTE: I'll 'burn' the traces to only retain the final 1000 samples
End of explanation
"""
with pm.Model() as mdl_studentt:
## Define weakly informative Normal priors to give Ridge regression
b0 = pm.Normal('b0_intercept', mu=0, sd=100)
b1 = pm.Normal('b1_slope', mu=0, sd=100)
## Define linear model
yest = b0 + b1 * dfhoggs['x']
## Use y error from dataset, convert into theano variable
sigma_y = thno.shared(np.asarray(dfhoggs['sigma_y'],
dtype=thno.config.floatX), name='sigma_y')
## define prior for Student T degrees of freedom
nu = pm.DiscreteUniform('nu', lower=1, upper=100)
## Define Student T likelihood
likelihood = pm.StudentT('likelihood', mu=yest, sd=sigma_y, nu=nu
,observed=dfhoggs['y'])
"""
Explanation: NOTE: We'll illustrate this OLS fit and compare to the datapoints in the final plot
Create Robust Model: Student-T Method
I've added this brief section in order to directly compare the Student-T based method exampled in Thomas Wiecki's notebook in the PyMC3 documentation
Instead of using a Normal distribution for the likelihood, we use a Student-T, which has fatter tails. In theory this allows outliers to have a smaller mean square error in the likelihood, and thus have less influence on the regression estimation. This method does not produce inlier / outlier flags but is simpler and faster to run than the Signal Vs Noise model below, so a comparison seems worthwhile.
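To make the 'fatter tails' point concrete, here is a small sketch (using scipy.stats, which is already part of the scipy dependency listed above; the chosen x values are arbitrary) comparing the density of a standard Normal with that of a Student-T with $\nu = 1$ a few standard deviations out -- the Student-T puts orders of magnitude more probability on large residuals, so outlying points are penalized far less:

```python
# Compare tail densities of a standard Normal and a Student-T with nu=1 (illustrative only)
from scipy import stats

for x in [0, 2, 4, 6]:
    print('x = {}: Normal pdf = {:.2e}, Student-T(nu=1) pdf = {:.2e}'.format(
        x, stats.norm.pdf(x), stats.t.pdf(x, 1)))
```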
Note: we'll constrain the Student-T 'degrees of freedom' parameter nu to be an integer, but otherwise leave it as just another stochastic to be inferred: no need for prior knowledge.
Define Model
End of explanation
"""
with mdl_studentt:
## find MAP using Powell, seems to be more robust
start_MAP = pm.find_MAP(fmin=optimize.fmin_powell, disp=True)
## two-step sampling to allow Metropolis for nu (which is discrete)
step1 = pm.NUTS([b0, b1])
step2 = pm.Metropolis([nu])
## take samples
traces_studentt = pm.sample(2000, start=start_MAP, step=[step1, step2], progressbar=True)
"""
Explanation: Sample
End of explanation
"""
_ = pm.traceplot(traces_studentt[-1000:]
,figsize=(12,len(traces_studentt.varnames)*1.5)
,lines={k: v['mean'] for k, v in pm.df_summary(traces_studentt[-1000:]).iterrows()})
"""
Explanation: View Traces
End of explanation
"""
def logp_signoise(yobs, is_outlier, yest_in, sigma_y_in, yest_out, sigma_y_out):
'''
Define custom loglikelihood for inliers vs outliers.
NOTE: in this particular case we don't need to use theano's @as_op
decorator because (as stated by Twiecki in conversation) that's only
required if the likelihood cannot be expressed as a theano expression.
We also now get the gradient computation for free.
'''
# likelihood for inliers
pdfs_in = T.exp(-(yobs - yest_in + 1e-4)**2 / (2 * sigma_y_in**2))
pdfs_in /= T.sqrt(2 * np.pi * sigma_y_in**2)
logL_in = T.sum(T.log(pdfs_in) * (1 - is_outlier))
# likelihood for outliers
pdfs_out = T.exp(-(yobs - yest_out + 1e-4)**2 / (2 * (sigma_y_in**2 + sigma_y_out**2)))
pdfs_out /= T.sqrt(2 * np.pi * (sigma_y_in**2 + sigma_y_out**2))
logL_out = T.sum(T.log(pdfs_out) * is_outlier)
return logL_in + logL_out
with pm.Model() as mdl_signoise:
## Define weakly informative Normal priors to give Ridge regression
b0 = pm.Normal('b0_intercept', mu=0, sd=100)
b1 = pm.Normal('b1_slope', mu=0, sd=100)
## Define linear model
yest_in = b0 + b1 * dfhoggs['x']
## Define weakly informative priors for the mean and variance of outliers
yest_out = pm.Normal('yest_out', mu=0, sd=100)
sigma_y_out = pm.HalfNormal('sigma_y_out', sd=100)
## Define Bernoulli inlier / outlier flags according to a hyperprior
## fraction of outliers, itself constrained to [0,.5] for symmetry
frac_outliers = pm.Uniform('frac_outliers', lower=0., upper=.5)
is_outlier = pm.Bernoulli('is_outlier', p=frac_outliers, shape=dfhoggs.shape[0])
## Extract observed y and sigma_y from dataset, encode as theano objects
yobs = thno.shared(np.asarray(dfhoggs['y'], dtype=thno.config.floatX), name='yobs')
sigma_y_in = thno.shared(np.asarray(dfhoggs['sigma_y']
, dtype=thno.config.floatX), name='sigma_y_in')
## Use custom likelihood using DensityDist
likelihood = pm.DensityDist('likelihood', logp_signoise,
observed={'yobs':yobs, 'is_outlier':is_outlier,
'yest_in':yest_in, 'sigma_y_in':sigma_y_in,
'yest_out':yest_out, 'sigma_y_out':sigma_y_out})
"""
Explanation: Observe:
Both parameters b0 and b1 show quite a skew to the right; this may be the effect of a few samples regressing closer to the OLS estimate, which lies towards the left
The nu parameter seems very happy to stick at nu = 1, indicating that a fat-tailed Student-T likelihood has a better fit than a thin-tailed (Normal-like) Student-T likelihood.
The inference sampling also ran very quickly, almost as quickly as the conventional OLS
NOTE: We'll illustrate this Student-T fit and compare to the datapoints in the final plot
Create Robust Model with Outliers: Hogg Method
Please read the paper (Hogg 2010) and Jake Vanderplas' code for more complete information about the modelling technique.
The general idea is to create a 'mixture' model whereby datapoints can be described by either the linear model (inliers) or a modified linear model with different mean and larger variance (outliers).
The likelihood is evaluated over a mixture of two likelihoods, one for 'inliers', one for 'outliers'. A Bernoulli distribution is used to randomly assign datapoints in N to either the inlier or outlier groups, and we sample the model as usual to infer robust model parameters and inlier / outlier flags:
$$
\mathcal{logL} = \sum_{i}^{i=N} log \left[ \frac{(1 - B_{i})}{\sqrt{2 \pi \sigma_{in}^{2}}} exp \left( - \frac{(x_{i} - \mu_{in})^{2}}{2\sigma_{in}^{2}} \right) \right] + \sum_{i}^{i=N} log \left[ \frac{B_{i}}{\sqrt{2 \pi (\sigma_{in}^{2} + \sigma_{out}^{2})}} exp \left( - \frac{(x_{i}- \mu_{out})^{2}}{2(\sigma_{in}^{2} + \sigma_{out}^{2})} \right) \right]
$$
where:
$\bf{B}$ is Bernoulli-distributed $B_{i} \in \{0_{(inlier)}, 1_{(outlier)}\}$
Define model
End of explanation
"""
with mdl_signoise:
## two-step sampling to create Bernoulli inlier/outlier flags
step1 = pm.NUTS([frac_outliers, yest_out, sigma_y_out, b0, b1])
step2 = pm.BinaryMetropolis([is_outlier], tune_interval=100)
## find MAP using Powell, seems to be more robust
start_MAP = pm.find_MAP(fmin=optimize.fmin_powell, disp=True)
## take samples
traces_signoise = pm.sample(2000, start=start_MAP, step=[step1,step2], progressbar=True)
"""
Explanation: Sample
End of explanation
"""
_ = pm.traceplot(traces_signoise[-1000:], figsize=(12,len(traces_signoise.varnames)*1.5),
lines={k: v['mean'] for k, v in pm.df_summary(traces_signoise[-1000:]).iterrows()})
"""
Explanation: View Traces
End of explanation
"""
outlier_melt = pd.melt(pd.DataFrame(traces_signoise['is_outlier', -1000:],
columns=['[{}]'.format(int(d)) for d in dfhoggs.index]),
var_name='datapoint_id', value_name='is_outlier')
ax0 = sns.pointplot(y='datapoint_id', x='is_outlier', data=outlier_melt,
kind='point', join=False, ci=None, size=4, aspect=2)
_ = ax0.vlines([0,1], 0, 19, ['b','r'], '--')
_ = ax0.set_xlim((-0.1,1.1))
_ = ax0.set_xticks(np.arange(0, 1.1, 0.1))
_ = ax0.set_xticklabels(['{:.0%}'.format(t) for t in np.arange(0,1.1,0.1)])
_ = ax0.yaxis.grid(True, linestyle='-', which='major', color='w', alpha=0.4)
_ = ax0.set_title('Prop. of the trace where datapoint is an outlier')
_ = ax0.set_xlabel('Prop. of the trace where is_outlier == 1')
"""
Explanation: NOTE:
During development I've found that 3 datapoints id=[1,2,3] are always indicated as outliers, but the remaining ordering of datapoints by decreasing outlier-hood is unstable between runs: the posterior surface appears to have a small number of solutions with very similar probability.
The NUTS sampler seems to work okay, and indeed it's a nice opportunity to demonstrate a custom likelihood which is possible to express as a theano function (thus allowing a gradient-based sampler like NUTS). However, with a more complicated dataset, I would spend time understanding this instability and potentially prefer using more samples under Metropolis-Hastings.
Declare Outliers and Compare Plots
View ranges for inliers / outlier predictions
At each step of the traces, each datapoint may be either an inlier or outlier. We hope that the datapoints spend an unequal time being one state or the other, so let's take a look at the simple count of states for each of the 20 datapoints.
End of explanation
"""
cutoff = 5
dfhoggs['outlier'] = np.percentile(traces_signoise[-1000:]['is_outlier'],cutoff, axis=0)
dfhoggs['outlier'].value_counts()
"""
Explanation: Observe:
The plot above shows the number of samples in the traces in which each datapoint is marked as an outlier, expressed as a percentage.
In particular, 3 points [1, 2, 3] spend >=95% of their time as outliers
Contrastingly, points at the other end of the plot close to 0% are our strongest inliers.
For comparison, the mean posterior value of frac_outliers is ~0.35, corresponding to roughly 7 of the 20 datapoints. You can see these 7 datapoints in the plot above, all those with a value >50% or thereabouts.
However, only 3 of these points are outliers >=95% of the time.
See note above regarding instability between runs.
The 95% cutoff we choose is subjective and arbitrary, but I prefer it for now, so let's declare these 3 to be outliers and see how it looks compared to Jake Vanderplas' outliers, which were declared in a slightly different way as points with means above 0.68.
Declare outliers
Note:
+ I will declare outliers to be datapoints that have value == 1 at the 5-percentile cutoff, i.e. datapoints that are flagged as outliers in at least 95% of the retained samples.
+ Try for yourself altering cutoff to larger values, which leads to an objective ranking of outlier-hood.
End of explanation
"""
g = sns.FacetGrid(dfhoggs, size=8, hue='outlier', hue_order=[True,False],
palette='Set1', legend_out=False)
lm = lambda x, samp: samp['b0_intercept'] + samp['b1_slope'] * x
pm.glm.plot_posterior_predictive(traces_ols[-1000:],
eval=np.linspace(-3, 3, 10), lm=lm, samples=200, color='#22CC00', alpha=.2)
pm.glm.plot_posterior_predictive(traces_studentt[-1000:], lm=lm,
eval=np.linspace(-3, 3, 10), samples=200, color='#FFA500', alpha=.5)
pm.glm.plot_posterior_predictive(traces_signoise[-1000:], lm=lm,
eval=np.linspace(-3, 3, 10), samples=200, color='#357EC7', alpha=.3)
_ = g.map(plt.errorbar, 'x', 'y', 'sigma_y', 'sigma_x', marker="o", ls='').add_legend()
_ = g.axes[0][0].annotate('OLS Fit: Green\nStudent-T Fit: Orange\nSignal Vs Noise Fit: Blue',
size='x-large', xy=(1,0), xycoords='axes fraction',
xytext=(-160,10), textcoords='offset points')
_ = g.axes[0][0].set_ylim(ylims)
_ = g.axes[0][0].set_xlim(xlims)
"""
Explanation: Posterior Prediction Plots for OLS vs StudentT vs SignalNoise
End of explanation
"""
|
miykael/nipype_tutorial
|
notebooks/advanced_aws.ipynb
|
bsd-3-clause
|
from nipype.interfaces.io import DataSink
ds = DataSink()
ds.inputs.base_directory = 's3://mybucket/path/to/output/dir'
"""
Explanation: Using Nipype with Amazon Web Services (AWS)
Several groups have been successfully using Nipype on AWS. This procedure
involves setting up a temporary cluster using StarCluster and potentially
transferring files to/from S3. The latter is supported by Nipype through
DataSink and S3DataGrabber.
Using DataSink with S3
The DataSink class now supports sending output data directly to an AWS S3
bucket. It does this through the introduction of several input attributes to the
DataSink interface and by parsing the base_directory attribute. This class
uses the boto3 and
botocore Python packages to
interact with AWS. To configure the DataSink to write data to S3, the user must
set the base_directory property to an S3-style filepath.
For example:
End of explanation
"""
ds.inputs.creds_path = '/home/neuro/aws_creds/credentials.csv'
ds.inputs.encrypt_bucket_keys = True
ds.local_copy = '/home/neuro/workflow_outputs/local_backup'
"""
Explanation: With the "s3://" prefix in the path, the DataSink knows that the output
directory to send files is on S3 in the bucket "mybucket". "path/to/output/dir"
is the relative directory path within the bucket "mybucket" where output data
will be uploaded to (Note: if the relative path specified contains folders that
don’t exist in the bucket, the DataSink will create them). The DataSink treats
the S3 base directory exactly as it would a local directory, maintaining support
for containers, substitutions, subfolders, "." notation, etc. to route output
data appropriately.
There are four new attributes introduced with S3-compatibility: creds_path,
encrypt_bucket_keys, local_copy, and bucket.
End of explanation
"""
|
limu007/TPX
|
ExperimentEval.ipynb
|
mit
|
x=np.r_[-3:3:20j]
sigy=3.
tres=[0.5,0.2,7,-0.5,0] # true parameters
ytrue=np.polyval(tres,x)
pl.plot(x,ytrue,'k')
y=ytrue+np.random.normal(0,sigy,size=x.shape)
pl.plot(x,y,'*')
"""
Explanation: <footer id="attribution" style="float:right; color:#999; background:#fff;">
Sitola seminar 2019</footer>
Variance decomposition
in ANOVA we study different factors and their interactions
single-factor means marginalize out all other dependencies
Bias-variance tradeoff
increasing model complexity to remove systematic residuals
uncertainties of parameters rise fast due to correlations
Design of experiments
full and partial "designs"
see for example
https://newonlinecourses.science.psu.edu/stat503/
Simple 4th-order polynomial
we try to model the data with a set of polynomials of the form $\sum_{i=0}^n a_i x^i$ with increasing degree $n$
the true values correspond to a 4th-order polynomial (the quadratic term dominates)
End of explanation
"""
errs=[[round(p,5) for p in np.sqrt(r[1].diagonal()[::-1])/sigy] for r in res]
text=[" ".join(["%6.3f"%p for p in abs(res[i][0][::-1]/np.array(errs[i]))]) for i in range(len(res))]
for i in range(1,len(text)+1):
print("order %i:"%i,text[i-1])
"""
Explanation: Statistical significance of parameters
t-values for the null hypothesis of individual parameters (polynomial coefficients)
End of explanation
"""
ords=np.arange(1,10)
res=[np.polyfit(x,y,i,cov=True) for i in ords]
#[[round(p,3) for p in r[0][::-1]] for r in res]
text=[" ".join(["%6.3f"%p for p in r[0][::-1]]) for r in res]
for i in range(1,len(text)+1):
print("order %i:"%i,text[i-1])
"""
Explanation: significant values at higher order coefficients
End of explanation
"""
%matplotlib inline
from matplotlib import pyplot as pl
import numpy as np
x=np.r_[0:8:100j]
fun=[np.ones_like(x),0.5*x]#0.05*x**2]
fun.append(0.3*np.sin(x*2.))
fun.append(np.exp(-(x-5)**2/2.))
fun.append(np.exp(-(x-2)**2))
nmeas=20
[pl.plot(x,f) for f in fun]
pl.legend(["constant","linear","periodic","low peak","high peak"])
allpars=np.array([np.random.normal(a,0.4*abs(a),size=nmeas) for a in [3,-1,1.5,2,3.1]]) # a set of different coefficients for the individual components
"""
Explanation: Principal component analysis
"ultimate correlation tool" (C.R.Jenkins) - transforming into space of orthogonal directions
first component $a_1 X$ to maximize variance in chosen direction $a_1$
next
End of explanation
"""
testfunc=allpars.T.dot(fun)
for i in range(len(testfunc)):
testfunc[i]+=np.random.normal(0,0.3,size=len(x))
pl.plot(x,testfunc[3])
pl.plot(x,testfunc[1]);
renfunc=testfunc-testfunc.mean(1)[:,np.newaxis]
renfunc/=np.sqrt((renfunc**2).sum(1))[:,np.newaxis]
cormat10=renfunc[:10].dot(renfunc[:10].T)
eig10,vecs10=np.linalg.eig(cormat10)
#eig10
prinfunc=vecs10.T.dot(renfunc[:10])
[pl.plot(p) for p in prinfunc[:4]];
"""
Explanation: we have 20 sets of parameters; generate combinations of the basis functions and add some Gaussian noise
End of explanation
"""
import nbinteract as nbi
import ipywidgets as widgets
def empirical_props(num_plants):
    # Simulate sample proportions; the true proportion of 0.74 is an assumed
    # value for illustration (the original function body was a placeholder).
    return np.random.binomial(num_plants, 0.74, size=5000) / num_plants
opts = {'title': 'Distribution of sample proportions',
        'xlabel': 'Sample Proportion', 'ylabel': 'Percent per unit',
        'xlim': (0.64, 0.84), 'ylim': (0, 25), 'bins': 20}
nbi.hist(empirical_props, options=opts,
         num_plants=widgets.ToggleButtons(options=[100, 200, 400, 800]))
"""
Explanation: Interactive histograms
End of explanation
"""
|
nwjs/chromium.src
|
third_party/tensorflow-text/src/docs/guide/bert_preprocessing_guide.ipynb
|
bsd-3-clause
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TensorFlow Authors.
End of explanation
"""
!pip install -q -U tensorflow-text
import tensorflow as tf
import tensorflow_text as text
import functools
"""
Explanation: BERT Preprocessing with TF Text
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/text/guide/bert_preprocessing_guide"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/text/blob/master/docs/guide/bert_preprocessing_guide.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/text/blob/master/docs/guide/bert_preprocessing_guide.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/text/docs/guide/bert_preprocessing_guide.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Overview
Text preprocessing is the end-to-end transformation of raw text into a model’s integer inputs. NLP models are often accompanied by several hundreds (if not thousands) of lines of Python code for preprocessing text. Text preprocessing is often a challenge for models because:
Training-serving skew. It becomes increasingly difficult to ensure that the preprocessing logic of the model's inputs is consistent at all stages of model development (e.g. pretraining, fine-tuning, evaluation, inference).
Using different hyperparameters, tokenization, string preprocessing algorithms or simply packaging model inputs inconsistently at different stages could yield hard-to-debug and disastrous effects to the model.
Efficiency and flexibility. While preprocessing can be done offline (e.g. by writing out processed outputs to files on disk and then reconsuming said preprocessed data in the input pipeline), this method incurs an additional file read and write cost. Preprocessing offline is also inconvenient if there are preprocessing decisions that need to happen dynamically. Experimenting with a different option would require regenerating the dataset again.
Complex model interface. Text models are much more understandable when their inputs are pure text. It's hard to understand a model when its inputs require an extra, indirect encoding step. Reducing the preprocessing complexity is especially appreciated for model debugging, serving, and evaluation.
Additionally, simpler model interfaces also make it more convenient to try the model (e.g. inference or training) on different, unexplored datasets.
Text preprocessing with TF.Text
Using TF.Text's text preprocessing APIs, we can construct a preprocessing
function that can transform a user's text dataset into the model's
integer inputs. Users can package preprocessing directly as part of their model to alleviate the above mentioned problems.
This tutorial will show how to use TF.Text preprocessing ops to transform text data into inputs for the BERT model and inputs for language masking pretraining task described in "Masked LM and Masking Procedure" of BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. The process involves tokenizing text into subword units, combining sentences, trimming content to a fixed size and extracting labels for the masked language modeling task.
Setup
Let's import the packages and libraries we need first.
End of explanation
"""
examples = {
"text_a": [
b"Sponge bob Squarepants is an Avenger",
b"Marvel Avengers"
],
"text_b": [
b"Barack Obama is the President.",
b"President is the highest office"
],
}
dataset = tf.data.Dataset.from_tensor_slices(examples)
next(iter(dataset))
"""
Explanation: Our data contains two text features and we can create an example tf.data.Dataset. Our goal is to create a function that we can supply to Dataset.map() to be used in training.
End of explanation
"""
_VOCAB = [
# Special tokens
b"[UNK]", b"[MASK]", b"[RANDOM]", b"[CLS]", b"[SEP]",
# Suffixes
b"##ack", b"##ama", b"##ger", b"##gers", b"##onge", b"##pants", b"##uare",
b"##vel", b"##ven", b"an", b"A", b"Bar", b"Hates", b"Mar", b"Ob",
b"Patrick", b"President", b"Sp", b"Sq", b"bob", b"box", b"has", b"highest",
b"is", b"office", b"the",
]
_START_TOKEN = _VOCAB.index(b"[CLS]")
_END_TOKEN = _VOCAB.index(b"[SEP]")
_MASK_TOKEN = _VOCAB.index(b"[MASK]")
_RANDOM_TOKEN = _VOCAB.index(b"[RANDOM]")
_UNK_TOKEN = _VOCAB.index(b"[UNK]")
_MAX_SEQ_LEN = 8
_MAX_PREDICTIONS_PER_BATCH = 5
_VOCAB_SIZE = len(_VOCAB)
lookup_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(
keys=_VOCAB,
key_dtype=tf.string,
values=tf.range(
tf.size(_VOCAB, out_type=tf.int64), dtype=tf.int64),
value_dtype=tf.int64),
num_oov_buckets=1
)
"""
Explanation: Tokenizing
Our first step is to run any string preprocessing and tokenize our dataset. This can be done using the text.BertTokenizer, which is a text.Splitter that can tokenize sentences into subwords or wordpieces for the BERT model given a vocabulary generated from the Wordpiece algorithm. You can learn more about other subword tokenizers available in TF.Text from here.
The vocabulary can be from a previously generated BERT checkpoint, or you can generate one yourself on your own data. For the purposes of this example, let's create a toy vocabulary:
End of explanation
"""
bert_tokenizer = text.BertTokenizer(lookup_table, token_out_type=tf.string)
bert_tokenizer.tokenize(examples["text_a"])
bert_tokenizer.tokenize(examples["text_b"])
"""
Explanation: Let's construct a text.BertTokenizer using the above vocabulary and tokenize the text inputs into a RaggedTensor.
End of explanation
"""
bert_tokenizer = text.BertTokenizer(lookup_table, token_out_type=tf.int64)
segment_a = bert_tokenizer.tokenize(examples["text_a"])
segment_a
segment_b = bert_tokenizer.tokenize(examples["text_b"])
segment_b
"""
Explanation: Text output from text.BertTokenizer allows us see how the text is being tokenized, but the model requires integer IDs. We can set the token_out_type param to tf.int64 to obtain integer IDs (which are the indices into the vocabulary).
End of explanation
"""
segment_a = segment_a.merge_dims(-2, -1)
segment_a
segment_b = segment_b.merge_dims(-2, -1)
segment_b
"""
Explanation: text.BertTokenizer returns a RaggedTensor with shape [batch, num_tokens, num_wordpieces]. Because we don't need the extra num_tokens dimensions for our current use case, we can merge the last two dimensions to obtain a RaggedTensor with shape [batch, num_wordpieces]:
End of explanation
"""
trimmer = text.RoundRobinTrimmer(max_seq_length=[_MAX_SEQ_LEN])
trimmed = trimmer.trim([segment_a, segment_b])
trimmed
"""
Explanation: Content Trimming
The main input to BERT is a concatenation of two sentences. However, BERT requires inputs of a fixed size and shape, and we may have content which exceeds our budget.
We can tackle this by using a text.Trimmer to trim our content down to a predetermined size (once concatenated along the last axis). There are different text.Trimmer types which select content to preserve using different algorithms. text.RoundRobinTrimmer for example will allocate quota equally for each segment but may trim the ends of sentences. text.WaterfallTrimmer will trim starting from the end of the last sentence.
For our example, we will use RoundRobinTrimmer which selects items from each segment in a left-to-right manner.
End of explanation
"""
segments_combined, segments_ids = text.combine_segments(
[segment_a, segment_b],
start_of_sequence_id=_START_TOKEN, end_of_segment_id=_END_TOKEN)
segments_combined, segments_ids
"""
Explanation: trimmed now contains the segments where the number of elements across a batch is 8 elements (when concatenated along axis=-1).
Combining segments
Now that we have segments trimmed, we can combine them together to get a single RaggedTensor. BERT uses special tokens to indicate the beginning ([CLS]) and end of a segment ([SEP]). We also need a RaggedTensor indicating which items in the combined Tensor belong to which segment. We can use text.combine_segments() to get both of these Tensor with special tokens inserted.
End of explanation
"""
random_selector = text.RandomItemSelector(
max_selections_per_batch=_MAX_PREDICTIONS_PER_BATCH,
selection_rate=0.2,
unselectable_ids=[_START_TOKEN, _END_TOKEN, _UNK_TOKEN]
)
selected = random_selector.get_selection_mask(
segments_combined, axis=1)
selected
"""
Explanation: Masked Language Model Task
Now that we have our basic inputs, we can begin to extract the inputs needed for the "Masked LM and Masking Procedure" task described in BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
The masked language model task has two sub-problems for us to think about: (1) what items to select for masking and (2) what values are they assigned?
Item Selection
Because we will choose to select items randomly for masking, we will use a text.RandomItemSelector. RandomItemSelector randomly selects items in a batch subject to restrictions given (max_selections_per_batch, selection_rate and unselectable_ids) and returns a boolean mask indicating which items were selected.
End of explanation
"""
input_ids = tf.ragged.constant([[19, 7, 21, 20, 9, 8], [13, 4, 16, 5], [15, 10, 12, 11, 6]])
mask_values_chooser = text.MaskValuesChooser(_VOCAB_SIZE, _MASK_TOKEN, 0.8)
mask_values_chooser.get_mask_values(input_ids)
"""
Explanation: Choosing the Masked Value
The methodology described the original BERT paper for choosing the value for masking is as follows:
For mask_token_rate of the time, replace the item with the [MASK] token:
"my dog is hairy" -> "my dog is [MASK]"
For random_token_rate of the time, replace the item with a random word:
"my dog is hairy" -> "my dog is apple"
For 1 - mask_token_rate - random_token_rate of the time, keep the item
unchanged:
"my dog is hairy" -> "my dog is hairy."
text.MaskedValuesChooser encapsulates this logic and can be used for our preprocessing function. Here's an example of what MaskValuesChooser returns given a mask_token_rate of 80% and default random_token_rate:
End of explanation
"""
masked_token_ids, masked_pos, masked_lm_ids = text.mask_language_model(
segments_combined,
item_selector=random_selector, mask_values_chooser=mask_values_chooser)
"""
Explanation: When supplied with a RaggedTensor input, text.MaskValuesChooser returns a RaggedTensor of the same shape with either _MASK_TOKEN, a random ID, or the same unchanged ID.
Generating Inputs for Masked Language Model Task
Now that we have a RandomItemSelector to help us select items for masking and text.MaskValuesChooser to assign the values, we can use text.mask_language_model() to assemble all the inputs of this task for our BERT model.
End of explanation
"""
masked_token_ids
"""
Explanation: Let's dive deeper and examine the outputs of mask_language_model(). The output of masked_token_ids is:
End of explanation
"""
tf.gather(_VOCAB, masked_token_ids)
"""
Explanation: Remember that our input is encoded using a vocabulary. If we decode masked_token_ids using our vocabulary, we get:
End of explanation
"""
masked_pos
"""
Explanation: Notice that some wordpiece tokens have been replaced with either [MASK], [RANDOM] or a different ID value. masked_pos output gives us the indices (in the respective batch) of the tokens that have been replaced.
End of explanation
"""
masked_lm_ids
"""
Explanation: masked_lm_ids gives us the original value of the token.
End of explanation
"""
tf.gather(_VOCAB, masked_lm_ids)
"""
Explanation: We can again decode the IDs here to get human readable values.
End of explanation
"""
# Prepare and pad combined segment inputs
input_word_ids, input_mask = text.pad_model_inputs(
masked_token_ids, max_seq_length=_MAX_SEQ_LEN)
input_type_ids, _ = text.pad_model_inputs(
    segments_ids, max_seq_length=_MAX_SEQ_LEN)
# Prepare and pad masking task inputs
masked_lm_positions, masked_lm_weights = text.pad_model_inputs(
    masked_pos, max_seq_length=_MAX_PREDICTIONS_PER_BATCH)
masked_lm_ids, _ = text.pad_model_inputs(
masked_lm_ids, max_seq_length=_MAX_PREDICTIONS_PER_BATCH)
model_inputs = {
"input_word_ids": input_word_ids,
"input_mask": input_mask,
"input_type_ids": input_type_ids,
"masked_lm_ids": masked_lm_ids,
"masked_lm_positions": masked_lm_positions,
"masked_lm_weights": masked_lm_weights,
}
model_inputs
"""
Explanation: Padding Model Inputs
Now that we have all the inputs for our model, the last step in our preprocessing is to package them into fixed 2-dimensional Tensors with padding and also generate a mask Tensor indicating the values which are pad values. We can use text.pad_model_inputs() to help us with this task.
End of explanation
"""
def bert_pretrain_preprocess(vocab_table, features):
# Input is a string Tensor of documents, shape [batch, 1].
text_a = features["text_a"]
text_b = features["text_b"]
# Tokenize segments to shape [num_sentences, (num_words)] each.
tokenizer = text.BertTokenizer(
vocab_table,
token_out_type=tf.int64)
segments = [tokenizer.tokenize(text).merge_dims(
1, -1) for text in (text_a, text_b)]
# Truncate inputs to a maximum length.
trimmer = text.RoundRobinTrimmer(max_seq_length=6)
trimmed_segments = trimmer.trim(segments)
# Combine segments, get segment ids and add special tokens.
segments_combined, segment_ids = text.combine_segments(
trimmed_segments,
start_of_sequence_id=_START_TOKEN,
end_of_segment_id=_END_TOKEN)
# Apply dynamic masking task.
masked_input_ids, masked_lm_positions, masked_lm_ids = (
text.mask_language_model(
segments_combined,
random_selector,
mask_values_chooser,
)
)
# Prepare and pad combined segment inputs
input_word_ids, input_mask = text.pad_model_inputs(
masked_input_ids, max_seq_length=_MAX_SEQ_LEN)
input_type_ids, _ = text.pad_model_inputs(
    segment_ids, max_seq_length=_MAX_SEQ_LEN)
# Prepare and pad masking task inputs
masked_lm_positions, masked_lm_weights = text.pad_model_inputs(
    masked_lm_positions, max_seq_length=_MAX_PREDICTIONS_PER_BATCH)
masked_lm_ids, _ = text.pad_model_inputs(
masked_lm_ids, max_seq_length=_MAX_PREDICTIONS_PER_BATCH)
model_inputs = {
"input_word_ids": input_word_ids,
"input_mask": input_mask,
"input_type_ids": input_type_ids,
"masked_lm_ids": masked_lm_ids,
"masked_lm_positions": masked_lm_positions,
"masked_lm_weights": masked_lm_weights,
}
return model_inputs
"""
Explanation: Review
Let's review what we have so far and assemble our preprocessing function. Here's what we have:
End of explanation
"""
dataset = tf.data.Dataset.from_tensors(examples)
dataset = dataset.map(functools.partial(
bert_pretrain_preprocess, lookup_table))
next(iter(dataset))
"""
Explanation: We previously constructed a tf.data.Dataset and we can now use our assembled preprocessing function bert_pretrain_preprocess() in Dataset.map(). This allows us to create an input pipeline for transforming our raw string data into integer inputs and feed directly into our model.
End of explanation
"""
|
blanton144/exex
|
docs/notebooks/images.ipynb
|
bsd-3-clause
|
A1, A2 = 10, 1
sig1 = 1.
sig2 = 2.47 * sig1
xc, yc = 13.3, 14.1 #real center
a = 2 * np.sqrt(2 * np.log(2)) # a ~ 2.4
FWHM1, FWHM2 = a * sig1, a * sig2
dx, dy = 1, 1 #dx,dy<=sqrt(2*np.log(2))*sig1~1.2
x = np.arange(0, 30., dx)
y = np.arange(0.,30., dy)
xx, yy = np.meshgrid(x, y) #xx,yy=i,j
Nx = len(x)
Ny = len(y)
z = (A1 * np.exp(- ((xx - xc)**2 + (yy - yc)**2) / (2 * sig1**2)) +
A2 * np.exp(- ((xx - xc)**2 + (yy - yc)**2) / (2 * sig2**2)))
z = z.transpose()
#refer to value at (xx,yy): z[i,j]
plt.figure()
plt.imshow(z, cmap='gray', vmin=z.min(), vmax=z.max(),
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest', origin='lower')
plt.title('image (nearest)')
plt.colorbar()
plt.show()
"""
Explanation: 3. Image Centroid Accuracy
Author: Jiarong Zhu
This problem tests the measurement of image centroids.
(a) Write a piece of code to generate a fake, critically-sampled image of a double Gaussian, with a center that isn’t necessarily at the center of a pixel. For the second Gaussian, use A2 = 0.1A1 and σ2 = 2.47σ1, where A indicates the value at the center of the Gaussian. This choice is an approximate description of the atmospheric PSF (Jim Gunn, private communication).
(b) Write a routine to find the light-weighted centroid of the image. Start by using the maximum pixel value, and use your knowledge of the PSF FWHM to calculate the center based on the light within 3 FWHM, and iterate to convergence.
(c) Write a routine to find the mode of the image. Start by using the maximum pixel value, but then use the 3×3 grid of pixels in the center to perform a quadratic interpolation to find the peak.
(d) Now add noise to the images, and use a Monte Carlo test to evaluate how the precision of each estimate depends on the total S/N within 3 FWHM.
3.a. Double Gaussian Image
Generate the double Gaussian image. Note that to be at least critically sampled,
the FWHM of the PSF should be at least 2 pixels. Here I set up (x,y) exactly
equal to index (i,j) to make life easier. The real position of the peak
is set up to be not at the center of a pixel.
End of explanation
"""
def light_weighted_centroid(z, noise):
data = z + noise
index = np.where(data == np.amax(data)) # find index of pixel with maximum value
i0, j0 = index[0].item(), index[1].item()
R = int(3 * min(FWHM2, FWHM1) /dx)
l0 = 0 # 0th order moment
l1 = np.array([0, 0]) # 1st order moment
for i in range(len(x)):
for j in range(len(y)):
if ((i - i0)**2 + (j - j0)**2 <= R**2):
l0 = l0 + data[i, j]
l1 = l1 + np.array([i, j]) * data[i, j]
(x0, y0) = l1 / l0 #light-weighted centroid
return x0, y0
x0,y0 = light_weighted_centroid(z, np.zeros((Nx, Ny)))
print('The light-weighted centroid: (x0, y0) = ', (x0, y0))
"""
Explanation: 3.b. Light-weighted Centroid
The light-weighted centroid can be calculated through this equation:
$x_{0} = \frac{\sum_i x_i \cdot \mathrm{flux}_i}{\sum_i \mathrm{flux}_i}$
The FWHM of the PSF should be of the same order as the maximum of {FWHM1, FWHM2}, so I use the latter to give the 3 FWHM range.
End of explanation
"""
def quad_fitting_centroid(z, noise):
data = z + noise
index = np.where(data == np.amax(data)) # find index of pixel with maximum value
i0, j0 = index[0].item(),index[1].item()
X0, X1, X2, X3, X4 = np.ones(9), [], [], [], []
Y = []
for i in range(i0 - 1, i0 + 2):
for j in range(j0 - 1, j0 + 2):
X1.append(i - i0)
X2.append(j - j0)
X3.append((i - i0)**2)
X4.append((j - j0)**2)
Y.append(np.log(data[i, j]))
B = np.array([X0, X1, X2, X3, X4])
A = B.transpose()
(u, w, v) = np.linalg.svd(A, full_matrices=False)
inv_w = 1. / w
c = v.transpose().dot(np.diag(inv_w)).dot(u.transpose()).dot(Y)
x0, y0 = i0 - c[1] / (2 * c[3]), j0 - c[2] / (2. * c[4])
return x0, y0
x0, y0 = quad_fitting_centroid(z, np.zeros((Nx, Ny)))
print('The centroid estimated by quadratic fitting: (x0, y0)=', (x0, y0))
"""
Explanation: 3.c. Fit Quadratic Function
Use the values of the 3*3 grid of pixels around the maximum pixel and perform a quadratic fit, i.e. fit the function $f = c_0 + c_1 x + c_2 y + c_3 x^2 + c_4 y^2$. With all coefficients fitted, the centroid estimate is at $x_0 = - \frac{c_1}{2 c_3}$, $y_0 = - \frac{c_2}{2 c_4}$ (from setting $\partial f/\partial x = c_1 + 2 c_3 x = 0$ and $\partial f/\partial y = c_2 + 2 c_4 y = 0$). Since the number of data points is 9, which is greater than the number of coefficients to fit, we can apply SVD to make use of all the data.
End of explanation
"""
def RMSE_by_MC(estimator, noise_level, signal, xc, yc): #noise=noise_level*np.random.rand(Nx,Ny)
Nx = Ny = len(signal)
nk = 1000
mse1 = []
mse2 = []
xe = np.zeros(nk)
ye = np.zeros(nk)
for k in range(nk):
noise = noise_level * np.random.randn(Nx, Ny)
txe, tye = estimator(signal, noise)
xe[k] = txe
ye[k] = tye
RMSE1 = np.sqrt(((xe - xc)**2).sum() / np.float32(nk))
RMSE2 = np.sqrt(((ye - yc)**2).sum() / np.float32(nk))
return RMSE1, RMSE2
R = int(min(FWHM2, FWHM1) / dx) + 1
print(FWHM2)
print(FWHM1)
print(R)
print(dx)
def SNR(z, noise_level, R):
index = np.where(z == np.amax(z)) # find index of pixel with maximum value
i0, j0 = index[0].item(),index[1].item()
Nx = Ny = len(z)
s = 0
n2 = 0
for i in range(Nx):
for j in range(Ny):
if ((i - i0)**2 + (j - j0)**2 <= R**2):
s = s + z[i, j]
n2 = n2 + noise_level**2
n = np.sqrt(n2)
snr = s / n
return snr
noise_level = 1 #an example
snr = SNR(z, noise_level, R)
r1, r2 = RMSE_by_MC(light_weighted_centroid, noise_level, z, xc, yc)
print(r1, r2, snr)
print(z.sum())
"""
Explanation: 3.d. Effect of Noise
Add noise to the image and estimate the centroid position with the two estimators above. Use a Monte Carlo simulation to calculate the root mean square error (RMSE) of each estimator.
End of explanation
"""
noise_level_list = [0.05, 0.1, 0.5, 1., 2.]
snr = []
rx1,ry1 = [], []
rx2,ry2 = [], []
for noise_level in noise_level_list:
print(noise_level)
snr.append(SNR(z, noise_level, R))
rrx1, rry1 = RMSE_by_MC(light_weighted_centroid, noise_level, z, xc, yc)
rrx2, rry2 = RMSE_by_MC(quad_fitting_centroid, noise_level, z, xc, yc)
rx1.append(rrx1)
ry1.append(rry1)
rx2.append(rrx2)
ry2.append(rry2)
plt.figure()
plt.plot(np.log10(snr), np.log10(rx1), color='red', linestyle='dotted', label='xc, light-weighted')
plt.plot(np.log10(snr), np.log10(ry1), color='black', linestyle='dotted', label='yc, light-weighted')
plt.plot(np.log10(snr), np.log10(rx2), color='red', label='xc, quadratic fitting')
plt.plot(np.log10(snr), np.log10(ry2), color='black', label='yc, quadratic fitting')
plt.scatter(np.log10(snr), np.log10(rx1), color='red', s=2)
plt.scatter(np.log10(snr), np.log10(ry1), color='black', s=2)
plt.scatter(np.log10(snr), np.log10(rx2), color='red', s=2)
plt.scatter(np.log10(snr), np.log10(ry2), color='black', s=2)
plt.plot(np.log10(snr), np.log10(0.685 * FWHM1 / np.array(snr)), color='yellow')
plt.xlabel('$\log_{10}$ S/N within 2 FWHM')
plt.ylabel('$\log_{10}$ error')
plt.legend()
plt.show()
"""
Explanation: Below I generate a series of noise levels and calculate the RMSEs of xc and yc for both estimators, then plot the RMSEs as a function of the S/N within 3 FWHM.
End of explanation
"""
|
rubensfernando/mba-analytics-big-data
|
Python/2016-08-08/aula7-parte5-er.ipynb
|
mit
|
import re
texto = 'um exemplo palavra:python!!'
match = re.search('python', texto)
print(match)
if match:
print('encontrou: ' + match.group())
else:
print('não encontrou')
"""
Explanation: Regular Expressions
Searching
End of explanation
"""
texto = "GGATCGGAGCGGATGCC"
match = re.search(r'a[tg]c', texto, re.IGNORECASE)
if match:
print('encontrou: ' + match.group())
else:
print('não encontrou')
match
"""
Explanation: We can also use the IGNORECASE flag
End of explanation
"""
texto = 'teste-teste@gmail-teste.com'
match = re.search(r'([\w.-]+)@([\w.-]+)', texto)
if match:
print('email:', match.group())
print('login:', match.group(1))
print('dominio:', match.group(2))
else:
print('não encontrou')
"""
Explanation: Extracting parts of a regular expression match
End of explanation
"""
texto = 'teste teste@gmail.com teste123 teste-123@gmail.com, python 123@123.com'
emails = re.findall(r'[\w.-]+@[\w.-]+', texto)
for email in emails:
print(email)
"""
Explanation: Finding all occurrences
To find all occurrences we can use the re.findall() method, which saves every match in a list, where each position represents one occurrence.
End of explanation
"""
arquivo = open('er-emails.txt', 'r')
"""
Explanation: Exercises
Exercise 1
Find all the e-mails in the file er-emails.txt. Use the findall() function and print each of the e-mails.
End of explanation
"""
# Part 1
# Part 2
# Part 3a - years
# Part 3b - months
# Part 3c - days
# Part 3d - time
# Part 4
"""
Explanation: Exercise 2
Consider the file er-dados.txt. This file contains several strings in the format:
Tue Feb 15 10:39:54 2028::xjkmxk@cltllsls.com
Create a regular expression to find all the e-mails and save them in a list called emails = [].
Create a regular expression to retrieve the login and the domain of each item saved in the emails list. Save each item in the lists logins = [] and domínios = []
Create a regular expression to retrieve the year, month, day, and time.
Finally, print the values in the following format:
1 - full@email.com | login | domain | day/month/year | hour:minute
It is important to think about how the file should be loaded. Note that the time in the file includes seconds while the final output does not.
End of explanation
"""
|
robotcator/gensim
|
docs/notebooks/sklearn_wrapper.ipynb
|
lgpl-2.1
|
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklearnWrapperLdaModel
"""
Explanation: Using wrappers for Scikit learn API
This tutorial is about using gensim models as a part of your scikit learn workflow with the help of wrappers found at gensim.sklearn_integration
The wrappers available (as of now) are:
* LdaModel (gensim.sklearn_integration.sklearn_wrapper_gensim_ldaModel.SklearnWrapperLdaModel), which implements gensim's LdaModel in a scikit-learn interface
* LsiModel (gensim.sklearn_integration.sklearn_wrapper_gensim_lsiModel.SklearnWrapperLsiModel), which implements gensim's LsiModel in a scikit-learn interface
LdaModel
To use LdaModel begin with importing LdaModel wrapper
End of explanation
"""
from gensim.corpora import Dictionary
texts = [
['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer']
]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
"""
Explanation: Next we will create a dummy set of texts and convert it into a corpus
End of explanation
"""
model=SklearnWrapperLdaModel(num_topics=2, id2word=dictionary, iterations=20, random_state=1)
model.fit(corpus)
model.print_topics(2)
model.transform(corpus)
"""
Explanation: Then to run the LdaModel on it
End of explanation
"""
import numpy as np
from gensim import matutils
from gensim.models.ldamodel import LdaModel
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklearnWrapperLdaModel
rand = np.random.mtrand.RandomState(1) # set seed for getting same result
cats = ['rec.sport.baseball', 'sci.crypt']
data = fetch_20newsgroups(subset='train', categories=cats, shuffle=True)
"""
Explanation: Integration with Sklearn
To provide a better example of how it can be used with sklearn, let's use the CountVectorizer method of sklearn. For this example we will use the 20 Newsgroups dataset. We will only use the categories rec.sport.baseball and sci.crypt and use them to generate topics.
End of explanation
"""
vec = CountVectorizer(min_df=10, stop_words='english')
X = vec.fit_transform(data.data)
vocab = vec.get_feature_names() # vocab to be converted to id2word
id2word = dict([(i, s) for i, s in enumerate(vocab)])
"""
Explanation: Next, we use CountVectorizer to convert the collection of text documents to a matrix of token counts.
End of explanation
"""
obj = SklearnWrapperLdaModel(id2word=id2word, num_topics=5, passes=20)
lda = obj.fit(X)
lda.print_topics()
"""
Explanation: Next, we just need to fit X and id2word to our Lda wrapper.
End of explanation
"""
from sklearn.model_selection import GridSearchCV
from gensim.models.coherencemodel import CoherenceModel
def scorer(estimator, X, y=None):
goodcm = CoherenceModel(model=estimator, texts= texts, dictionary=estimator.id2word, coherence='c_v')
return goodcm.get_coherence()
obj = SklearnWrapperLdaModel(id2word=dictionary, num_topics=5, passes=20)
parameters = {'num_topics': (2, 3, 5, 10), 'iterations': (1, 20, 50)}
model = GridSearchCV(obj, parameters, scoring=scorer, cv=5)
model.fit(corpus)
model.best_params_
"""
Explanation: Example for Using Grid Search
End of explanation
"""
from sklearn.pipeline import Pipeline
from sklearn import linear_model
def print_features_pipe(clf, vocab, n=10):
''' Better printing for sorted list '''
coef = clf.named_steps['classifier'].coef_[0]
    print(coef)
    print('Positive features: %s' % (' '.join(['%s:%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[::-1][:n] if coef[j] > 0])))
    print('Negative features: %s' % (' '.join(['%s:%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[:n] if coef[j] < 0])))
id2word = Dictionary([_.split() for _ in data.data])
corpus = [id2word.doc2bow(i.split()) for i in data.data]
model = SklearnWrapperLdaModel(num_topics=15, id2word=id2word, iterations=50, random_state=37)
clf = linear_model.LogisticRegression(penalty='l2', C=0.1) # l2 penalty used
pipe = Pipeline((('features', model,), ('classifier', clf)))
pipe.fit(corpus, data.target)
print_features_pipe(pipe, id2word.values())
print(pipe.score(corpus, data.target))
"""
Explanation: Example of Using Pipeline
End of explanation
"""
from gensim.sklearn_integration.sklearn_wrapper_gensim_lsimodel import SklearnWrapperLsiModel
"""
Explanation: LsiModel
To use LsiModel begin with importing LsiModel wrapper
End of explanation
"""
model = SklearnWrapperLsiModel(num_topics=15, id2word=id2word)
clf = linear_model.LogisticRegression(penalty='l2', C=0.1) # l2 penalty used
pipe = Pipeline((('features', model,), ('classifier', clf)))
pipe.fit(corpus, data.target)
print_features_pipe(pipe, id2word.values())
print(pipe.score(corpus, data.target))
"""
Explanation: Example of Using Pipeline
End of explanation
"""
|
schaber/deep-learning
|
gan_mnist/Intro_to_GANs_Exercises.ipynb
|
mit
|
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator; it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, [None, real_dim], name='input_real')
inputs_z = tf.placeholder(tf.float32, [None, z_dim], name='input_z')
return inputs_real, inputs_z
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
Exercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('generator', reuse=reuse): # finish this
# Hidden layer
#h1 = tf.contrib.layers.fully_connected(z, n_units)
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha*h1, h1)
# Logits and tanh output
#logits = tf.contrib.layers.fully_connected(h1, out_dim)
logits = tf.layers.dense(h1, out_dim)
out = tf.tanh(logits)
return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can just take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform best with $tanh$ for its output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
Exercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse=reuse): # finish this
# Hidden layer
#h1 = tf.contrib.layers.fully_connected(x, n_units)
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha*h1, h1)
#logits = tf.contrib.layers.fully_connected(h1, 1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
Exercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.
End of explanation
"""
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size)
# g_model is the generator output
# Discriminator network here
d_model_real, d_logits_real = discriminator(input_real)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
Exercise: Build the network from the functions you defined earlier.
End of explanation
"""
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_real, labels=tf.ones_like(d_logits_real)*(1.0-smooth)
))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)
))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)
))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.
Exercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.
End of explanation
"""
for v in tf.trainable_variables():
print(v.name)
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = []
d_vars = []
for v in t_vars:
if v.name.startswith('generator'):
g_vars.append(v)
elif v.name.startswith('discriminator'):
d_vars.append(v)
else:
print('Unexpected variable: {}'.format(v))
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
Exercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that updates the network variables separately.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
johnhw/summerschool2016
|
unsupervised_image_learning/manifold_2.ipynb
|
mit
|
import numpy as np
import sklearn.datasets, sklearn.linear_model, sklearn.neighbors
import sklearn.manifold, sklearn.cluster
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os, time
import scipy.io.wavfile, scipy.signal
import cv2
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (18.0, 10.0)
from jslog import js_key_update
# This code logs keystrokes IN THIS JUPYTER NOTEBOOK WINDOW ONLY (not any other activity)
# Log file is ../jupyter_keylog.csv
%%javascript
function push_key(e,t,n){var o=keys.push([e,t,n]);o>500&&(kernel.execute("js_key_update(["+keys+"])"),keys=[])}var keys=[],tstart=window.performance.now(),last_down_t=0,key_states={},kernel=IPython.notebook.kernel;document.onkeydown=function(e){var t=window.performance.now()-tstart;key_states[e.which]=[t,last_down_t],last_down_t=t},document.onkeyup=function(e){var t=window.performance.now()-tstart,n=key_states[e.which];if(void 0!=n){var o=n[0],s=n[1];if(0!=s){var a=t-o,r=o-s;push_key(e.which,a,r),delete n[e.which]}}};
"""
Explanation: Manifold Learning: Part II
End of explanation
"""
## Self organising maps
digits = sklearn.datasets.load_digits()
digits.data -= 8.0
import som
som = reload(som)
som_map = som.SOM(48,48,64)
som_map.learn(digits.data, epochs=50000)
for v in [20,30,40,50]:
plt.figure()
plt.imshow(som_map.codebook[:,:,v], cmap="magma", interpolation="nearest")
plt.axis("off")
plt.imshow(som_map.codebook[10,10,:].reshape(8,8), cmap="gray", interpolation="nearest")
plt.grid("off")
plt.figure(figsize=(32,32))
for i in range(0,48,2):
for j in range(0,48,2):
img = som_map.codebook[i,j,:].reshape(8,8)
plt.imshow(img, cmap="gray", extent=[i,i+2,j,j+2])
plt.xlim(0,48)
plt.ylim(0,48)
plt.axis("off")
"""
Explanation: <a id="manifold"></a>
Dimensional reduction
A very common unsupervised learning task is dimensional reduction; taking a dataset with a dimension of $d_h$ and reducing to a dimension of $d_l$ which is smaller than $d_h$ but retains as much of the useful information as possible. The most common application is for visualisation, because humans are best at interpreting 2D data and struggle with higher dimensions.
Even 3D structure can be tricky for humans to get their heads around!
<img src="imgs/topologic.jpg">
Dimensional reduction can be thought of as a form of lossy compression -- finding a "simpler" representation of the data which captures its essential properties. This of course depends upon what the "essential properties" that we want to keep are, but generally we want to reject noise and keep non-random structure. We find a subspace that captures the meaningful variation of a dataset.
One way of viewing this process is finding latent variables; variables we did not directly observe, but which are simple explanations of the ones we did observe. For example, if we measure a large number of weather measurements (rainfall, pressure, humidity, windspeed), these might be a very redundant representation of a few simple variables (e.g. is there a storm?). If features correlate or cluster in the measured data we can learn this structure even without knowing training labels.
Manifold learning
One way of looking at this problem is learning a manifold on which the data lies (or lies close to). A manifold is a geometrical structure which is locally like a low-dimensional Euclidean space. Imagine data points lying on the surface of a sheet of paper crumpled into a ball, or a 1D filament or string tangled up in a 3D space.
Manifold approaches attempt to automatically find these smooth embedded structures by examining the local structure of datapoints (often by analysing the nearest neighbour graph of points). This is more flexible than linear dimensional reduction as it can in theory unravel very complex or tangled datasets.
However, the algorithms are usually approximate, they do not give guarantees that they will find a given manifold, and can be computationally intensive to run.
<img src="imgs/isomap.jpg">
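As a quick aside (not part of the original notebook), scikit-learn can unravel a synthetic "swiss roll" with a manifold learner in a few lines; this toy sketch assumes only that scikit-learn and matplotlib are available:
import sklearn.datasets, sklearn.manifold
import matplotlib.pyplot as plt

# Sample points lying on a rolled-up 2D sheet embedded in 3D.
X, t = sklearn.datasets.make_swiss_roll(n_samples=1000, noise=0.05, random_state=0)
iso = sklearn.manifold.Isomap(n_neighbors=10, n_components=2)
X2 = iso.fit_transform(X)                    # 3D points flattened onto a 2D plane
plt.scatter(X2[:, 0], X2[:, 1], c=t, cmap="viridis", s=5)
plt.title("Swiss roll unravelled by Isomap")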
Self organising maps
<a id="som"></a>
Self-organising maps are a nice halfway house between clustering and manifold learning approaches. They create a dense "net" of clusters in the original (high-dimensional) space, and force the cluster points to also lie in a low-dimensional space with local structure, for example, on a regular 2D grid. This maps a discretized low-dimensional space into the high-dimensional space.
The algorithm causes the clusters to have local smoothness in both the high and the low dimensional space; it does this by forcing cluster points on the grid to move closer (in the high-d space) to their neighbours (in the low-d grid).
<img src="imgs/somtraining.png"> [Image from https://en.wikipedia.org/wiki/Self-organizing_map]
In other words: clusters that are close together in the high-dimensional space should be close together in the low dimensional space. This "unravels" high dimensional structure into a simple low-dimensional approximation.
End of explanation
"""
import scipy.spatial.distance
def umatrix(codebook):
## take the average HD distance to all neighbours within
## certain radius in the 2D distance
x_code, y_code = np.meshgrid(np.arange(codebook.shape[0]), np.arange(codebook.shape[1]))
hdmatrix = codebook.reshape(codebook.shape[0]*codebook.shape[1], codebook.shape[2])
hd_distance = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(hdmatrix))**2
ld_distance = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(np.vstack([x_code.ravel(), y_code.ravel()]).T))
return np.mean(hd_distance * (np.logical_and(ld_distance>0,ld_distance<1.5)),axis=1).reshape(codebook.shape[0], codebook.shape[1])
plt.figure(figsize=(14,14))
um = umatrix(som_map.codebook)
plt.imshow(um, interpolation="nearest", cmap="viridis")
plt.grid("off")
"""
Explanation: The U-Matrix
One very nice aspect of the self-organising map is that we can extract the U-matrix, which captures how close together in the high-dimensional space points in the low-dimensional map are. This lets us see whether there are natural partitions in the layout; wrinkles in the layout that might be good clustering points.
End of explanation
"""
# load a video of my head in different orientations
face_frames = np.load("data/face_frames.npz")['arr_0']
# show the video in opencv -- it's just a raw sequence of values
# the video is 700 frames of 64x64 imagery
frame_ctr = 0
# play the video back
while frame_ctr<face_frames.shape[1]:
frame = face_frames[:,frame_ctr].reshape(64,64)
cv2.imshow('Face video', cv2.resize(frame, (512,512), interpolation=cv2.INTER_NEAREST))
frame_ctr += 1
key = cv2.waitKey(1) & 0xff
if key == 27:
break
# clean up
cv2.destroyAllWindows()
# fit isomap to the face data (this takes a few minutes)
faces = face_frames.T
isomap = sklearn.manifold.Isomap(n_neighbors=25)
isomap.fit(faces)
xy = isomap.transform(faces)
orig_xy = np.array(xy)
## the following code just plots images on the plot without overlap
overlaps = []
def is_overlap(ra,rb):
P1X, P2X, P1Y, P2Y = ra
P3X, P4X, P3Y, P4Y = rb
return not ( P2X <= P3X or P1X >= P4X or P2Y <= P3Y or P1Y >= P4Y )
def overlap_test(r):
if any([is_overlap(r,rb) for rb in overlaps]):
return False
overlaps.append(r)
return True
def plot_some_faces(xy, faces, thin=1.0, sz=8):
global overlaps
overlaps = []
q = sz/4
for i in range(len(xy)):
x, y = xy[i,0], xy[i,1]
image = faces[i,:].copy()
if np.random.random()<thin:
for j in range(10):
x, y = xy[i,0], xy[i,1]
x += np.random.uniform(-q,q)
y += np.random.uniform(-q, q)
x *= q
y *= q
extent = [x, x+sz, y, y+sz]
if overlap_test(extent):
img = image.reshape(64,64)
img[:,0] = 1
img[:,-1] = 1
img[0,:] = 1
img[-1,:] = 1
plt.imshow(img, vmin=0, vmax=1, cmap="gray",interpolation="lanczos",extent=extent, zorder=100)
break
## make a 2D plot of the faces
# tweak co-ordinates
xy[:,0] = -orig_xy[:,0] / 2.5
xy[:,1] = orig_xy[:,1]
plt.figure(figsize=(20,20))
# plot the faces
plot_some_faces(xy, faces, sz=10)
# set the axes correctly
plt.xlim(np.min(xy[:,0])-10,np.max(xy[:,0])+10)
plt.ylim(np.min(xy[:,1])-10,np.max(xy[:,1])+10)
plt.gca().patch.set_facecolor('gray')
plt.xlim(-70,70)
plt.ylim(-70,70)
plt.grid("off")
frame_ctr = 0
# play the video back, but show the projected dimension on the screen
while frame_ctr<face_frames.shape[1]:
frame = face_frames[:,frame_ctr].reshape(64,64)
frame = (frame*256).astype(np.uint8)
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
xy = isomap.transform([face_frames[:,frame_ctr]])
cx, cy = 256, 256
s = 6
x,y = xy[0]
resized_frame = cv2.resize(frame, (512,512), interpolation=cv2.INTER_NEAREST)
cv2.circle(resized_frame, (int(cx-x*s), int(cy-y*s)), 10, (0,255,0), -1)
cv2.imshow('Face video', resized_frame)
frame_ctr += 1
key = cv2.waitKey(1) & 0xff
if key == 27:
break
cv2.destroyAllWindows()
"""
Explanation: ISOMAP: The face-direction example
<a id="isomap"></a>
A popular manifold learning algorithm is ISOMAP which uses nearest neighbour graphs to identify locally connected parts of a dataset. This examines local neighbor graphs to find an "unraveling" of the space to a 1D or 2D subspace, which can deal with very warped high-dimensional data, and doesn't get confused by examples like the swiss roll above (assuming parameters are set correctly!).
Let's use ISOMAP (a local neighbours embedding approach) to build a real, working vision based interface.
End of explanation
"""
# simple OpenCV image capture from the video device
class Webcam(object):
def __init__(self, cam_id=0):
        self.cap = cv2.VideoCapture(cam_id)
def snap(self):
        ret, frame = self.cap.read()
return frame
# snap(), snap(), snap()...
# Solution
"""
Explanation: Mapping UI controls to unsupervised structures
<a id="mapping"></a>
The point of all of this is to find control structures in sensor data. That is, to find regularities in measured values that we could use to control a user interface.
To do this, we need to map unsupervised structure onto the interface itself. We could at this point move to a supervised approach, now that we have likely candidates to target. But a simpler approach is just to hand-map unsupervised structure to controls.
Clusters
For example, if we have clustered a set of data (e.g. measurements of the joint angles of the hand), and extracted a set of fundamental poses, we can then create a mapping table from cluster indices to actions.
|cluster | 1       | 2      | 3        | 4        |
|--------|---------|--------|----------|----------|
|action  | confirm | cancel | increase | decrease |
<img src="imgs/handposes.jpg" width="400px">
Distance transform
Sometimes it is useful to have some continuous elements in an otherwise discrete interface (e.g. to support animation on state-transitions). A useful trick is to use a distance transform, which takes a datapoint in the original measured space $D_H$ and returns the distances to all cluster centres. (sklearn's transform function for certain clustering algorithms does this transformation for you)
This could be used, for example, to find the top two candidates for a hand pose, and show a smooth transition between actions as the hand interpolates between them.
The most obvious use of this is to disable any action when the distance to all clusters is too great. This implements a quiescent state and is part of solving the Midas touch problem; you only spend a small amount of time on a UI actively interacting and don't want to trigger actions all the time!
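As a rough sketch of this idea (the names training_features, x and threshold are assumptions for illustration and are not defined in the original notebook):
import numpy as np
import sklearn.cluster

# Fit poses, then map a new sensor vector to the nearest cluster -- or to nothing.
kmeans = sklearn.cluster.KMeans(n_clusters=4).fit(training_features)
dists = kmeans.transform(x.reshape(1, -1))[0]      # distance from x to every cluster centre
nearest = int(np.argmin(dists))
if dists[nearest] > threshold:                     # hand-tuned quiescent radius
    action = None                                  # too far from every pose: do nothing
else:
    action = ["confirm", "cancel", "increase", "decrease"][nearest]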
Manifolds
In the continuous case, with a dimensional reduction approach, then the mapping can often be a simple transformation of the inferred manifold. This usually requires that the manifold be oriented correctly; for example, in the head pointing example, I adjusted the signs of the resulting 2D manifold to match the direction my nose points in. More generally, it might be necessary to apply a scaling or rotation of the output with a linear transform:
$$ x_l = f(x_h) \\
x_c = A x_l, $$
where $x_l$ is the low-dimensional vector, $x_h$ is the high-dimensional sensor vector, $x_c$ is the vector (e.g. a cursor) we pass to the UI, and $A$ is a hand-tuned or learned transformation matrix.
As an example, $A = \begin{bmatrix}0 & 1 \\ -1 & 0\end{bmatrix}$ exchanges the $x$ and $y$ co-ordinates and flips the sign of the new $y$.
<img src="imgs/orienting.png">
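A minimal numpy sketch of this orientation step (the matrix and the example point are purely illustrative):
import numpy as np

A = np.array([[0.0, 1.0],
              [-1.0, 0.0]])       # swap x/y and flip the sign of the new y
x_l = np.array([0.3, -0.8])       # example low-dimensional point from the manifold learner
x_c = A @ x_l                     # cursor co-ordinates passed to the UI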
Challenge
<a id="challenge"></a>
In this practical, you will capture images from your webcam, and build a UI control using unlabeled data. Without providing any class labels or values, you have to build an interaction that can do "something interesting" from the image data.
You have complete freedom to choose what the configuration space you want to use is; you could take images of your face or hands; take images of drawn figures; image an object rotating or moving across a surface; or anything else you want.
As an illustrative example, the unsupervised approach could be used to image a soft drinks can at different rotations, and recover the rotation angle as an input (i.e. as a physical "dial").
<img src="imgs/can.jpg">
The judging criterion is the most interesting but still functional interface. The control can be discrete (using clustering) or continuous (using manifold learning). You don't have to map the controls onto a real UI, just extract and visualise a useful signal from the image data.
The final system should be able to take a webcam image and output either a class or a (possibly $n$-dimensional) continuous value.
Tips
The webcam capture code is provided for you. cam = Webcam() creates a camera object and img = cam.snap() captures a single image from the first video device; if you have several, then you can use cam = Webcam(1) etc. to select the device to use. The result will be a $H\times W\times 3$ NumPy array, with colours in BGR order.
You should resize your image (using scipy.ndimage.zoom) to something small (e.g. 32x48 or 64x64) so that the learning is feasible in the time available; a minimal capture-and-resize sketch follows this list.
Your "interface" should probably show a 2D or 1D layout of the data in the training set, and have a mode where a new webcam image can be captured and plotted on the layout. You should consider colouring the data points by their attributes (e.g. cluster label) and/or showing some small images on the plot to get an idea of what is going on.
You can preprocess features as you like, but a good clustering/manifold learning algorithm will be able to capture much of the structure without this. The simplicity of the processing applied will be considered in judging! Minimise the amount of hand-tweaking that you do.
Remember that some layout algorithms (e.g. t-SNE) are unstable. You may want to run the dimensional reduction several times and choose a good result, and use a repeatable random number seed (e.g. set it using np.random.seed or pass a custom RandomState to sklearn).
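A minimal capture-and-resize sketch (illustrative only; it assumes a working webcam and that scipy.ndimage is installed):
import scipy.ndimage

cam = Webcam()                                   # first video device
frame = cam.snap()                               # H x W x 3 BGR array
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) / 255.0
factors = (64.0 / gray.shape[0], 64.0 / gray.shape[1])
small = scipy.ndimage.zoom(gray, factors)        # roughly 64 x 64
features = small.ravel()                         # flat vector for clustering / manifold learning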
End of explanation
"""
|
bumblebeefr/poppy_rate
|
[remote] Webservice REST.ipynb
|
gpl-2.0
|
#imports and initilaize virutal poppy using vrep
from pypot.vrep import from_vrep
from poppy.creatures import PoppyHumanoid
robot = PoppyHumanoid(simulator='vrep')
#import and initialize physical poppy
from poppy.creatures import PoppyHumanoid
robot = PoppyHumanoid()
from pypot.server import HTTPRobotServer
server = HTTPRobotServer(robot,'127.0.0.1',8081)
"""
Explanation: Poppy Web Service
Starting a Poppy web service with HTTPRobotServer
End of explanation
"""
import json
import numpy
import bottle
import logging
from bottle import response
from pypot.server.server import AbstractServer
logger = logging.getLogger(__name__)
class MyJSONEncoder(json.JSONEncoder):
""" JSONEncoder which tries to call a json property before using the enconding default function. """
def default(self, obj):
if isinstance(obj, numpy.ndarray):
return list(obj)
return json.JSONEncoder.default(self, obj)
class EnableCors(object):
"""Enable CORS (Cross-Origin Resource Sharing) headers"""
name = 'enable_cors'
api = 2
def __init__(self,origin="*"):
self.origin = origin
def apply(self, fn, context):
def _enable_cors(*args, **kwargs):
# set CORS headers
response.headers['Access-Control-Allow-Origin'] = self.origin
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
if bottle.request.method != 'OPTIONS':
# actual request; reply with the actual response
return fn(*args, **kwargs)
return _enable_cors
class HTTPRobotServer(AbstractServer):
""" Bottle based HTTPServer used to remote access a robot.
Please refer to the REST API for an exhaustive list of the possible routes.
"""
def __init__(self, robot, host, port, cross_domain_origin=None):
AbstractServer.__init__(self, robot, host, port)
self.app = bottle.Bottle()
jd = lambda s: json.dumps(s, cls=MyJSONEncoder)
self.app.install(bottle.JSONPlugin(json_dumps=jd))
if(cross_domain_origin):
self.app.install(EnableCors(cross_domain_origin))
rr = self.restfull_robot
# Motors route
@self.app.get('/motor/list.json')
@self.app.get('/motor/<alias>/list.json')
def get_motor_list(alias='motors'):
return {
alias: rr.get_motors_list(alias)
}
@self.app.get('/sensor/list.json')
def get_sensor_list():
return {
'sensors': rr.get_sensors_list()
}
@self.app.get('/motor/alias/list.json')
def get_motor_alias():
return {
'alias': rr.get_motors_alias()
}
@self.app.get('/motor/<motor_name>/register/list.json')
@self.app.get('/sensor/<motor_name>/register/list.json')
def get_motor_registers(motor_name):
return {
'registers': rr.get_motor_registers_list(motor_name)
}
@self.app.get('/motor/<motor_name>/register/<register_name>')
@self.app.get('/sensor/<motor_name>/register/<register_name>')
def get_register_value(motor_name, register_name):
return {
register_name: rr.get_motor_register_value(motor_name, register_name)
}
@self.app.post('/motor/<motor_name>/register/<register_name>/value.json')
@self.app.post('/sensor/<motor_name>/register/<register_name>/value.json')
def set_register_value(motor_name, register_name):
rr.set_motor_register_value(motor_name, register_name,
bottle.request.json)
return {}
# Sensors route
# Primitives route
@self.app.get('/primitive/list.json')
        def get_primitives_list():
return {
'primitives': rr.get_primitives_list()
}
@self.app.get('/primitive/running/list.json')
        def get_running_primitives_list():
return {
'running_primitives': rr.get_running_primitives_list()
}
@self.app.get('/primitive/<prim>/start.json')
        def start_primitive(prim):
rr.start_primitive(prim)
@self.app.get('/primitive/<prim>/stop.json')
        def stop_primitive(prim):
rr.stop_primitive(prim)
@self.app.get('/primitive/<prim>/pause.json')
        def pause_primitive(prim):
rr.pause_primitive(prim)
@self.app.get('/primitive/<prim>/resume.json')
        def resume_primitive(prim):
rr.resume_primitive(prim)
@self.app.get('/primitive/<prim>/property/list.json')
        def get_primitive_properties_list(prim):
return {
'property': rr.get_primitive_properties_list(prim)
}
@self.app.get('/primitive/<prim>/property/<prop>')
        def get_primitive_property(prim, prop):
res = rr.get_primitive_property(prim, prop)
return {
'{}.{}'.format(prim, prop): res
}
@self.app.post('/primitive/<prim>/property/<prop>/value.json')
        def set_primitive_property(prim, prop):
rr.set_primitive_property(prim, prop,
bottle.request.json)
@self.app.get('/primitive/<prim>/method/list.json')
        def get_primitive_methods_list(prim):
return {
                'methods': rr.get_primitive_methods_list(prim)
}
@self.app.post('/primitive/<prim>/method/<meth>/args.json')
        def call_primitive_method(prim, meth):
res = rr.call_primitive_method(prim, meth,
bottle.request.json)
return {
'{}:{}'.format(prim, meth): res
}
def run(self, quiet=False, server='tornado'):
""" Start the bottle server, run forever. """
bottle.run(self.app,
host=self.host, port=self.port,
quiet=quiet,
server=server)
server = HTTPRobotServer(robot,'127.0.0.1',8082,cross_domain_origin='*')
"""
Explanation: http://127.0.0.1:8081/motor/list.json
Modified version of HTTPRobotServer that adds the possibility of using CORS headers, so the web service can be called via AJAX from wherever you want:
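For example (an illustrative sketch, assuming the CORS-enabled server defined here is running on 127.0.0.1:8082 and that head_z is a valid motor name on your robot), the API can be queried from any HTTP client:
import requests

base = 'http://127.0.0.1:8082'
motors = requests.get(base + '/motor/list.json').json()['motors']            # list of motor names
pos = requests.get(base + '/motor/head_z/register/present_position').json()  # {'present_position': ...}
requests.post(base + '/motor/head_z/register/goal_position/value.json', json=45.0)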
End of explanation
"""
try:
server.run()
except RuntimeError as e:
print(e)
"""
Explanation: Starting the server:
In an IPython notebook, starting the server raises an IOLoop is already running error, probably because Tornado is already running to serve the notebook itself. The server is launched anyway; to stop it, simply restart the kernel. This error does not occur when running from a script or from IPython in a console.
End of explanation
"""
robot.stand_position.start()
robot.compliant = True
"""
Explanation: The start_servers.py script creates a Poppy instance, then launches a WEB/REST server (HTTPRobotServer) as well as an RPC server (RobotServer) in background threads.
End of explanation
"""
|
csadorf/signac
|
doc/signac_102_Exploring_Data.ipynb
|
bsd-3-clause
|
import signac
project = signac.get_project('projects/tutorial')
"""
Explanation: 1.2 Exploring Data
Finding jobs
In section one of this tutorial, we evaluated the ideal gas equation and stored the results in the job document and in a file called V.txt.
Let's now have a look at how we can explore our data space for basic and advanced analysis.
We already saw how to iterate over the complete data space using the "for job in project" expression.
This is a short-hand notation for "for job in project.find_jobs()", meaning: "find all jobs".
Instead of finding all jobs, we can also find a subset using filters.
Let's get started by getting a handle on our project using the get_project() function.
We don't need to initialize the project again, since we already did that in section 1.
End of explanation
"""
for job in project.find_jobs({'p': 10.0}):
print(job.statepoint())
"""
Explanation: Next, we assume that we would like to find all jobs, where p=10.0. For this, we can use the find_jobs() method, which takes a dictionary of parameters as filter argument.
End of explanation
"""
jobs_p_gt_0_1 = [job for job in project if job.sp.p > 0.1]
for job in jobs_p_gt_0_1:
print(job.statepoint(), job.document)
"""
Explanation: In this case, that is of course only a single job.
You can execute the same kind of find operation on the command line with $ signac find, as will be shown later.
While the filtering method is optimized for a simple dissection of the data space, it is possible to construct more complex query routines for example using list comprehensions.
This is an example of how to select all jobs where the pressure p is greater than 0.1:
End of explanation
"""
for doc in project.index():
print(doc)
"""
Explanation: Finding jobs by certain criteria requires an index of the data space.
In all previous examples this index was created implicitly; however, depending on the size of the data space, it may make sense to create the index explicitly for multiple uses. This is shown in the next section.
Indexing
An index is a complete record of the data and its associated metadata within our project’s data space. To generate an index for our project's data space, use the index() method:
End of explanation
"""
index = signac.Collection(project.index())
for doc in index.find({'statepoint.p': 10.0}):
print(doc)
"""
Explanation: Using an index to operate on data is particularly useful in later stages of a computational investigation, where data may come from different projects and the actual storage location of files is less important.
You can store the index wherever it may be useful, e.g., a file, a database, or even just in a variable for repeated find operations within one script.
The signac framework provides the Collection class, which can be utilized to manage indexes in memory and on disk.
End of explanation
"""
project.create_linked_view(prefix='projects/tutorial/view')
% ls projects/tutorial/view
"""
Explanation: Views
Sometimes we want to examine our data on the file system directly. However, the file paths within the workspace are obfuscated by the job id. The solution is to use views, which are human-readable, maximally compact hierarchical links to our data space.
To create a linked view we simply execute the create_linked_view() method within python or the $ signac view command on the command line.
End of explanation
"""
% ls 'projects/tutorial/view/p_1.0/job/'
% cat 'projects/tutorial/view/p_1.0/job/V.txt'
"""
Explanation: The view paths only contain parameters which actually vary across the different jobs.
In this example, that is only the pressure p.
This allows us to examine the data with highly-compact human-readable path names:
End of explanation
"""
|
dadavidson/Python_Lab
|
Complete-Python-Bootcamp/Strings.ipynb
|
mit
|
# Single word
'hello'
# Entire phrase
'This is also a string'
# We can also use double quote
"String built with double quotes"
# Be careful with quotes!
' I'm using single quotes, but will create an error'
"""
Explanation: Strings
Strings are used in Python to record text information, such as names. Strings in Python are actually a sequence, which basically means Python keeps track of every element in the string as a sequence. For example, Python understands the string 'hello' to be a sequence of letters in a specific order. This means we will be able to use indexing to grab particular letters (like the first letter, or the last letter).
This idea of a sequence is an important one in Python and we will touch upon it again later on.
In this lecture we'll learn about the following:
1.) Creating Strings
2.) Printing Strings
3.) Differences in Printing in Python 2 vs 3
4.) String Indexing and Slicing
5.) String Properties
6.) String Methods
7.) Print Formatting
Creating a String
To create a string in Python you need to use either single quotes or double quotes. For example:
End of explanation
"""
"Now I'm ready to use the single quotes inside a string!"
"""
Explanation: The reason for the error above is that the single quote in I'm ended the string. You can use combinations of double and single quotes to get the complete statement.
End of explanation
"""
# We can simply declare a string
'Hello World'
# note that we can't output multiple strings this way
'Hello World 1'
'Hello World 2'
"""
Explanation: Now let's learn about printing strings!
Printing a String
Using Jupyter notebook with just a string in a cell will automatically output strings, but the correct way to display strings in your output is by using a print function.
End of explanation
"""
print 'Hello World 1'
print 'Hello World 2'
print 'Use \n to print a new line'
print '\n'
print 'See what I mean?'
"""
Explanation: We can use a print statement to print a string.
End of explanation
"""
# To use print function from Python 3 in Python 2
from __future__ import print_function
print('Hello World')
"""
Explanation: <font color='red'>Python 3 Alert!</font>
Something to note. In Python 3, print is a function, not a statement. So you would print statements like this:
print('Hello World')
If you want to use this functionality in Python 2, you can import it from the __future__ module.
A word of caution, after importing this you won't be able to choose the print statement method anymore. So pick whichever one you prefer depending on your Python installation and continue on with it.
End of explanation
"""
len('Hello World')
"""
Explanation: String Basics
We can also use a function called len() to check the length of a string!
End of explanation
"""
# Assign s as a string
s = 'Hello World'
#Check
s
# Print the object
print(s)
"""
Explanation: String Indexing
We know strings are a sequence, which means Python can use indexes to call parts of the sequence. Let's learn how this works.
In Python, we use brackets [] after an object to call its index. We should also note that indexing starts at 0 in Python. Let's create a new object called s and then walk through a few examples of indexing.
End of explanation
"""
# Show first element (in this case a letter)
s[0]
s[1]
s[2]
"""
Explanation: Let's start indexing!
End of explanation
"""
# Grab everything past the first term all the way to the length of s which is len(s)
s[1:]
# Note that there is no change to the original s
s
# Grab everything UP TO the 3rd index
s[:3]
"""
Explanation: We can use a : to perform slicing which grabs everything up to a designated point. For example:
End of explanation
"""
#Everything
s[:]
"""
Explanation: Note the above slicing. Here we're telling Python to grab everything from 0 up to 3. It doesn't include the 3rd index. You'll notice this a lot in Python, where statements are usually in the context of "up to, but not including".
End of explanation
"""
# Last letter (one index behind 0 so it loops back around)
s[-1]
# Grab everything but the last letter
s[:-1]
"""
Explanation: We can also use negative indexing to go backwards.
End of explanation
"""
# Grab everything, but go in steps size of 1
s[::1]
# Grab everything, but go in step sizes of 2
s[::2]
# We can use this to print a string backwards
s[::-1]
"""
Explanation: We can also use index and slice notation to grab elements of a sequence by a specified step size (the default is 1). For instance we can use two colons in a row and then a number specifying the frequency to grab elements. For example:
End of explanation
"""
s
# Let's try to change the first letter to 'x'
s[0] = 'x'
"""
Explanation: String Properties
It's important to note that strings have an important property known as immutability. This means that once a string is created, the elements within it cannot be changed or replaced. For example:
End of explanation
"""
s
# Concatenate strings!
s + ' concatenate me!'
# We can reassign s completely though!
s = s + ' concatenate me!'
print(s)
s
"""
Explanation: Notice how the error tells us directly what we can't do: change the item assignment!
Something we can do is concatenate strings!
End of explanation
"""
letter = 'z'
letter*10
"""
Explanation: We can use the multiplication symbol to create repetition!
End of explanation
"""
s
# Upper Case a string
s.upper()
# Lower case
s.lower()
# Split a string by blank space (this is the default)
s.split()
# Split by a specific element (doesn't include the element that was split on)
s.split('W')
"""
Explanation: Basic Built-in String methods
Objects in Python usually have built-in methods. These methods are functions inside the object (we will learn about these in much more depth later) that can perform actions or commands on the object itself.
We call methods with a period and then the method name. Methods are in the form:
object.method(parameters)
Where parameters are extra arguments we can pass into the method. Don't worry if the details don't make 100% sense right now. Later on we will be creating our own objects and functions!
Here are some examples of built-in methods in strings:
End of explanation
"""
'Insert another string with curly brackets: {}'.format('The inserted string')
"""
Explanation: There are many more methods than the ones covered here. Visit the advanced String section to find out more!
Print Formatting
We can use the .format() method to add formatted objects to printed string statements.
The easiest way to show this is through an example:
End of explanation
"""
|
molgor/spystats
|
notebooks/Sandboxes/Sketches_for_geopystats.ipynb
|
bsd-2-clause
|
from external_plugins.spystats import tools
%run ../HEC_runs/fit_fia_logbiomass_logspp_GLS.py
from external_plugins.spystats import tools
hx = np.linspace(0,800000,100)
"""
Explanation: Sketches for automating spatial models
This notebook is for designing the toolbox and methods for fitting spatial data.
I'm using the library Geopystats (formerly spystats).
Requirements
Given a dataset in a GeoPandas dataframe, create the Variogram object
and read the variogram data from the file.
End of explanation
"""
new_data.residuals[:10]
"""
Explanation: The object new_data has been reprojected to Albers and a linear model has been fitted, with residuals stored as residuals
End of explanation
"""
gvg.plot(refresh=False,legend=False,percentage_trunked=20)
plt.title("Semivariogram of residuals $log(Biomass) ~ log(SppR)$")
## HERE we can cast a model (Whittle) and fit it inside the global variogram
whittle_model = tools.WhittleVariogram(sill=0.345,range_a=100000,nugget=0.33,alpha=1.0)
tt = gvg.fitVariogramModel(whittle_model)
plt.plot(hx,gvg.model.f(hx),'--',lw=4,c='black')
print(whittle_model)
## This section is an example for calculating GLS. Using a small section because of computing intensity
minx = -85
maxx = -80
miny = 30
maxy = 35
section = tools._subselectDataFrameByCoordinates(new_data,'LON','LAT',minx,maxx,miny,maxy)
secvg = tools.Variogram(section,'logBiomass',model=whittle_model)
MMdist = secvg.distance_coordinates.flatten()
CovMat = secvg.model.corr_f(MMdist).reshape(len(section),len(section))
plt.imshow(CovMat)
import statsmodels.regression.linear_model as lm
import statsmodels.api as sm
model1 = lm.GLS.from_formula(formula='logBiomass ~ logSppN',data=section,sigma=CovMat)
results = model1.fit()
resum = results.summary()
k = resum.as_csv()
## Without spatial structure
Id = np.identity(len(section))
model2 = lm.GLS.from_formula(formula='logBiomass ~ logSppN',data=section,sigma=Id)
results = model2.fit()
smm =results.summary()
## Without spatial structure
import statsmodels.formula.api as smf
model3 = smf.ols(formula='logBiomass ~ logSppN',data=section)
results = model3.fit()
results.summary()
"""
Explanation: The empirical variogram
The empirical variogram has been calculated already using the HEC. A variogram object has been created which takes the values previously calculated on the HEC.
End of explanation
"""
from scipy.stats import multivariate_normal as mvn
from scipy.spatial import distance_matrix
n = 50
nx = np.linspace(0,100,n)
xx, yy = np.meshgrid(nx,nx)
points = np.vstack([ xx.ravel(), yy.ravel()]).transpose()## Generate dist matrix
Mdist = distance_matrix(points,points)
plt.imshow(Mdist)
Mdist.shape
covmat = secvg.model.corr_f(Mdist.flatten()).reshape(Mdist.shape)
plt.imshow(covmat)
meanx = np.zeros(n*n)
sim1 = mvn.rvs(mean=meanx,cov=covmat)
plt.imshow(sim1.reshape(n,n),interpolation=None)
%time sim2 = mvn.rvs(mean=meanx,cov=covmat)
plt.imshow(sim2.reshape(n,n))
"""
Explanation: Bonus! Simulation of the random process
End of explanation
"""
matm = tools.MaternVariogram(sill=0.34,range_a=100000,nugget=0.33,kappa=0.5)
expmm = tools.ExponentialVariogram(sill=0.34,range_a=100000,nugget=0.33)
gausms = tools.GaussianVariogram(sill=0.34,range_a=100000,nugget=0.33)
sphmm = tools.SphericalVariogram(sill=0.34,range_a=100000,nugget=0.33)
wm = tools.WhittleVariogram(sill=0.34,range_a=100000,nugget=0.33,alpha=1)
map(lambda l : l.fit(gvg), [matm,expmm,gausms,sphmm,wm])
print(matm)
print(expmm)
print(gausms)
print(sphmm)
print(wm)
"""
Explanation: Bonus!
Fitting the model to the empirical variogram
It's included as a method in the VariogramModel class.
End of explanation
"""
|
ethen8181/machine-learning
|
model_deployment/fastapi_kubernetes/tree_model_deployment.ipynb
|
mit
|
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import os
import time
import numpy as np
import pandas as pd
import lightgbm as lgb
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
from sklearn.datasets.california_housing import fetch_california_housing
# prevent scientific notations
pd.set_option('display.float_format', lambda x: '%.3f' % x)
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn,lightgbm
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Tree-Model-Deployment" data-toc-modified-id="Tree-Model-Deployment-1"><span class="toc-item-num">1 </span>Tree Model Deployment</a></span><ul class="toc-item"><li><span><a href="#Model-Training" data-toc-modified-id="Model-Training-1.1"><span class="toc-item-num">1.1 </span>Model Training</a></span></li><li><span><a href="#Calling-the-API" data-toc-modified-id="Calling-the-API-1.2"><span class="toc-item-num">1.2 </span>Calling the API</a></span></li></ul></li></ul></div>
End of explanation
"""
cal_housing = fetch_california_housing()
print('feature names:', cal_housing.feature_names)
print('data shape: ', cal_housing.data.shape)
print('description:')
print(cal_housing.DESCR)
"""
Explanation: Tree Model Deployment
We'll try to keep the data, feature engineering, and model training parts as short as possible, as the main focus of the repo is to build a service on top of the model.
Model Training
Loads the dataset.
End of explanation
"""
test_size = 0.2
random_state = 123
X_train, X_test, y_train, y_test = train_test_split(
cal_housing.data,
cal_housing.target,
test_size=test_size,
random_state=random_state)
print(cal_housing.feature_names)
"""
Explanation: A quick train/test split.
End of explanation
"""
dtrain = lgb.Dataset(X_train, y_train,
feature_name=cal_housing.feature_names,
free_raw_data=False)
dtest = lgb.Dataset(X_test, y_test,
feature_name=cal_housing.feature_names,
free_raw_data=False)
dtrain
params_constraint = {
'nthread': 6,
'seed': 0,
'metric': 'rmse',
'eta': 0.1,
'max_depth': 5
}
evals_result = {}
model = lgb.train(
params_constraint, dtrain,
valid_sets=[dtrain, dtest],
evals_result=evals_result,
num_boost_round=1000,
early_stopping_rounds=10,
verbose_eval=50)
"""
Explanation: Following the LightGBM Python Quickstart to train the model.
End of explanation
"""
def mape_score(y_true, y_score):
"""Mean Absolute Percentage Error (MAPE)."""
mask = y_true != 0
y_true = y_true[mask]
y_score = y_score[mask]
mape = np.abs(y_true - y_score) / y_true
return np.mean(mape)
def compute_score(model, dataset, verbose=True):
"""
Computes the model evaluation score (r2, rmse, mape) for the
input model and dataset.
"""
y_true = dataset.get_label()
y_score = model.predict(dataset.get_data())
r2 = round(metrics.r2_score(y_true, y_score), 3)
rmse = round(np.sqrt(metrics.mean_squared_error(y_true, y_score)), 3)
mape = round(mape_score(y_true, y_score), 3)
if verbose:
print('r2: ', r2)
print('rmse: ', rmse)
print('mape: ', mape)
return r2, rmse, mape
r2, rmse, mape = compute_score(model, dtest)
"""
Explanation: Quick evaluation of our regression model.
End of explanation
"""
save_path = os.path.join('app', 'model.txt')
model.save_model(save_path, num_iteration=model.best_iteration)
"""
Explanation: Saves the trained model under the app folder.
End of explanation
"""
predictions = model.predict(dtest.get_data())
predictions
model_loaded = lgb.Booster(model_file=save_path)
predictions = model_loaded.predict(dtest.get_data())
predictions
"""
Explanation: Ensure the predictions from the in-memory model and the saved model match. Here we pass in the whole test set.
End of explanation
"""
row = dtest.get_data()[0].reshape(1, -1)
row
model.predict(row)
"""
Explanation: We can also perform prediction for a single record. The caveat here is that .predict expects a 2d array, hence for single record prediction, we need to reshape it to 2d first.
End of explanation
"""
import json
import requests
# data = {
# "MedInc": 0,
# "HouseAge": 0,
# "AveRooms": 0,
# "AveBedrms": 0,
# "Population": 0,
# "AveOccup": 0,
# "Latitude": 0,
# "Longitude": 0
# }
data = {feature_name: value for feature_name, value in zip(cal_housing.feature_names, dtest.get_data()[0])}
data
"""
Explanation: Calling the API
Before proceeding on to this section, we need to create the service first. Either follow the Docker Container section in the README to host the service locally through a container or power through the Azure Kubernetes Cluster section to host the service on Azure Kubernetes Cluster.
Once we've hosted the service, we can test it using the requests library.
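As a hedged sketch of what such a service might look like (an assumption for illustration, not necessarily the implementation shipped in this repo's app/ folder), a FastAPI /predict endpoint could be as simple as:
# Hypothetical serving sketch; field names mirror the California housing features.
import lightgbm as lgb
import numpy as np
from fastapi import FastAPI
from pydantic import BaseModel

class Record(BaseModel):
    MedInc: float
    HouseAge: float
    AveRooms: float
    AveBedrms: float
    Population: float
    AveOccup: float
    Latitude: float
    Longitude: float

app = FastAPI()
model = lgb.Booster(model_file='model.txt')   # assumed path to the saved booster

@app.post('/predict')
def predict(record: Record):
    row = np.array([[record.MedInc, record.HouseAge, record.AveRooms, record.AveBedrms,
                     record.Population, record.AveOccup, record.Latitude, record.Longitude]])
    return {'prediction': float(model.predict(row)[0])}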
End of explanation
"""
# e.g. for local deployment
# url = 'http://127.0.0.1:8000/predict'
# e.g. for local docker deployment
# url = 'http://0.0.0.0:80/predict'
# e.g. for azure kubernetes cluster deployment
url = 'http://13.91.195.109:80/predict'
raw_response = requests.post(url, data=json.dumps(data))
raw_response.raise_for_status()
response = json.loads(raw_response.text)
response
%%timeit
# speed benchmark of the model
model.predict(row)[0]
%%timeit
# speed benchmark of the model hosted as a service
raw_response = requests.post(url, data=json.dumps(data))
raw_response.raise_for_status()
response = json.loads(raw_response.text)
response
"""
Explanation: Change the url accordingly, and pass our features as a JSON body.
End of explanation
"""
payloads = []
for data in dtest.get_data()[:3]:
payload = {feature_name: value for feature_name, value in zip(cal_housing.feature_names, data)}
payloads.append(payload)
payloads
url = 'http://13.91.195.109:80/batch/predict'
raw_response = requests.post(url, data=json.dumps(payloads))
raw_response.raise_for_status()
response = json.loads(raw_response.text)
response
%%timeit
# speed benchmark of the model hosted as a service using the batch endpoint
raw_response = requests.post(url, data=json.dumps(payloads))
raw_response.raise_for_status()
response = json.loads(raw_response.text)
response
"""
Explanation: We've also implemented the endpoint for supporting batch calls, i.e. to get the scores for multiple records in a single call.
End of explanation
"""
|
ryan-leung/PHYS4650_Python_Tutorial
|
notebooks/05-Python-Functions-Class.ipynb
|
bsd-3-clause
|
def hello(a,b):
return a+b
# Lazy definition of function
hello(1,1)
hello('a','b')
"""
Explanation: Python Functions and Classes
Sometimes you need to define your own functions to work with custom data or solve particular problems. A function can be defined with the def keyword. A class is like an umbrella that can contain many data types and functions; it is defined with the class keyword.
<a href="https://colab.research.google.com/github/ryan-leung/PHYS4650_Python_Tutorial/blob/master/notebooks/05-Python-Functions-Class.ipynb"><img align="right" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory">
</a>
Functions
End of explanation
"""
class Person:
def __init__(self,age,salary):
self.age = age
self.salary = salary
def out(self):
print(self.age)
print(self.salary)
"""
Explanation: Class
A class is a blueprint defining the characteristics and behaviors of an object.
python
class MyClass:
...
...
For a simple class, one should define the method
python
__init__()
to handle variables when the object is created. Let's try the following example:
End of explanation
"""
a = Person(30,10000)
a.out()
"""
Explanation: This is a basic class definition; the age and salary are needed when creating this object. The new class can be invoked like this:
End of explanation
"""
# make a list
students = ['boy', 'boy', 'girl', 'boy', 'girl', 'girl', 'boy', 'boy', 'girl', 'girl', 'boy', 'boy']
boys = 0; girls = 0
for s in students:
if s == 'boy':
boys = boys +1
else:
girls+=1
print("boys:", boys)
print("girls:", girls)
"""
Explanation: The __init__ method initializes the variables stored in the class. When they are used inside the class, we should add self. in front of the variable name. The out(self) method is an arbitrary function that can be used by calling yourclass.yourfunction(). Inputs to such functions are added after the self argument.
Python Conditionals And Loops
The for statement
The for statement reads
for xxx in yyyy:
yyyy must be an iterable, i.e. a tuple, a list, or anything else that can be iterated over. After this line, the user should indent the start of the next line, using either spaces or tabs.
Conditionals
A conditional statement is a programming concept that describes whether a region of code runs based on whether a condition is true or false. The keywords involved in conditional statements are if, and optionally elif and else.
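For example, a conditional using all three keywords (a small illustrative sketch, not from the original notebook):
score = 85
if score >= 90:
    grade = "A"
elif score >= 80:
    grade = "B"
else:
    grade = "C"
print(grade)   # prints B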
End of explanation
"""
def int_sum(n):
s=0; i=1
while i < n:
s += i*i
i += 1
return s
int_sum(1000)
"""
Explanation: The While statement
The While statement reads
while CONDITIONAL:
CONDITIONAL is a boolean expression, like i < 100, or a boolean variable. After this line, the user should indent the start of the next line, using either spaces or tabs.
End of explanation
"""
%timeit int_sum(100000)
"""
Explanation: Performance
End of explanation
"""
import numba
@numba.njit
def int_sum_nb(n):
s=0; i=1
while i < n:
s += i*i
i += 1
return s
int_sum_nb(1000)
%timeit int_sum_nb(100000)
"""
Explanation: <img src="images/numba-blue-horizontal-rgb.svg" alt="numba" style="width: 600px;"/>
<img src="images/numba_features.png" alt="numba" style="width: 600px;"/>
Numba translates Python functions to optimized machine code at runtime using the LLVM compiler library. Decorated functions are compiled just-in-time, the first time they are called. To install numba,
python
pip install numba
End of explanation
"""
import random
def monte_carlo_pi(n):
acc = 0
for i in range(n):
x = random.random()
y = random.random()
if (x**2 + y**2) < 1.0:
acc += 1
return 4.0 * acc / n
monte_carlo_pi(1000000)
%timeit monte_carlo_pi(1000000)
@numba.njit
def monte_carlo_pi_nb(n):
acc = 0
for i in range(n):
x = random.random()
y = random.random()
if (x**2 + y**2) < 1.0:
acc += 1
return 4.0 * acc / n
monte_carlo_pi_nb(1000000)
%timeit monte_carlo_pi_nb(1000000)
@numba.njit(parallel=True)  # parallel=True is needed for numba.prange to run multithreaded
def monte_carlo_pi_nbmt(n):
acc = 0
for i in numba.prange(n):
x = random.random()
y = random.random()
if (x**2 + y**2) < 1.0:
acc += 1
return 4.0 * acc / n
monte_carlo_pi_nbmt(1000000)
%timeit monte_carlo_pi_nbmt(1000000)
"""
Explanation: Examples
End of explanation
"""
|
shoyer/qspectra
|
examples/HEOM vs Redfield vs ZOFE.ipynb
|
bsd-2-clause
|
import qspectra as qs
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Parameters of the electronic Hamiltonian
ham = qs.ElectronicHamiltonian(np.array([[12881., 120.], [120., 12719.]]),
bath=qs.DebyeBath(qs.CM_K * 77., 35., 106.),
dipoles=[[1., 0., 0.], [2. * np.cos(.3), 2. * np.sin(.3), 0.]])
# Bath parameters for the Redfield and HEOM models
red_dimer = qs.RedfieldModel(ham, hilbert_subspace='gef', discard_imag_corr=True, unit_convert=qs.CM_FS)
heom_dimer = qs.HEOMModel(ham, hilbert_subspace='gef', unit_convert=qs.CM_FS, level_cutoff=3, low_temp_corr=False)
# Bath parameters for the ZOFE model:
# pseudomode bath fit to the Drude spectral density for FMO for 77K of Ishizaki and Fleming
# (each PM is represented by a Lorentzian at frequency Omega, with width gamma, and of strength huang
# in the bath correlation SPECTRUM, NOT spectral density)
Omega = [-500., -200., -90., 1., 21., 60., 80., 130., 200., 300., 400., 500., 600., 800., 1100., 1500.] # frequencies of PMs
gamma = [500., 100., 50., 50., 50., 50., 80., 40., 80., 150., 200., 200., 80., 250., 200., 300.] # dampings of the PMs
huang = [-2.5133e-03, -7.5398e-03, -2.5133e-02, 5.0265e+01, 2.2619e+00, 4.5239e-02, 2.7646e-01,
9.2991e-03, 2.2619e-02, 1.5080e-02, 3.0159e-03, 3.5186e-03, 2.8274e-04, 1.7593e-03,
4.3982e-04, 4.3982e-04] # Huang-Rhys factors of PMs (couplings to PMs)
n_sites = ham.n_sites
numb_pm = len(Omega)
on = np.ones(n_sites, complex)
Omega = np.array([Omega[pm]*on for pm in range(numb_pm)])
huang = np.array([huang[pm]*on for pm in range(numb_pm)])
gamma = np.array([gamma[pm]*on for pm in range(numb_pm)])
zofe_ham = qs.ElectronicHamiltonian(ham.H('e'),
bath=qs.PseudomodeBath(numb_pm, Omega, gamma, huang),
dipoles=ham.dipoles)
zofe_dimer = qs.ZOFEModel(zofe_ham, hilbert_subspace='ge', unit_convert=qs.CM_FS)
"""
Explanation: Electronic dimer simulated with HEOM, ZOFE, and Redfield
Set up the Hamiltonian and dynamical models for an electronic dimer:
End of explanation
"""
f, X = qs.absorption_spectra(heom_dimer, time_max=10000)
f2, X2 = qs.absorption_spectra(zofe_dimer, time_max=10000)
f3, X3 = qs.absorption_spectra(red_dimer, time_max=10000)
plt.plot(f, X, label='HEOM')
plt.plot(f2, X2, label='ZOFE')
plt.plot(f3, X3, label='Redfield')
plt.xlabel('Frequency (cm$^{-1}$)')
plt.ylabel('Absorption [arb. unit]')
plt.xlim(12500, 13200)
plt.legend();
"""
Explanation: Absorption spectra
End of explanation
"""
%%time
(f1, t2, f3), X = qs.two_dimensional_spectra(red_dimer, coherence_time_max=1000,
population_times=np.linspace(0, 1000, 50),
geometry='-++', polarization='xxxx',
include_signal='GSB,ESE,ESA')
plt.figure(figsize=(8, 8))
plt.contourf(f1, f3, X[:,5,:].real, 30, cmap='RdBu', vmax=6e5, vmin=-6e5)
plt.xlabel('Coherence frequency (cm$^{-1}$)')
plt.ylabel('Rephasing frequency (cm$^{-1}$)')
plt.xlim(12300, 13300)
plt.ylim(12300, 13300);
%%time
(f1, t2, f3), X = qs.two_dimensional_spectra(heom_dimer, coherence_time_max=1000,
population_times=np.linspace(0, 1000, 50),
geometry='-++', polarization='xxxx',
include_signal='GSB,ESE,ESA')
plt.figure(figsize=(8, 8))
plt.contourf(f1, f3, X[:,5,:].real, 30, cmap='RdBu', vmax=6e5, vmin=-6e5)
plt.xlabel('Coherence frequency (cm$^{-1}$)')
plt.ylabel('Rephasing frequency (cm$^{-1}$)')
plt.xlim(12300, 13300)
plt.ylim(12300, 13300);
"""
Explanation: 2D spectra
End of explanation
"""
|
lemonyhermit/CodingYoga
|
python-for-developers/Chapter2/Chapter2_Syntax.ipynb
|
gpl-2.0
|
#!/usr/bin/env python
# A code line that shows the result of 7 times 3
print 7 * 3
"""
Explanation: Python for Developers
First edition
Chapter 2: Syntax
A program written in Python consists of lines, which may be continued on the following lines by using the backslash character (\) at the end of the line, or by leaving parentheses, brackets or braces open in an expression.
The character # marks the beginning of a comment. Any text after the # will be ignored until the end of the line, with the exception of functional comments.
Functional comments are used to:
change the encoding of the source file of the program by adding a comment with the text # -*- coding: <encoding> -*- at the beginning of the file, in which <encoding> is the file encoding (usually latin1 or utf-8). Changing encoding is required to support characters that are not part of the English language, in the source code of the program.
define the interpreter that will be used to run the program on UNIX systems, through a comment starting with #! at the beginning of the file, which indicates the path to the interpreter (usually the comment line will be something like #!/usr/bin/env python); a combined sketch of both functional comments is shown below.
Example of functional comments:
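For instance (an illustrative sketch in addition to the book's example, not taken from the book itself), a script using both functional comments might begin like this:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Non-ASCII characters are now allowed in the source code:
print 'Olá, mundo'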
End of explanation
"""
# A line broken by backslash
a = 7 * 3 + \
5 / 2
# A list (broken by comma)
b = ['a', 'b', 'c',
'd', 'e']
# A function call (broken by comma)
c = range(1,
11)
# Prints everything
print a, b, c
"""
Explanation: Examples of broken lines:
End of explanation
"""
# For i on the list 234, 654, 378, 798:
for i in [234, 654, 378, 798]:
# If the remainder dividing by 3 is equal to zero:
if i % 3 == 0:
# Prints...
print i, '/ 3 =', i / 3
"""
Explanation: The command print inserts spaces between expressions that are received as a parameter, and a newline character at the end, unless it receives a comma at the end of the parameter list.
Blocks
In Python, code blocks are defined by the use of indentation, which should be constant within the code block. It is considered good practice to maintain consistency throughout the project and to avoid mixing tabs and spaces; the official coding style recommendation (PEP 8, http://www.python.org/dev/peps/pep-0008/) is to use four spaces for indentation, and this convention is widely accepted by developers.
The line before the block always ends with a colon (:) and is a control structure of the language or a statement of a new structure (a function, for example).
Example:
End of explanation
"""
for i in [256, 768, 32, 1894]:
if i % 3 == 0:
print(i, "/ 3 =", i/3)
"""
Explanation: The operator % computes the modulus (remainder of division).
End of explanation
"""
|
monicathieu/cu-psych-r-tutorial
|
content/tutorials/python/2-datacleaning/.ipynb_checkpoints/index-checkpoint.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
sns.set(style="ticks",font="Arial",font_scale=2)
"""
Explanation: title: "Data Cleaning in Python"
subtitle: "CU Psych Scientific Computing Workshop"
weight: 1201
tags: ["core", "python"]
Goals of this Lesson
Students will learn:
How to open various data types in Python
How to check for missing or problematic data and address issues
How to filter, rearrange and shape data in preparation for analysis
Links to Files
The files for all tutorials can be downloaded from the Columbia Psychology Scientific Computing GitHub page using these instructions. This particular file is located here: /content/tutorials/python/2-datacleaning/index.ipynb.
A Quick Introduction to Python Scientific Computing Modules
As a programming language, Python can do quite a lot. For example, it is an extremely popular choice for GUI and web-based application development (Reddit, Google, Facebook), databases (Spotify, Netflix), and scientific computing (NASA, for example, but also us!).
One reason that Python is so widely used is due to its extensive library of third-party modules. Let's start by briefly covering the most important modules for scientific computing, some (but not all) of which we'll be using today.
Data Analysis
NumPy: The fundamental package for scientific computing in Python. NumPy provides Python with most of the functionality of MATLAB.
SciPy: Provides many user-friendly and efficient numerical routines such as routines for numerical integration, interpolation, optimization, linear algebra and statistics.
Pandas: Provides high-performance, easy-to-use data structures and data analysis tools. Pandas provides Python with most of the functionality of R.
Data Visualization
Matplotlib: Python 2D plotting library which produces publication quality figures. The pyplot module provides a MATLAB-like interface and is what most people use.
Seaborn: A Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
We'll now import a few of these modules using their standard abbreviations.
End of explanation
"""
np.mean([2,4,6])
"""
Explanation: In order to call one of the functions belonging to a particular module, you can use the . syntax. For example, numpy has a mean() function which will compute the arithmetic mean across an axis. If we wanted to call that function, we would simply write:
End of explanation
"""
mylist = [1,2,3]
mylist
"""
Explanation: For those coming from R, this is the equivalent of something like dplyr::filter(). Python is stricter than R about making sure you specify which library the function you are using comes from.
Now that you're familiar with the basics of modules in Python, let's go ahead and move on to some data cleaning.
Python Data Structures
There are a few ways that data can be stored and manipulated in Python, some of which you've already covered.
To review, the first and most basic is a list:
End of explanation
"""
mylist2 = [1,"2",3.0, [4,5]]
mylist2
"""
Explanation: Lists can be arbitrarily long and can hold multiple types of data, although this isn't usually a good idea:
End of explanation
"""
myarray = np.zeros((10))
myarray
"""
Explanation: Similar to lists, the numpy module provides the ability to work with n-dimensional arrays for numerical data only. We can initialize an array full of zeros using the np.zeros() function:
End of explanation
"""
mymatrix = np.zeros((10,2))
mymatrix
"""
Explanation: If we want to work with numerical data that has two dimensions, we can create a matrix in a very similar way:
End of explanation
"""
mydataframe = pd.DataFrame(mymatrix,columns=["Height","Weight"])
mydataframe
"""
Explanation: We won't be working much with numpy arrays directly today, but you should know that they are often a better option than lists when you are working with numerical data.
Today, we will primarily be working with pandas dataframes. This object provides functionality that is very similar to dataframes in R. Let's start by converting our empty matrix into a dataframe. We can also give each of our columns more informative names:
End of explanation
"""
data = {'name':["Monica","Michelle","Paul","Ellen"]}
data
"""
Explanation: Another way we can create a dataframe is by first creating a dictionary and then converting this to a dataframe. A dictionary is another type of data structure used by Python. Dictionaries consist of an unordered collection of key-value pairs. Keys are used to index the dictionary in order to access the values associated with that key. Let's start by making a simple dictionary with one key and one value:
End of explanation
"""
data['name']
"""
Explanation: If we index this dictionary using the name key, it will return its value, which is a list of names:
End of explanation
"""
data['score'] = [16,20,19,35]
data['year'] = [2, 5, 2, 1]
data
"""
Explanation: We can also add new key-value pairs to this dictionary:
End of explanation
"""
dataframe = pd.DataFrame(data)
dataframe
"""
Explanation: Similar to how we made a dataframe from our numpy array above, we can easily make a dataframe from this dictionary:
End of explanation
"""
import os
"""
Explanation: Reading Data into Python
It's easy to introduce errors if you are entering data manually like above, and with a lot of data it would get tedious. Most of the time, you'll be reading data from an external file (.txt or .csv), or opening up an existing dataset in Python. Once you find the location of your files, what you do next will depend on the file format.
Reminder about the os module
This module provides a way to interface with the operating system we are running Python on (Windows, Mac, or Linux). Let's start by first loading this module:
End of explanation
"""
os.getcwd()
"""
Explanation: It's always important to check where our working directory is when trying to read data into Python.
End of explanation
"""
os.listdir()
"""
Explanation: You can access a list of everything (all files and directories) within your working directory using the os.listdir() function...
End of explanation
"""
#help(pd.read_table)
mydata = pd.read_table("Study1.csv", sep=",")
"""
Explanation: ...as well as in the "Files" tab on the left-hand side of the JupyterLab window.
What kind of file do you have?
For .txt, .csv, or any kind of delimited (such as tab-delimited) file, you can use the pandas function read_table():
End of explanation
"""
# get the number of rows and columns
mydata.shape
# get the names of columns
mydata.columns
# take a peek at the first few rows
mydata.head()
"""
Explanation: If you know you have a .csv file, another common option is read_csv(), which has a default comma separator.
Remember, all of these commands can have arguments that will help Python make sense of your data. To find out what arguments are possible, you can use the help() function like we did above to look at what read_table() does.
To do this, just put whatever command you would like to learn about inside of help() (e.g. help(pd.read_table)). Remember that for functions associated with a particular module you will need to tell Python which module they come from using the . syntax.
You can always also Google a function to quickly find this information.
Inspecting your data
Now that you have data, it's time to get some results! But wait! Are you sure this data is OK? Doing some basic steps to inspect your data now can save you lots of headaches later, and Python makes it really easy.
Start by checking that you have the expected number of rows and columns in your data frame. You can do this by asking Python:
End of explanation
"""
mydata = mydata.rename({'Personality':'Neuroticism'}, axis="columns")
mydata.head()
"""
Explanation: Rename a variable
Now that we've loaded our data into Python and have made sure it makes sense, we can start manipulating and cleaning it.
Look back at your dataframe. What is the fifth variable? What does that even mean? Luckily, this is your study and you know that it's a personality questionnaire measuring neuroticism. Let's fix that name and make it more intuitive:
End of explanation
"""
mydata = mydata.rename({'T1':'Day1',
'T2':'Day2',
'T3':'Day3',
'T4':'Day4'}, axis="columns")
mydata.head()
"""
Explanation: We can also rename multiple variables at once:
End of explanation
"""
# here we add a column where all the values are the same string
mydata['studyName'] = 'study1'
# here we add a column 'random' of 50 unique random numbers
mydata['random'] = np.random.random(50)
mydata.head()
"""
Explanation: Adding a new column
Often we'll want to add some new data into a dataframe.
End of explanation
"""
mydata = mydata.drop(['random', 'studyName'], axis = 1)
"""
Explanation: For those coming from R, the Python syntax for referencing columns as df["columnName"] is roughly equivalent to using R's $ operator.
Removing Columns
We can remove columns with the .drop() function
End of explanation
"""
# indexing a single column
ids = mydata[['ID']]
ids.head()
# indexing multiple columns
mydata_subset = mydata[['ID','Age','Neuroticism']]
mydata_subset.head()
"""
Explanation: Indexing a Dataframe
Sometimes you might want to look at only a subset of the columns in a dataframe (for example, when there are many variables). Doing this with a pandas dataframe is relatively straightforward:
End of explanation
"""
mydata.loc[0:2, ['Age']]
"""
Explanation: Using .loc and .iloc to index DataFrames
If we want to pull out or manipulate specific pieces of dataframes, we can use the .loc[] and .iloc[] functions.
With both functions, the data referenced is always formatted as [selection of rows, selection of columns].
.loc[] takes selections of rows from named columns.
So, here we're asking for elements 0:2 from the Age column:
End of explanation
"""
mydata.loc[mydata['Age'] > 24, ['Age']]
"""
Explanation: We can also use conditional logic to select rows. Here, we ask for all elements in the Age column that are above 24:
End of explanation
"""
mydata.iloc[3:7, 1:4]
"""
Explanation: .iloc[] takes selections of rows and columns using numeric indices.
End of explanation
"""
mydata.isnull()
#mydata.isnull().values.any()
"""
Explanation: Check for missing data
One problem you may have is missing data. Sometimes this is something you already know about, but you should check your data frame anyway to make sure nothing got missed in a data entry error. For small datasets, you can do this visually, but for larger ones you can ask Python.
End of explanation
"""
# Verify that this row contains the missing data
mydata.loc[mydata["ID"]==39]
# Replace row, column with the value 30
mydata.loc[mydata["ID"]==39, "Age"] = 30
# Verify that the replacement worked
mydata.loc[mydata["ID"]==39]
"""
Explanation: In this case, the missing value is the Age value in row 38. You know you have this info somewhere on a paper form, so you go dig it up and want to replace it.
End of explanation
"""
mydata['Sex'].head()
"""
Explanation: Check for correct values
Let's take a look at the Sex variable:
End of explanation
"""
mydata["Sex"].unique()
"""
Explanation: It looks like there are two categories here, but let's double check. We can use the unique() function to list all of the unique values in a column:
End of explanation
"""
mydata["Sex"] = mydata["Sex"].replace('Femle', 'Female')
# Verify that the replacement worked
mydata["Sex"].unique()
"""
Explanation: Here we see another data entry problem. At least one of the rows has a third category label that should really be another case of "Female". Let's replace this label using the replace() function:
End of explanation
"""
mydata["Age"].hist();
mydata["Neuroticism"].hist();
"""
Explanation: Now let's look at some of the continuous variables. You can also look at these by indexing them individually, but sometimes it's easier to visualize. The hist() function, which creates histograms, is good here.
End of explanation
"""
upper = np.mean(mydata["Neuroticism"]) + 3*np.std(mydata["Neuroticism"])
lower = np.mean(mydata["Neuroticism"]) - 3*np.std(mydata["Neuroticism"])
"""
Explanation: Looks like we have a potential outlier on the Neuroticism score. This could be an entry error, but it could also be a real value that just happens to be really low. This is why data inspection is so important for later analysis — now that you know that the value is there, it's up to you to decide how to deal with it.
Filtering data
Let's say we have decided a priori to exclude outliers more than 3 SD above or below the mean. We will first define these boundaries:
End of explanation
"""
mydata = mydata[(mydata["Neuroticism"] > lower) & (mydata["Neuroticism"] < upper)]
"""
Explanation: We can now use conditional indexing to exclude all rows with a Neuroticism score above or below these values:
End of explanation
"""
# Verify that we excluded 1 outlier
mydata.shape
mydata["Neuroticism"].hist();
"""
Explanation: The line above says: keep only the rows whose Neuroticism value is greater than the lower boundary and less than the upper boundary, and save the result back into the mydata variable.
End of explanation
"""
mydata['ConditionF'] = mydata['Condition'].replace([0,1], ['Control','Treatment'])
# Verify that your variable is now recoded as you'd like
mydata[['Condition','ConditionF']].head()
"""
Explanation: Getting Ready for Analysis
Now that we've gone through and cleaned up the problems, you can think ahead to how you'll want to use this data.
Recoding variables
Sometimes we want to treat categorical variables as factors, but sometimes we want to pretend they're numeric (as in a regression, when binary variables can be coded as 0 and 1). Right now, Condition is coded as a binary numeric variable, but that's not very informative, so you'd rather have the values be descriptive. Here, the function replace() is again useful:
End of explanation
"""
from scipy.stats import zscore
mydata['NeuroticismZ'] = zscore(mydata['Neuroticism'])
mydata['NeuroticismZ'].hist();
"""
Explanation: Calculating new variables
You may also want to recalculate or rescale some variables. For example, we can turn Neuroticism into a z-score, or calculate an average response across the four time points.
To compute a z-score, we can use the zscore() function from the scipy.stats module:
End of explanation
"""
mydata['DayMean'] = mydata[['Day1','Day2','Day3','Day4']].mean(axis="columns")
mydata['DayMean'].hist();
"""
Explanation: To calculate the means across each day, we can use the mean() function from pandas on a dataframe that has been indexed to include only data from the four days:
End of explanation
"""
# first load the followup dataset
mydata2 = pd.read_csv("Study1_Followup.csv")
"""
Explanation: Combining data from multiple sources
Sometimes, data might be spread across multiple files, and you'll want to combine those for your analysis. For example, maybe this study had a follow-up survey on Day 30. Scores from that survey were entered into another spreadsheet, which only has the subject ID and that score. We want to include that score into our data.
End of explanation
"""
mydata = mydata.merge(mydata2,on="ID")
mydata.head()
"""
Explanation: We can use the function merge() to combine the two dataframes. To make sure the data matches up, we use the on argument to specify that IDs should match. That way even if the data is in a different order, scores will match together correctly.
End of explanation
"""
value_cols = ["Day1","Day2","Day3","Day4"] # columns we would like to convert to a single "long" column
id_cols = list(mydata.columns) # columns we would like to stay in the same "wide" format
for i in value_cols:
id_cols.remove(i)
"""
Explanation: Shaping data
Finally, you may want to change the layout of your data. Right now, our dataframe is in "wide" format, which means that each row is a subject, and each observation gets its own column. For some analyses, you'll need to use "long" format, where each row is an observation, and columns specify things like Time and ID to differentiate the observations. For this, we can use the melt() function in pandas:
End of explanation
"""
mydata_Long = pd.melt(mydata,id_vars=id_cols,var_name="Time",value_vars=value_cols,value_name="Score")
mydata_Long.head()
"""
Explanation: Wide → Long
End of explanation
"""
mydata_Wide = mydata_Long.pivot_table(values="Score", index=id_cols, columns='Time').reset_index()
mydata_Wide.columns.name = None
mydata_Wide.head()
"""
Explanation: Long → Wide
We can go back in the other direction by using the pivot_table() function in pandas:
End of explanation
"""
# write data to a .csv
mydata.to_csv("Study1_clean.csv",index = False)
"""
Explanation: Saving Your Work
Once you've created a data cleaning script like this one, you'll have a record of all the edits you've made on the raw data, and you can recreate your cleaned data just by running the script again. However, it's often easier to save your cleaned data as its own file (never overwrite the raw data), so when you come back to do analysis you don't have to bother with all the cleaning steps.
You can always save data frames as a .csv for easy sharing and viewing outside of Python.
End of explanation
"""
|
phuongxuanpham/SelfDrivingCar
|
CarND-TensorFlow-Lab/lab.ipynb
|
gpl-3.0
|
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
"""
Explanation: <h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of letters from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "All modules imported".
End of explanation
"""
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the letter from the filename. This is the letter shown in the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
"""
Explanation: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
End of explanation
"""
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(X):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
# feature range [a, b]
a, b = 0.1, 0.9
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (b - a) + a
return X_scaled
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
"""
Explanation: <img src="image/mean_variance.png" style="height: 75%;width: 75%; position: relative; right: 5%">
Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the normalize_grayscale() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
If you're having trouble solving problem 1, you can view the solution here.
End of explanation
"""
print(os.getcwd())
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
"""
Explanation: Checkpoint
All your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
End of explanation
"""
features_count = 784
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), axis=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
"""
Explanation: <img src="image/weight_biases.png" style="height: 60%;width: 60%; position: relative; right: 10%">
Problem 2
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- features
- Placeholder tensor for feature data (train_features/valid_features/test_features)
- labels
- Placeholder tensor for label data (train_labels/valid_labels/test_labels)
- weights
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help.
- biases
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help.
If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
End of explanation
"""
# TODO: Find the best parameters for each configuration
epochs = 1
batch_size = 50
learning_rate = 0.01
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
"""
Explanation: <img src="image/learn_rate_tune.png" style="height: 60%;width: 60%">
Problem 3
Below are 3 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* Epochs: 1
* Batch Size:
* 2000 --> 0.098
* 1000 --> 0.144
* 500 --> 0.253
* 300 --> 0.305
* 50 --> 0.672
* Learning Rate: 0.01
Configuration 2
* Epochs: 1
* Batch Size: 100
* Learning Rate:
* 0.8 --> 0.098
* 0.5 --> 0.776
* 0.1 --> 0.746
* 0.05 --> 0.724
* 0.01 --> 0.572
Configuration 3
* Epochs:
* 1 --> 0.753
* 2 --> 0.098
* 3 --> 0.777
* 4 --> 0.783
* 5 --> 0.789
* Batch Size: 100
* Learning Rate: 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
If you're having trouble solving problem 3, you can view the solution here.
End of explanation
"""
# TODO: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3
epochs = 5
batch_size = 50
learning_rate = 0.5
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
"""
Explanation: Test
Set the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
End of explanation
"""
|
ThierryMondeel/FBA_python_tutorial
|
FBA_tutorials/extra_exploring_ecoli_core.ipynb
|
mit
|
import cobra
from cobra.flux_analysis import pfba
import pandas as pd # for nice tables
pd.set_option('display.max_colwidth', -1)
from utils import show_map
import escher
map_loc = './maps/e_coli_core.Core metabolism.json' # the escher map used below
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
M = cobra.io.load_json_model('models/e_coli_core.json')
model = M.copy() # this way we can edit model but leave M unaltered
"""
Explanation: Additional material: Investigating the E. coli core metabolic map
http://doi.org/10.1128/ecosalplus.10.2.1
This notebook serves either as additional material for advanced students or as the basis for a student project.
<span style="color:red">Assignment:</span> Read the paper describing the E. coli core model
End of explanation
"""
b = show_map([],map_loc)
b.display_in_notebook()
sol = model.optimize()
b = show_map(sol,map_loc)
b.display_in_notebook()
"""
Explanation: <span style="color:red">Assignment:</span> Examine the network.
End of explanation
"""
|
cuttlefishh/emp
|
code/04-subsets-prevalence/matches_deblur_to_gg_silva.ipynb
|
bsd-3-clause
|
!source activate qiime
import re
import sys
"""
Explanation: author: jonsan@gmail.com<br>
date: 9 Oct 2017<br>
language: Python 3.5<br>
license: BSD3<br>
matches_deblur_to_gg_silva.ipynb
End of explanation
"""
def fix_silva(silva_fp, output_fp):
with open(output_fp, 'w') as f_o:
with open(silva_fp, 'r') as f_i:
is_target = False
for l in f_i:
if l.startswith('>'):
is_target = False
if l.split(' ')[1].startswith('Bacteria'):
is_target = True
if l.split(' ')[1].startswith('Archaea'):
is_target = True
if is_target:
f_o.write(l.rstrip() + '\n')
else:
seq = l.rstrip()
if is_target:
f_o.write(seq.replace('U','T') + '\n')
return
silva_db_99_fp = 'SILVA_128_SSURef_Nr99_tax_silva.fasta'
silva_db_100_fp = 'SILVA_128_SSURef_tax_silva.fasta'
fix_silva(silva_db_99_fp, 'SILVA_128_SSURef_Nr99_tax_silva.prok.fasta')
fix_silva(silva_db_100_fp, 'SILVA_128_SSURef_tax_silva.prok.fasta')
"""
Explanation: Remove eukaryotic sequences from Silva DB
Also change 'U' to 'T' to match DNA sequences from EMP
End of explanation
"""
cmd = ('vsearch --usearch_global /projects/emp/03-otus/04-deblur/emp.90.min25.deblur.seq.fa '
'--id 1.0 '
'--maxaccepts 1000 '
'--maxrejects 32 '
'--db /home/jgsanders/ref_data/gg_13_8_otus/rep_set/99_otus.fasta '
'--uc ~/emp/mapping/Ghits_99_all.uc '
'--dbnotmatched /home/jgsanders/emp/mapping/dbs/gg_99_otus.unmatched.all.fasta '
'--dbmatched /home/jgsanders/emp/mapping/dbs/gg_99_otus.matched.all.fasta '
'--notmatched /home/jgsanders/emp/mapping/Ghits_99_unmatched.all.fasta '
'--matched /home/jgsanders/emp/mapping/Ghits_99_matched.all.fasta')
!echo "source activate qiime; $cmd" | qsub -k eo -N gg99 -l nodes=1:ppn=32 -l pmem=4gb -l walltime=12:00:00
cmd = ('vsearch --usearch_global /projects/emp/03-otus/04-deblur/emp.90.min25.deblur.seq.fa '
'--id 1.0 '
'--maxaccepts 1000 '
'--maxrejects 32 '
'--db /home/jgsanders/ref_data/gg_13_8_otus/gg_13_5.fasta '
'--uc ~/emp/mapping/Ghits_100_all.uc '
'--dbnotmatched /home/jgsanders/emp/mapping/dbs/gg_100_otus.unmatched.all.fasta '
'--dbmatched /home/jgsanders/emp/mapping/dbs/gg_100_otus.matched.all.fasta '
'--notmatched /home/jgsanders/emp/mapping/Ghits_100_unmatched.all.fasta '
'--matched /home/jgsanders/emp/mapping/Ghits_100_matched.all.fasta')
!echo "source activate qiime; $cmd" | qsub -k eo -N gg100 -l nodes=1:ppn=32 -l pmem=4gb -l walltime=12:00:00
cmd = ('vsearch --usearch_global /projects/emp/03-otus/04-deblur/emp.90.min25.deblur.seq.fa '
'--id 1.0 '
'--maxaccepts 1000 '
'--maxrejects 32 '
'--db /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_Nr99_tax_silva.prok.fasta '
'--uc ~/emp/mapping/Shits_99_all.uc '
'--dbnotmatched /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_Nr99_tax_silva.prok.unmatched.all.fasta '
'--dbmatched /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_Nr99_tax_silva.prok.matched.all.fasta '
'--notmatched /home/jgsanders/emp/mapping/Shits_99_unmatched.all.fasta '
'--matched /home/jgsanders/emp/mapping/Shits_99_matched.all.fasta')
!echo "source activate qiime; $cmd" | qsub -k eo -N silva99 -l nodes=1:ppn=32 -l pmem=4gb -l walltime=12:00:00
cmd = ('vsearch --usearch_global /projects/emp/03-otus/04-deblur/emp.90.min25.deblur.seq.fa '
'--id 1.0 '
'--maxaccepts 1000 '
'--maxrejects 32 '
'--db /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_tax_silva.prok.fasta '
'--uc ~/emp/mapping/Shits_100_all.uc '
'--dbnotmatched /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_tax_silva.prok.unmatched.all.fasta '
'--dbmatched /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_tax_silva.prok.matched.all.fasta '
'--notmatched /home/jgsanders/emp/mapping/Shits_100_unmatched.all.fasta '
'--matched /home/jgsanders/emp/mapping/Shits_100_matched.all.fasta')
!echo "source activate qiime; $cmd" | qsub -k eo -N silva100 -l nodes=1:ppn=32 -l pmem=4gb -l walltime=12:00:00
"""
Explanation: Search deblur against all (or at least a max of 1000 identical) hits
Commands are given in a qsub framework for submission to a Torque cluster
End of explanation
"""
#get number of original seqs
deblur_seqs = !grep -c '>' /projects/emp/03-otus/04-deblur/emp.90.min25.deblur.seq.fa
gg_99_seqs = !grep -c '>' /home/jgsanders/ref_data/gg_13_8_otus/rep_set/99_otus.fasta
silva_99_seqs = !grep -c '>' /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_Nr99_tax_silva.prok.fasta
gg_100_seqs = !grep -c '>' /home/jgsanders/ref_data/gg_13_8_otus/gg_13_5.fasta
silva_100_seqs = !grep -c '>' /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_tax_silva.prok.fasta
print('Deblur seqs: {0}\nGG 99 seqs: {1}\nSILVA 99 seqs: {2}\n'
'GG 100 seqs: {3}\nSILVA 100 seqs: {4}'.format(deblur_seqs[0], gg_99_seqs[0], silva_99_seqs[0],
gg_100_seqs[0], silva_100_seqs[0]))
#get number of gg 100 seqs matched
deblur_matched_gg = !grep -c '>' /home/jgsanders/emp/mapping/Ghits_100_matched.all.fasta
gg_matched_deblur = !grep -c '>' /home/jgsanders/emp/mapping/dbs/gg_100_otus.matched.all.fasta
print('GG 100 seqs with Deblur hits: {0} ({1:03.1f}%)'.format(gg_matched_deblur[0], float(gg_matched_deblur[0])/float(gg_100_seqs[0])*100))
print('Deblur seqs matching GG 100: {0} ({1:03.1f}%)'.format(deblur_matched_gg[0], float(deblur_matched_gg[0])/float(deblur_seqs[0])*100))
#get number of gg 99 seqs matched
deblur_matched_gg = !grep -c '>' /home/jgsanders/emp/mapping/Ghits_99_matched.all.fasta
gg_matched_deblur = !grep -c '>' /home/jgsanders/emp/mapping/dbs/gg_99_otus.matched.all.fasta
print('GG 99 seqs with Deblur hits: {0} ({1:03.1f}%)'.format(gg_matched_deblur[0], float(gg_matched_deblur[0])/float(gg_99_seqs[0])*100))
print('Deblur seqs matching GG 99: {0} ({1:03.1f}%)'.format(deblur_matched_gg[0], float(deblur_matched_gg[0])/float(deblur_seqs[0])*100))
#get number of silva 100 seqs matched
deblur_matched_silva = !grep -c '>' /home/jgsanders/emp/mapping/Shits_100_matched.all.fasta
silva_matched_deblur = !grep -c '>' /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_tax_silva.prok.matched.all.fasta
print('Silva 100 seqs with Deblur hits: {0} ({1:03.1f}%)'.format(silva_matched_deblur[0], float(silva_matched_deblur[0])/float(silva_100_seqs[0])*100))
print('Deblur seqs matching Silva 100: {0} ({1:03.1f}%)'.format(deblur_matched_silva[0], float(deblur_matched_silva[0])/float(deblur_seqs[0])*100))
#get number of silva 99 seqs matched
deblur_matched_silva = !grep -c '>' /home/jgsanders/emp/mapping/Shits_99_matched.all.fasta
silva_matched_deblur = !grep -c '>' /home/jgsanders/emp/mapping/dbs/SILVA_128_SSURef_Nr99_tax_silva.prok.matched.all.fasta
print('Silva 99 seqs with Deblur hits: {0} ({1:03.1f}%)'.format(silva_matched_deblur[0], float(silva_matched_deblur[0])/float(silva_99_seqs[0])*100))
print('Deblur seqs matching Silva 99: {0} ({1:03.1f}%)'.format(deblur_matched_silva[0], float(deblur_matched_silva[0])/float(deblur_seqs[0])*100))
"""
Explanation: Counting stats for the more exhaustive search
End of explanation
"""
import pandas as pd
gg100_df = pd.read_csv('../Ghits_100_all.uc',sep='\t',header=None)
gg100_hits = set(gg100_df[8])
silva100_df = pd.read_csv('../Shits_100_all.uc',sep='\t',header=None)
silva100_hits = set(silva100_df[8])
len(gg100_hits)
len(silva100_hits)
len(gg100_hits | silva100_hits)
len(silva100_hits - gg100_hits)
len(gg100_hits - silva100_hits)
"""
Explanation: Are matching sets nonredundant?
End of explanation
"""
|
transcranial/keras-js
|
notebooks/layers/convolutional/UpSampling1D.ipynb
|
mit
|
data_in_shape = (3, 5)
L = UpSampling1D(size=2)
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# generate random input data (use seed for reproducibility)
np.random.seed(230)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling1D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: UpSampling1D
[convolutional.UpSampling1D.0] size 2 upsampling on 3x5 input
End of explanation
"""
data_in_shape = (4, 4)
L = UpSampling1D(size=3)
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# generate random input data (use seed for reproducibility)
np.random.seed(231)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling1D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: [convolutional.UpSampling1D.1] size 3 upsampling on 4x4 input
End of explanation
"""
import os
filename = '../../../test/data/layers/convolutional/UpSampling1D.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
json.dump(DATA, f)
print(json.dumps(DATA))
"""
Explanation: export for Keras.js tests
End of explanation
"""
|
karlstroetmann/Formal-Languages
|
ANTLR4-Python/Interpreter/Interpreter.ipynb
|
gpl-2.0
|
!type Pure.g4
!cat -n Pure.g4
"""
Explanation: An Interpreter for a Simple Programming Language
In this notebook we develop an interpreter for a small programming language.
The grammar for this language is stored in the file Pure.g4.
End of explanation
"""
!type sum.sl
!cat sum.sl
"""
Explanation: The grammar shown above only contains skip actions. The corresponding grammar, enriched with actions, is stored in the file Simple.g4.
An example program that conforms to this grammar is stored in the file sum.sl.
End of explanation
"""
!type Simple.g4
!cat -n Simple.g4
"""
Explanation: The file Simple.g4 contains a parser for the language described by the grammar Pure.g4. This parser returns
an abstract syntax tree. This tree is represented as a nested tuple.
End of explanation
"""
!type sum.ast
!cat sum.ast
!antlr4 -Dlanguage=Python3 Simple.g4
from SimpleLexer import SimpleLexer
from SimpleParser import SimpleParser
import antlr4
%run ../AST-2-Dot.ipynb
"""
Explanation: The parser shown above will transform the program sum.sl into the nested tuple stored in the file sum.ast.
End of explanation
"""
def main(file):
with open(file, 'r') as handle:
program_text = handle.read()
input_stream = antlr4.InputStream(program_text)
lexer = SimpleLexer(input_stream)
token_stream = antlr4.CommonTokenStream(lexer)
parser = SimpleParser(token_stream)
result = parser.program()
Statements = result.stmnt_list
ast = tuple2dot(Statements)
print(Statements)
display(ast)
ast.render('ast', view=True)
execute_tuple(Statements)
"""
Explanation: The function main takes one parameter, file. This parameter is a string specifying the name of a file containing a program.
The function reads this program and executes it.
End of explanation
"""
def execute_tuple(Statement_List, Values={}):
for stmnt in Statement_List:
execute(stmnt, Values)
"""
Explanation: The function execute_tuple takes two arguments:
- Statement_List is a list of statements,
- Values is a dictionary assigning integer values to variable names.
The function executes the statements in Statement_List. If an assignment statement is executed,
the dictionary Values is updated.
End of explanation
"""
L = [1, 2, 3, 4, 5]
a, b, *R = L
a, b, R
def execute(stmnt, Values):
op = stmnt[0]
if stmnt == 'program':
pass
elif op == ':=':
_, var, value = stmnt
Values[var] = evaluate(value, Values)
elif op == 'print':
_, expr = stmnt
print(evaluate(expr, Values))
elif op == 'if':
_, test, *SL = stmnt
if evaluate(test, Values):
execute_tuple(SL, Values)
elif op == 'while':
_, test, *SL = stmnt
while evaluate(test, Values):
execute_tuple(SL, Values)
else:
assert False, f'{stmnt} unexpected'
"""
Explanation: The function execute takes two arguments:
- stmnt is a statement,
- Values is a dictionary assigning values to variable names.
The function executes the single statement stmnt. If an assignment statement is executed,
the dictionary Values is updated.
The following trick can be used to split a list into its components.
End of explanation
"""
def evaluate(expr, Values):
if isinstance(expr, int):
return expr
if isinstance(expr, str):
return Values[expr]
op = expr[0]
if op == 'read':
return int(input('Please enter a natural number:'))
if op == '==':
_, lhs, rhs = expr
return evaluate(lhs, Values) == evaluate(rhs, Values)
if op == '<':
_, lhs, rhs = expr
return evaluate(lhs, Values) < evaluate(rhs, Values)
if op == '+':
_, lhs, rhs = expr
return evaluate(lhs, Values) + evaluate(rhs, Values)
if op == '-':
_, lhs, rhs = expr
return evaluate(lhs, Values) - evaluate(rhs, Values)
if op == '*':
_, lhs, rhs = expr
return evaluate(lhs, Values) * evaluate(rhs, Values)
if op == '/':
_, lhs, rhs = expr
return evaluate(lhs, Values) / evaluate(rhs, Values)
assert False, f'{expr} unexpected'
!type sum.sl
!cat sum.sl
main('sum.sl')
!type factorial.sl
!cat factorial.sl
main('factorial.sl')
!del *.py *.tokens *.interp
!del *.pdf
!del ast
!rmdir /Q /S __pycache__
!dir /B
!rm *.py *.tokens *.interp
!rm ast
!rm -r __pycache__/
!rm *.pdf
!ls
"""
Explanation: The function evaluate takes two arguments:
- expr is a logical expression or an arithmetic expression,
- Values is a dictionary assigning integer values to variable names.
The function evaluates the given expression and returns this value.
End of explanation
"""
|
makism/dyfunconn
|
tutorials/EEG - 0 - Retrieve and parse.ipynb
|
bsd-3-clause
|
import numpy as np
import pyedflib # please check the "requirements.txt" file
import tqdm
import pathlib
import os
"""
Explanation: Go through all subjects from the dataset, read the EDF files and store them into NumPy arrays.
Notes
In addition to the module's dependencies, please consult the file requirements.txt found in the current folder.
In some subjects, we drop the last 170 samples to ensure an equal number of samples across subjects.
End of explanation
"""
curr_dir = pathlib.Path("./")
edf_dir = (curr_dir / "raw_data/").resolve()
if not edf_dir.exists():
try:
edf_dir.mkdir()
except Exception as err:
print(err)
else:
print(f"\"{edf_dir}\" already exists.")
# Skip fetching the data if the notebook run on Binder.
host = os.environ.get("BINDER_LAUNCH_HOST", None)
if host is None or host != "https://mybinder.org/":
!wget -P "$edf_dir" -c https://physionet.org/static/published-projects/eegmmidb/eeg-motor-movementimagery-dataset-1.0.0.zip
!unzip "$edf_dir"/eeg-motor-movementimagery-dataset-1.0.0.zip -d "$edf_dir/eeg-motor-movementimagery-dataset/"
"""
Explanation: Fetch the dataset
Define the directory where dataset is located
End of explanation
"""
dataset_root = f"{edf_dir}/eeg-motor-movementimagery-dataset/files"
n_subjects = 106
n_rois = 64
n_samples = 9600
eyes_open = np.zeros((n_subjects, n_rois, n_samples))
eyes_closed = np.zeros((n_subjects, n_rois, n_samples))
"""
Explanation: Prepare dataset
End of explanation
"""
for sub_id in tqdm.tqdm(range(n_subjects)):
subj_prefix = f"S{sub_id + 1:03}"
subj_dir = f"{dataset_root}/{subj_prefix}"
baseline_eyes_open = f"{subj_dir}/{subj_prefix}R01"
edf = pyedflib.EdfReader(baseline_eyes_open + ".edf")
annot = edf.read_annotation()
n_signals = edf.signals_in_file
signal_labels = edf.getSignalLabels()
for chan in np.arange(n_signals):
eyes_open[sub_id, chan, :] = edf.readSignal(chan)[0:9600]
"""
Explanation: Parse the baseline files for "eyes open"
End of explanation
"""
for sub_id in tqdm.tqdm(range(n_subjects)):
subj_prefix = f"S{sub_id + 1:03}"
subj_dir = f"{dataset_root}/{subj_prefix}"
baseline_eyes_open = f"{subj_dir}/{subj_prefix}R02"
edf = pyedflib.EdfReader(baseline_eyes_open + ".edf")
annot = edf.read_annotation()
n_signals = edf.signals_in_file
signal_labels = edf.getSignalLabels()
for chan in np.arange(n_signals):
eyes_closed[sub_id, chan, :] = edf.readSignal(chan)[0:9600]
"""
Explanation: Parse the baseline files for "eyes closed"
End of explanation
"""
store_dir = (curr_dir / "data/").resolve()
if not store_dir.exists():
try:
store_dir.mkdir()
except Exception as err:
print(err)
else:
print(f"\"{store_dir}\" already exists.")
np.save(f'{store_dir}/eeg_eyes_opened.npy', eyes_open)
np.save(f'{store_dir}/eeg_eyes_closed.npy', eyes_closed)
"""
Explanation: Dump arrays
End of explanation
"""
|
kit-cel/wt
|
nt1/vorlesung/1_quellencodierung/Uniform_Quantization_Sine.ipynb
|
gpl-2.0
|
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# plotting options
font = {'size' : 20}
plt.rc('font', **font)
plt.rc('text', usetex=matplotlib.checkdep_usetex(True))
matplotlib.rc('figure', figsize=(18, 6) )
"""
Explanation: Illustration of Uniform Quantization
This code is provided as supplementary material of the lecture Quellencodierung.
This code illustrates
* Uniform scalar quantization of a sinusoidal signal
End of explanation
"""
sr = 22050 # sampling rate
T = 1.0 # seconds
t = np.linspace(0, T, int(T*sr), endpoint=False) # time variable
x = np.sin(2*np.pi*2*t) # pure sine wave at 2 Hz
"""
Explanation: Generate artificial signal
$$
b[k] = \sin\left(2\pi\frac{2k}{f_s}\right),\qquad k = 0,\ldots,f_s
$$
End of explanation
"""
# Quantize to 4 bits ... 16 quantization levels
w = 4
# fix x_max based on the current signal, leave some tiny room
x_max = np.max(x) + 1e-10
Delta_x = x_max / (2**(w-1))
xh_max = (2**w-1)*Delta_x/2
# Quantize
xh_uniform_midrise = np.sign(x)*Delta_x*(np.floor(np.abs(x)/Delta_x)+0.5)
# saturation
xh_uniform_midrise[np.where(x > x_max)] = xh_max
xh_uniform_midrise[np.where(x < -x_max)] = -xh_max
plt.figure(figsize=(9, 9))
plt.subplot(3,1,1)
plt.plot(range(len(t)),x, c=(0,150/255, 130/255))
plt.autoscale(enable=True, axis='x', tight=True)
#plt.title('Original')
plt.xlabel('$k$')
plt.ylabel('$b[k]$')
plt.ylim((-1.1,+1.1))
plt.subplot(3,1,2)
plt.plot(range(len(t)),xh_uniform_midrise, c=(0,150/255, 130/255))
plt.autoscale(enable=True, axis='x', tight=True)
#plt.title('Quantized')
plt.xlabel('$k$')
plt.ylabel(r'$\bar{b}[k]$')
plt.ylim((-1.1,+1.1))
plt.subplot(3,1,3)
plt.plot(range(len(t)),xh_uniform_midrise - x, c=(0,150/255, 130/255))
plt.autoscale(enable=True, axis='x', tight=True)
#plt.title('Quantization error signal')
plt.xlabel('$k$')
plt.ylabel('$e[k]$')
plt.ylim((-1.1,+1.1))
plt.tight_layout()
plt.savefig('figure_Sine_quantizer.pdf',bbox_inches='tight')
"""
Explanation: Uniform Quantization. The quantizer is given by
$$
\bar{b}[k] = \mathrm{sign}(b[k])\cdot \Delta \cdot \left(\left\lfloor\frac{|b[k]|}{\Delta}\right\rfloor+\frac{1}{2}\right)
$$
where $\lfloor x \rfloor$ denotes the largest integer smaller than or equal to $x$.
End of explanation
"""
|
mrustl/flopy
|
examples/Notebooks/flopy3_swi2package_ex1.ipynb
|
bsd-3-clause
|
%matplotlib inline
import os
import platform
import numpy as np
import matplotlib.pyplot as plt
import flopy.modflow as mf
import flopy.utils as fu
import flopy.plot as fp
"""
Explanation: FloPy
SWI2 Example 1. Rotating Interface
This example problem is the first example problem in the SWI2 documentation (http://pubs.usgs.gov/tm/6a46/) and simulates transient movement of a freshwater\seawater interface separating two density zones in a two-dimensional vertical plane. The problem domain is 250 m long, 40 m high, and 1 m wide. The aquifer is confined, storage changes are not considered (all MODFLOW stress periods are steady-state), and the top and bottom of the aquifer are horizontal and impermeable.
The domain is discretized into 50 columns, 1 row, and 1 layer, with respective cell dimensions of 5 m (DELR), 1 m (DELC), and 40 m. A constant head of 0 m is specified for column 50. The hydraulic conductivity is 2 m/d and the effective porosity (SSZ) is 0.2. A flow of 1 m$^3$/d of seawater is specified in column 1 and causes groundwater to flow from left to right
in the model domain.
The domain contains one freshwater zone and one seawater zone, separated by an active ZETA surface between the zones (NSRF=1) that approximates the 50-percent seawater salinity contour. A 400-day period is simulated using a constant time step of 2 days. Fluid density is represented using the stratified option (ISTRAT=1) and the elevation of the interface is output every 100 days (every 50 time steps). The densities, $\rho$, of the freshwater and saltwater are 1,000 and 1,025 kg/m$^3$, respectively. Hence, the dimensionless densities, $\nu$, of the freshwater and saltwater are 0.0 and 0.025, respectively. The maximum slope of the toe and tip is specified as 0.2 (TOESLOPE=TIPSLOPE=0.2), and default tip/toe parameters are used (ALPHA=BETA=0.1). Initially, the interface is at a 45$^{\circ}$ angle from (x,z) = (80,0) to (x,z) = (120,-40). The source/sink terms (ISOURCE) are specified to be freshwater everywhere (ISOURCE=1) except in cell 1 where saltwater enters the model and ISOURCE equals 2. A comparison of results for SWI2 and the exact Dupuit solution of Wilson and Sa Da Costa (1982) are presented below. The constant flow from left to right results in an average velocity of 0.125 m/d. The exact Dupuit solution is a rotating straight interface of which the center moves to the right with this velocity
Import numpy and matplotlib, set all figures to be inline, import flopy.modflow and flopy.utils.
End of explanation
"""
modelname = 'swiex1'
#Set name of MODFLOW exe
# assumes executable is in users path statement
exe_name = 'mf2005'
if platform.system() == 'Windows':
exe_name = 'mf2005.exe'
workspace = os.path.join('data')
#make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
ml = mf.Modflow(modelname, version='mf2005', exe_name=exe_name, model_ws=workspace)
"""
Explanation: Define the name of your model and the location of the MODFLOW executable. All MODFLOW files and output will be stored in the subdirectory defined by the workspace. Create a model named ml and specify that this is a MODFLOW-2005 model.
End of explanation
"""
nlay = 1
nrow = 1
ncol = 50
delr = 5.
delc = 1.
nper, perlen, nstp = 1, 400., 200
discret = mf.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
top=0, botm=-40.0,
steady=True, nper=nper, perlen=perlen, nstp=nstp)
"""
Explanation: Define the number of layers, rows and columns, and the cell size along the rows (delr) and along the columns (delc). Then create a discretization file. Specify the top and bottom of the aquifer. The heads are computed quasi-steady state (hence a steady MODFLOW run) while the interface will move. There is one stress period with a length of 400 days and 200 steps (so one step is 2 days).
End of explanation
"""
ibound = np.ones((nrow, ncol))
ibound[0, -1] = -1
bas = mf.ModflowBas(ml, ibound=ibound, strt=0.0)
"""
Explanation: All cells are active (ibound=1), while the last cell is fixed head (ibound=-1). The starting values of the head are not important, as the heads are computed every time with a steady run.
End of explanation
"""
lpf = mf.ModflowLpf(ml, hk=2., laytyp=0, layavg=0)
"""
Explanation: Define the hydraulic conductivity. The aquifer is confined (laytyp=0) and the intercell hydraulic conductivity is the harmonic mean (layavg=0).
End of explanation
"""
wel = mf.ModflowWel(ml, stress_period_data = {0:[[0, 0, 0, 1]]} )
"""
Explanation: Inflow on the left side of the model is 1 m$^3$/d (layer 0, row 0, column 0, discharge 1)
End of explanation
"""
spd = {}
for istp in range(49, nstp+1, 50):
spd[(0, istp)] = ['save head', 'print budget']
spd[(0, istp+1)] = []
oc = mf.ModflowOc(ml, stress_period_data=spd)
pcg = mf.ModflowPcg(ml)
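# Illustrative check of the output control defined above: with 2-day time steps, saving at
# steps 50, 100, 150 and 200 corresponds to output every 100 days.
print([2 * (istp + 1) for istp in range(49, nstp + 1, 50)])  # [100, 200, 300, 400] days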
"""
Explanation: Define the output control to save heads and interface every 50 steps, and define the pcg solver with default arguments.
End of explanation
"""
z = np.zeros((nrow, ncol), float)
z[0, 16:24] = np.arange(-2.5, -40, -5)
z[0, 24:] = -40
z = [z] # zeta needs to be a list of arrays, one array per active surface
isource = np.ones((nrow, ncol), int)
isource[0, 0] = 2
#
swi = mf.ModflowSwi2(ml, nsrf=1, istrat=1, toeslope=0.2, tipslope=0.2, nu=[0, 0.025],
zeta=z, ssz=0.2, isource=isource, nsolver=1)
"""
Explanation: The initial interface is straight. ISOURCE is one everywhere (inflow and outflow are fresh, zone 1) except for the first cell (index=0), which has saltwater inflow (zone 2).
End of explanation
"""
ml.write_input()
ml.run_model(silent=True)
"""
Explanation: Write the MODFLOW input files and run the model
End of explanation
"""
# read model heads
hfile = fu.HeadFile(os.path.join(ml.model_ws, modelname+'.hds'))
head = hfile.get_alldata()
# read model zeta
zfile = fu.CellBudgetFile(os.path.join(ml.model_ws, modelname+'.zta'))
kstpkper = zfile.get_kstpkper()
zeta = []
for kk in kstpkper:
zeta.append(zfile.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta = np.array(zeta)
"""
Explanation: Load the head and zeta data from the file
End of explanation
"""
plt.figure(figsize=(16,6))
# define x-values of xcells and plot interface
x = np.arange(0, ncol*delr, delr) + delr/2.
label = ['SWI2','_','_','_'] # labels with an underscore are not added to legend
for i in range(4):
zt = np.ma.masked_outside(zeta[i,0,0,:], -39.99999, -0.00001)
plt.plot(x, zt, 'r-', lw=1,
zorder=10, label=label[i])
# Data for the Wilson - Sa da Costa solution
k = 2.0
n = 0.2
nu = 0.025
H = 40.0
tzero = H * n / (k * nu) / 4.0
Ltoe = np.zeros(4)
v = 0.125
t = np.arange(100, 500, 100)
label = ['Wilson and Sa Da Costa (1982)','_','_','_'] # labels with an underscore are not added to legend
for i in range(4):
Ltoe[i] = H * np.sqrt(k * nu * (t[i] + tzero) / n / H)
plt.plot([100 - Ltoe[i] + v * t[i], 100 + Ltoe[i] + v * t[i]], [0, -40], '0.75',
lw=8, zorder=0, label=label[i])
# Scale figure and add legend
plt.axis('scaled')
plt.xlim(0, 250)
plt.ylim(-40, 0)
plt.legend(loc='best');
"""
Explanation: Make a graph and add the solution of Wilson and Sa da Costa
End of explanation
"""
fig = plt.figure(figsize=(16, 3))
ax = fig.add_subplot(1, 1, 1)
modelxsect = fp.ModelCrossSection(model=ml, line={'Row': 0})
label = ['SWI2','_','_','_']
for k in range(zeta.shape[0]):
modelxsect.plot_surface(zeta[k, :, :, :], masked_values=[0, -40.],
color='red', lw=1, label=label[k])
linecollection = modelxsect.plot_grid()
ax.set_title('ModelCrossSection.plot_surface()')
# Data for the Wilson - Sa da Costa solution
k = 2.0
n = 0.2
nu = 0.025
H = 40.0
tzero = H * n / (k * nu) / 4.0
Ltoe = np.zeros(4)
v = 0.125
t = np.arange(100, 500, 100)
label = ['Wilson and Sa Da Costa (1982)','_','_','_'] # labels with an underscore are not added to legend
for i in range(4):
Ltoe[i] = H * np.sqrt(k * nu * (t[i] + tzero) / n / H)
ax.plot([100 - Ltoe[i] + v * t[i], 100 + Ltoe[i] + v * t[i]], [0, -40], 'blue',
lw=1, zorder=0, label=label[i])
# Scale figure and add legend
ax.axis('scaled')
ax.set_xlim(0, 250)
ax.set_ylim(-40, 0)
ax.legend(loc='best');
"""
Explanation: Use ModelCrossSection plotting class and plot_surface() method to plot zeta surfaces.
End of explanation
"""
fig = plt.figure(figsize=(16, 3))
ax = fig.add_subplot(1, 1, 1)
modelxsect = fp.ModelCrossSection(model=ml, line={'Row': 0})
modelxsect.plot_fill_between(zeta[3, :, :, :])
linecollection = modelxsect.plot_grid()
ax.set_title('ModelCrossSection.plot_fill_between()');
"""
Explanation: Use ModelCrossSection plotting class and plot_fill_between() method to fill between zeta surfaces.
End of explanation
"""
X, Y = np.meshgrid(x, [0, -40])
zc = fp.SwiConcentration(model=ml)
conc = zc.calc_conc(zeta={0:zeta[3,:,:,:]}) / 0.025
print(conc[0, 0, :])
v = np.vstack((conc[0, 0, :], conc[0, 0, :]))
plt.imshow(v, extent=[0, 250, -40, 0], cmap='Reds')
cb = plt.colorbar(orientation='horizontal')
cb.set_label('percent seawater');
plt.contour(X, Y, v, [0.75, 0.5, 0.25], linewidths=[2, 1.5, 1], colors='black');
"""
Explanation: Convert zeta surfaces to relative seawater concentrations
End of explanation
"""
|
Hugovdberg/timml
|
notebooks/timml_notebook4_sol.ipynb
|
mit
|
%matplotlib inline
import numpy as np
from timml import *
figsize = (6, 6)
z = [20, 15, 10, 8, 6, 5.5, 5.2, 4.8, 4.4, 4, 2, 0]
ml = Model3D(kaq=10, z=z, kzoverkh=0.1)
ls1 = LineSinkDitch(ml, x1=-100, y1=0, x2=100, y2=0, Qls=10000, order=5, layers=6)
ls2 = HeadLineSinkString(ml, [(200, -1000), (200, -200), (200, 0), (200, 200), (200, 1000)], hls=40, order=5, layers=0)
rf = Constant(ml, xr=-1000, yr=0, hr=42, layer=0)
print(ls1.hls)
print(ls2.hls)
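# Illustrative check (not part of the original exercise): the layer containing the well
# (layer 6, counting from the top) is as thick as the well diameter, 2 * r = 0.4 m.
thicknesses = [z[i] - z[i + 1] for i in range(len(z) - 1)]
print(thicknesses[6])  # approximately 0.4 m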
"""
Explanation: TimML Notebook 4
Horizontal well
A horizontal well is located in a 20 m thick aquifer; the hydraulic conductivity is $k = 10$ m/d and the vertical
anisotropy factor is 0.1. The horizontal well is placed 5 m above the bottom of the aquifer. The well has
a discharge of 10,000 m$^3$/d and radius of $r=0.2$ m. The well is 200 m long and runs from $(x, y) = (−100, 0)$
to $(x, y) = (100, 0)$. A long straight river with a head of 40 m runs to the right of the horizontal well along the line
$x = 200$. The head is fixed to 42 m at $(x, y) = (−1000, 0)$.
Three-dimensional flow to the horizontal well is modeled by dividing the aquifer up in 11 layers; the
elevations are: [20, 15, 10, 8, 6, 5.5, 5.2, 4.8, 4.4, 4, 2, 0]. At the depth of the well, the layer thickness is equal to
the diameter of the well, and it increases in the layers above and below the well. A TimML model is created with the Model3D
command. The horizontal well is located in layer 6 and is modeled with the LineSinkDitch element. Initially, the entry resistance of the well is set to zero.
End of explanation
"""
ml.solve()
"""
Explanation: Questions:
Exercise 4a
Solve the model.
End of explanation
"""
ml.contour(win=[-150, 150, -150, 150], ngr=[50, 100], layers = [0, 6],
figsize=figsize)
print('The head at the top and in layer 6 are:')
print(ml.head(0, 0.2, [0, 6]))
"""
Explanation: Exercise 4b
Create contour plots of layers 0 and 6 and note the difference between the layers. Also,
compute the head at $(x, y) = (0, 0.2)$ (on the edge of the well) and notice that there is a very large head
difference between the top of the aquifer and the well.
End of explanation
"""
ml.plot(win=[-1000, 1000, -1000, 1000], orientation='both', figsize=figsize)
ml.tracelines(xstart=[-500, -500, -500], ystart=[-500, -500, -500], zstart=[5, 9, 15],
hstepmax=20, tmax=10 * 365.25, orientation='both', color='C0')
ml.tracelines(xstart=[250, 250, 250], ystart=[50, 50, 50], zstart=[5, 9, 15],
hstepmax=20, tmax=10 * 365.25, orientation='both', color='C1')
"""
Explanation: Exercise 4c
Draw a number of pathlines from different elevations using the tracelines command. First make a plot with a cross section below it.
End of explanation
"""
ml.vcontour(win=[-200, 300, 0, 0], n=50, levels=20, figsize=(6,6))
"""
Explanation: Exercise 4d
Make a contour plot of the heads in a vertical cross-section using the vcontour command. Use a cross-section along the well.
End of explanation
"""
print('head inside w/o resistance:')
print(ls1.headinside())
ml = Model3D(kaq=10, z=z, kzoverkh=0.1)
ls = LineSinkDitch(ml, x1=-100, y1=0, x2=100, y2=0, Qls=10000, order=5, layers=6, wh=0.4, res=0.01)
HeadLineSinkString(ml, [(200, -1000), (200, -200), (200, 0), (200, 200), (200, 1000)],
hls=40, order=5, layers=0)
rf = Constant(ml, xr=-1000, yr=0, hr=42, layer=0)
ml.solve()
print('head inside horizontal well:', ls.headinside())
ml.vcontour(win=[-200, 300, 0, 0], n=50, levels=20, vinterp=False)
"""
Explanation: Exercise 4e
Change the entry resistance of the horizontal well to 0.01 days, set the width to 0.4 m, and re-solve the model. Notice the difference in the head inside the horizontal well, reported by the headinside function of the horizontal well, and use a contour plot of the heads in a vertical cross-section along the well to inspect the result.
End of explanation
"""
|
google/applied-machine-learning-intensive
|
content/05_deep_learning/01_recurrent_neural_networks/colab.ipynb
|
apache-2.0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: <a href="https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/05_deep_learning/01_recurrent_neural_networks/colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2020 Google LLC.
End of explanation
"""
! chmod 600 kaggle.json && (ls ~/.kaggle 2>/dev/null || mkdir ~/.kaggle) && mv kaggle.json ~/.kaggle/ && echo 'Done'
"""
Explanation: Recurrent Neural Networks (RNNs)
Recurrent Neural Networks (RNNs) are an interesting application of deep learning that allow models to predict the future. While regression models attempt to fit an equation to existing data and extend the predictive power of the equation into the future, RNNs fit a model and use sequences of time series data to make step-by-step predictions about the next most likely output of the model.
In this colab we will create a recurrent neural network that can predict engine vibrations.
Exploratory Data Analysis
We'll use the Engine Vibrations data from Kaggle. This dataset contains artificial engine vibration values we will use to train a model that can predict future values.
To load the data, upload your kaggle.json file and run the code block below.
End of explanation
"""
!kaggle datasets download joshmcadams/engine-vibrations
!ls
"""
Explanation: Next, download the data from Kaggle.
End of explanation
"""
import pandas as pd
df = pd.read_csv('engine-vibrations.zip')
df.describe()
"""
Explanation: Now load the data into a DataFrame.
End of explanation
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(24, 8))
plt.plot(list(range(len(df['mm']))), df['mm'])
plt.show()
"""
Explanation: We know the data contains readings of engine vibration over time. Let's see how that looks on a line chart.
End of explanation
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(24, 8))
plt.plot(list(range(100)), df['mm'].iloc[:100])
plt.show()
"""
Explanation: That's quite a tough chart to read. Let's sample it.
End of explanation
"""
df.isna().any()
"""
Explanation: See if any of the data is missing.
End of explanation
"""
import seaborn as sns
_ = sns.boxplot(df['mm'])
"""
Explanation: Finally, we'll do a box plot to see if the data is evenly distributed, which it is.
End of explanation
"""
import numpy as np
X = []
y = []
sseq_len = 50
for i in range(0, len(df['mm']) - sseq_len - 1):
X.append(df['mm'][i:i+sseq_len])
y.append(df['mm'][i+sseq_len+1])
y = np.array(y)
X = np.array(X)
X.shape, y.shape
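# Tiny illustration (added sketch, not part of the original flow): the same sliding-window
# construction on a toy series with a window length of 3. Note that, mirroring the loop
# above, the target is taken at index i + window_length + 1.
toy = [10, 20, 30, 40, 50, 60]
toy_X = [toy[i:i + 3] for i in range(len(toy) - 3 - 1)]
toy_y = [toy[i + 3 + 1] for i in range(len(toy) - 3 - 1)]
print(toy_X)  # [[10, 20, 30], [20, 30, 40]]
print(toy_y)  # [50, 60]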
"""
Explanation: There is not much more EDA we need to do at this point. Let's move on to modeling.
Preparing the Data
Currently we have a series of data that contains a single list of vibration values over time. When training our model and when asking for predictions, we'll want to instead feed the model a subset of our sequence.
We first need to determine our subsequence length and then create in-order subsequences of that length.
We'll create a list of lists called X that contains subsequences. We'll also create a list called y that contains the next value after each subsequence stored in X.
End of explanation
"""
X = np.expand_dims(X, axis=2)
y = np.expand_dims(y, axis=1)
X.shape, y.shape
"""
Explanation: We also need to explicitly set the final dimension of the data in order to have it pass through our model.
End of explanation
"""
data_std = df['mm'].std()
data_mean = df['mm'].mean()
X = (X - data_mean) / data_std
y = (y - data_mean) / data_std
X.max(), y.max(), X.min(), y.min()
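# Illustrative helper (an added sketch): values on the standardized scale can be mapped back
# to millimetres by inverting the transform above.
to_mm = lambda scaled_values: scaled_values * data_std + data_mean  # invert the standardization
print(to_mm(y[:3]))  # first few targets, back in the original units (mm)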
"""
Explanation: We'll also standardize our data for the model. Note that we don't normalize here because we need to be able to reproduce negative values.
End of explanation
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
"""
Explanation: And for final testing after model training, we'll split off 20% of the data.
End of explanation
"""
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[sseq_len, 1]),
keras.layers.Dense(1)
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
"""
Explanation: Setting a Baseline
We are only training with 50 data points at a time. This is well within the bounds of what a standard deep neural network can handle, so let's first see what a very simple neural network can do.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.SimpleRNN(1, input_shape=[None, 1])
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
"""
Explanation: We quickly converged and, when we ran the model, we got a baseline quality value of 0.03750885081060467.
The Most Basic RNN
Let's contrast a basic feedforward neural network with a basic RNN. To do this we simply need to use the SimpleRNN layer in our network in place of the Dense layer in our network above. Notice that, in this case, there is no need to flatten the data before we feed it into the model.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.SimpleRNN(50, return_sequences=True, input_shape=[None, 1]),
keras.layers.SimpleRNN(20, return_sequences=True),
keras.layers.SimpleRNN(10, return_sequences=True),
keras.layers.SimpleRNN(1)
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
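# Illustrative shape check (an added sketch using the imports above): return_sequences=True
# emits an output per timestep, which is what a stacked recurrent layer expects as input.
demo = tf.random.normal([4, sseq_len, 1])  # (batch, timesteps, features)
print(keras.layers.SimpleRNN(8, return_sequences=True)(demo).shape)  # (4, 50, 8)
print(keras.layers.SimpleRNN(8)(demo).shape)  # (4, 8) -- only the last timestep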
"""
Explanation: Our model converged a little more slowly, and its error of 0.8974118571865628 is not an improvement over the baseline model.
A Deep RNN
Let's try to build a deep RNN and see if we can get better results.
In the model below, we stick together four layers ranging in width from 50 nodes to our final output of 1.
Notice all of the layers except the output layer have return_sequences=True set. This causes the layer to pass outputs for all timesteps to the next layer. If you don't include this argument, only the output for the last timestep is passed, and intermediate layers will complain about the wrong shape of input.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.SimpleRNN(2, return_sequences=True, input_shape=[None, 1]),
keras.layers.Dropout(0.3),
keras.layers.SimpleRNN(1),
keras.layers.Dense(1)
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
"""
Explanation: Woah! What happened? Our MSE during training looked nice: 0.0496. But our final testing didn't perform much better than our simple model. We seem to have overfit!
We can try to simplify the model and add dropout layers to reduce overfitting, but even with a very basic model like the one below, we still get very different MSE between the training and test datasets.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.LSTM(1, input_shape=[None, 1]),
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.Adam(),
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=100, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
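# Illustrative sketch (added): an LSTM carries two recurrent state tensors (short-term h and
# long-term c), while a SimpleRNN carries only one; return_state=True exposes them.
demo = tf.random.normal([4, sseq_len, 1])
_, h_state, c_state = keras.layers.LSTM(8, return_state=True)(demo)
print(h_state.shape, c_state.shape)  # (4, 8) and (4, 8)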
"""
Explanation: Even with these measures, we still seem to be overfitting a bit. We could keep tuning, but let's instead look at some other types of neurons found in RNNs.
Long Short Term Memory
The RNN layers we've been using are basic neurons that have a very short memory. They tend to learn patterns that they have recently seen, but they quickly forget older training data.
The Long Short Term Memory (LSTM) neuron was built to combat this forgetfulness. The neuron outputs values for the next layer in the network, and it also outputs two other values: one for short-term memory and one for long-term memory. These state values are then fed back into the neuron at the next iteration of the network. This backfeed is similar to that of a SimpleRNN, except the SimpleRNN only has one backfeed.
We can replace the SimpleRNN with an LSTM layer, as you can see below.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.LSTM(20, return_sequences=True, input_shape=[None, 1]),
keras.layers.Dropout(0.2),
keras.layers.LSTM(10),
keras.layers.Dropout(0.2),
keras.layers.Dense(1)
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
"""
Explanation: We got a test RMSE of 0.8989123704842217, which is still not better than our SimpleRNN. And in the more complex model below, we got close to the baseline but still didn't beat it.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.GRU(1),
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
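# Illustrative comparison (added sketch; the width of 8 units is an arbitrary choice): a GRU
# has fewer trainable parameters than an LSTM of the same width, one reason it trains faster.
demo = tf.random.normal([1, 10, 1])
gru_layer, lstm_layer = keras.layers.GRU(8), keras.layers.LSTM(8)
gru_layer(demo), lstm_layer(demo)  # call each layer once so it builds its weights
print(gru_layer.count_params(), lstm_layer.count_params())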
"""
Explanation: LSTM neurons can be very useful, but as we have seen, they aren't always the best option.
Let's look at one more neuron commonly found in RNN models, the GRU.
Gated Recurrent Unit
The Gated Recurrent Unit (GRU) is another special neuron that often shows up in Recurrent Neural Networks. The GRU is similar to the LSTM in that it feeds output back into itself. The difference is that the GRU feeds a single state back into itself and then makes long- and short-term state adjustments based on that single backfeed.
The GRU tends to train faster than LSTM and has similar performance. Let's see how a network containing one GRU performs.
End of explanation
"""
tf.random.set_seed(0)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid",
input_shape=[None, 1]),
keras.layers.GRU(2, input_shape=[None, 1], activation='relu'),
keras.layers.Dropout(0.2),
keras.layers.Dense(1),
])
model.compile(
loss='mse',
optimizer='Adam',
metrics=['mae', 'mse'],
)
stopping = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0,
patience=2)
history = model.fit(X_train, y_train, epochs=50, callbacks=[stopping])
y_pred = model.predict(X_test)
rmse = math.sqrt(np.mean(keras.losses.mean_squared_error(y_test, y_pred)))
print("RMSE Scaled: {}\nRMSE Base Units: {}".format(
rmse, rmse * data_std + data_mean))
plt.figure(figsize=(10,10))
plt.plot(list(range(len(history.history['mse']))), history.history['mse'])
plt.show()
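# Illustrative shape check (added sketch): with kernel_size=4, strides=2 and "valid" padding,
# the Conv1D layer shortens each 50-step window to floor((50 - 4) / 2) + 1 = 24 steps
# before the GRU sees it.
conv_demo = keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid")
print(conv_demo(tf.random.normal([1, sseq_len, 1])).shape)  # (1, 24, 20)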
"""
Explanation: We got an RMSE of 0.9668634342193015, which isn't bad, but it still performs worse than our baseline.
Convolutional Layers
Convolutional layers are not limited to image classification models. They can also be really handy when training RNNs. For training on a sequence of data, we use the Conv1D class as shown below.
End of explanation
"""
# Your code goes here
"""
Explanation: Recurrent Neural Networks are a powerful tool for sequence generation and prediction. But they aren't the only mechanism for sequence prediction. If the sequence you are predicting is short enough, then a standard deep neural network might be able to provide the predictions you are looking for.
Also note that we created a model that took a series of data and output one value. It is possible to create RNNs that input one or more values and output one or more values. Each use case is different.
Exercises
Exercise 1: Visualization
Create a plot containing a series of at least 50 predicted points. Plot that series against the actual.
Hint: Pick a sequence of 100 values from the original data. Plot data points 50-100 as the actual line. Then predict 50 single values starting with the features 0-49, 1-50, etc.
Student Solution
End of explanation
"""
# Your code goes here
"""
Explanation: Exercise 2: Stock Price Prediction
Using the Stonks! dataset, create a recurrent neural network that can predict the stock price for the 'AAA' ticker. Calculate your RMSE with some holdout data.
Use as many text and code cells as you need to complete this exercise.
Hint: if predicting absolute prices doesn't yield a good model, look into other ways to represent the day-to-day change in data.
End of explanation
"""
|
jasonkitbaby/udacity-homework
|
student_intervention/student_intervention.ipynb
|
apache-2.0
|
# Load the required libraries
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import f1_score
# Load the student dataset
student_data = pd.read_csv("student-data.csv")
print "Student data read successfully!"
"""
Explanation: Machine Learning Engineer Nanodegree
Supervised Learning
Project 2: Building a Student Intervention System
Welcome to the second project of the Machine Learning Engineer Nanodegree! In this file, some example code has already been provided for you, but you will need to implement additional functionality to make the project run successfully. You will not need to modify the provided code unless explicitly instructed to do so. Headings that begin with 'Exercise' indicate that the code section that follows contains functionality you must implement. Each part comes with detailed instructions, and the parts to implement are marked with 'TODO' in the comments. Please read all of the hints carefully!
In addition to implementing code, you must answer some questions about the project and your implementation. Each question you need to answer is headed by 'Question X'. Read each question carefully and write a complete answer in the 'Answer' text box below it. Your submitted project will be graded on your answers to the questions and the functionality of the code you implement.
Tip: Code and Markdown cells can be run with the Shift + Enter shortcut. In addition, Markdown cells can be edited by double-clicking.
Question 1 - Classification vs. Regression
Your task in this project is to identify students who may ultimately fail to graduate if no help is given. Which type of supervised learning problem do you think this is, classification or regression? Why?
Answer:
Exploring the Data
Run the code in the cell below to load the student dataset, along with some of the Python libraries required for this project. Note that the last column of the dataset, 'passed', is our prediction target (it indicates whether a student graduated or not); the other columns are attributes of each student.
End of explanation
"""
# TODO: Calculate the total number of students
n_students = None
# TODO: Calculate the number of features
n_features = None
# TODO: Calculate the number of students who passed
n_passed = None
# TODO: Calculate the number of students who failed
n_failed = None
# TODO: Calculate the graduation rate
grad_rate = None
# Print the results
print "Total number of students: {}".format(n_students)
print "Number of features: {}".format(n_features)
print "Number of students who passed: {}".format(n_passed)
print "Number of students who failed: {}".format(n_failed)
print "Graduation rate of the class: {:.2f}%".format(grad_rate)
"""
Explanation: Exercise: Exploring the Data
We will start by investigating the data to determine how many students we have information on, and to learn the graduation rate among those students. In the code cell below, you will need to compute the following:
- The total number of students, n_students.
- The total number of features for each student, n_features.
- The number of students who passed (graduated), n_passed.
- The number of students who failed (did not graduate), n_failed.
- The graduation rate of the class, grad_rate, expressed as a percentage (%).
End of explanation
"""
# Extract the feature columns
feature_cols = list(student_data.columns[:-1])
# Extract the target column 'passed'
target_col = student_data.columns[-1]
# Show the list of columns
print "Feature columns:\n{}".format(feature_cols)
print "\nTarget column: {}".format(target_col)
# Split the data into feature data and target data (i.e. X_all and y_all)
X_all = student_data[feature_cols]
y_all = student_data[target_col]
# Show the feature information by printing the first five rows
print "\nFeature values:"
print X_all.head()
"""
Explanation: Preparing the Data
In this section, we will prepare the data for modeling, training, and testing.
Identify feature and target columns
The data you obtain will often contain non-numeric features. This can be a problem, as most machine learning algorithms expect numeric features on which to perform computations.
Run the code cell below to separate the student data into feature and target columns and to see whether any of the features are non-numeric.
End of explanation
"""
def preprocess_features(X):
    ''' Preprocess the student data: convert non-numeric binary features into binary (0/1) values and convert categorical variables into dummy variables
    '''
    # Initialize a DataFrame for the output
output = pd.DataFrame(index = X.index)
    # Look at each feature column of the data
for col, col_data in X.iteritems():
        # If the data type is non-numeric, replace all yes/no values with 1/0
if col_data.dtype == object:
col_data = col_data.replace(['yes', 'no'], [1, 0])
        # If the data type is categorical, convert it to dummy variables
if col_data.dtype == object:
            # Example: 'school' => 'school_GP' and 'school_MS'
col_data = pd.get_dummies(col_data, prefix = col)
        # Collect the converted columns
output = output.join(col_data)
return output
X_all = preprocess_features(X_all)
print "Processed feature columns ({} total features):\n{}".format(len(X_all.columns), list(X_all.columns))
"""
Explanation: Preprocessing the Feature Columns
As you can see, we have several non-numeric columns that need to be converted! Many of them are simply yes/no, for example internet. These can be reasonably converted into 1/0 (binary) values.
Other columns, like Mjob and Fjob, have more than two values and are known as _categorical variables_. The recommended way to handle such a column is to create as many columns as there are possible values (e.g. Fjob_teacher, Fjob_other, Fjob_services, etc.), and then set one of them to 1 and the rest to 0.
These generated columns are sometimes called dummy variables, and we will use the pandas.get_dummies() function to perform this transformation. Run the code cell below to complete the preprocessing steps discussed here.
End of explanation
"""
# TODO: Import any additional functionality you may need here
# TODO: Set the number of training points
num_train = None
# TODO: Set the number of testing points
num_test = X_all.shape[0] - num_train
# TODO: Shuffle and split the dataset into the training and testing sets defined above
X_train = None
X_test = None
y_train = None
y_test = None
# Show the results of the split
print "Training set has {} samples.".format(X_train.shape[0])
print "Testing set has {} samples.".format(X_test.shape[0])
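# Illustrative sketch only -- one possible way to fill in the TODO cell above, not the
# graded answer. It uses sklearn's train_test_split (assumes a sklearn version that
# provides sklearn.model_selection, as the later grid-search exercise does).
from sklearn.model_selection import train_test_split as tts_example
X_tr_ex, X_te_ex, y_tr_ex, y_te_ex = tts_example(X_all, y_all, train_size=300, test_size=95, random_state=0)
print "Example split: {} train / {} test".format(X_tr_ex.shape[0], X_te_ex.shape[0])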
"""
Explanation: Implementation: Training and Testing Data Split
We have now converted all of the categorical features into numeric values. Next we will split the data (both the features and the corresponding labels) into training and test sets. In the code cell below, you will need to implement the following:
- Randomly shuffle and split the data (X_all, y_all) into training and testing subsets.
- Use 300 data points for training (about 76%) and 95 data points for testing (about 24%).
- If possible, set a random_state for the function(s) you use.
- Store the results in X_train, X_test, y_train, and y_test.
End of explanation
"""
def train_classifier(clf, X_train, y_train):
    ''' Train the classifier with the training set '''
    # Start the clock, train the classifier, then stop the clock
start = time()
clf.fit(X_train, y_train)
end = time()
# Print the results
print "Trained model in {:.4f} seconds".format(end - start)
def predict_labels(clf, features, target):
    ''' Make predictions with the trained classifier and output the F1 score '''
    # Start the clock, make predictions, then stop the clock
start = time()
y_pred = clf.predict(features)
end = time()
    # Print and return the results
print "Made predictions in {:.4f} seconds.".format(end - start)
return f1_score(target.values, y_pred, pos_label='yes')
def train_predict(clf, X_train, y_train, X_test, y_test):
    ''' Train and predict with a classifier, and output the F1 score '''
    # Print the classifier name and the training set size
print "Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(X_train))
    # Train the classifier
train_classifier(clf, X_train, y_train)
    # Print the prediction results for the training and test sets
print "F1 score for training set: {:.4f}.".format(predict_labels(clf, X_train, y_train))
print "F1 score for test set: {:.4f}.".format(predict_labels(clf, X_test, y_test))
"""
Explanation: Training and Evaluating Models
In this section, you will choose 3 supervised learning models that are appropriate for this problem and available in scikit-learn. First you need to explain why you chose these three models, including the characteristics of the dataset and the strengths and weaknesses of each model. Then you will train each model on training sets of different sizes (100, 200, and 300 data points) and measure them using the F<sub>1</sub> score. You will need to produce three tables (one per model), each showing the training set size, training time, prediction time, F<sub>1</sub> score on the training set, and F<sub>1</sub> score on the test set.
These are the supervised learning models currently available in scikit-learn that you may choose from:
- Gaussian Naive Bayes (GaussianNB)
- Decision Trees
- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)
- K-Nearest Neighbors (KNeighbors)
- Stochastic Gradient Descent (SGDC)
- Support Vector Machines (SVM)
- Logistic Regression
Question 2 - Model Application
List three supervised learning models that are appropriate for this problem. For each model you choose:
Describe one real-world application of the model. (You will need to do a bit of research and cite your source.)
What are the strengths of the model? When does it perform best?
What are the weaknesses of the model? Under what conditions does it perform poorly?
Given the characteristics of our current dataset, why is this model a good fit for the problem?
Answer:
Setup
Run the code cell below to initialize three helper functions, which will help you train and test the three supervised learning algorithms you chose above. The functions are:
- train_classifier - takes a classifier and a training set and fits the classifier to the data.
- predict_labels - takes a trained classifier, features, and target labels, makes predictions, and returns the F<sub>1</sub> score.
- train_predict - takes a classifier together with training and testing sets, and runs train_classifier and predict_labels.
- This function reports the F<sub>1</sub> score for the training set and the test set separately.
End of explanation
"""
# TODO: Import the three supervised learning models from sklearn
# from sklearn import model_A
# from sklearn import model_B
# from sklearn import model_C
# TODO: Initialize the three models
clf_A = None
clf_B = None
clf_C = None
# TODO: Set up the training set sizes
X_train_100 = None
y_train_100 = None
X_train_200 = None
y_train_200 = None
X_train_300 = None
y_train_300 = None
# TODO: Run 'train_predict' for each classifier and each training set size
# train_predict(clf, X_train, y_train, X_test, y_test)
"""
Explanation: Exercise: Model Performance Metrics
With the functions defined above, you now need to import the three supervised learning models you chose and run the train_predict function for each of them. Remember that for each model you need to train and test on training sets of different sizes (100, 200, and 300). So you should end up with 9 different outputs below (three per model, one for each training set size). In the following code cell, you will need to implement the following:
- Import the three supervised learning models you discussed above.
- Initialize the three models and store them in clf_A, clf_B, and clf_C.
- Set a random_state for each model, if possible.
- Note: Use the default parameters for each model here; you will tune the parameters of one of the models in a later section.
- Create training sets of different sizes to train each model.
- Do not reshuffle or resplit the data! The new training sets should come from X_train and y_train.
- For each model, train it with each training set size and then test it on the test set (9 train/test runs in total).
Note: Three tables are provided after the following code cell for you to store your results.
End of explanation
"""
# TODO: Import 'GridSearchCV' and 'make_scorer'
# TODO: Create the list of parameters you wish to tune
parameters = None
# TODO: Initialize the classifier
clf = None
# TODO: Create an f1 scoring function using 'make_scorer'
f1_scorer = None
# TODO: Perform grid search on the classifier using f1_scorer as the scoring function
grid_obj = None
# TODO: Fit the grid search object to the training data and find the optimal parameters
grid_obj = None
# Get the estimator
clf = grid_obj.best_estimator_
# Report the final F1 score for training and testing after parameter tuning
print "Tuned model has a training F1 score of {:.4f}.".format(predict_labels(clf, X_train, y_train))
print "Tuned model has a testing F1 score of {:.4f}.".format(predict_labels(clf, X_test, y_test))
"""
Explanation: Results Tables
Edit the tables below to see how a table is laid out in Markdown. You need to record the results from above in these tables.
Classifier 1 - ?
| Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) |
| :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: |
| 100 | | | | |
| 200 | EXAMPLE | | | |
| 300 | | | | EXAMPLE |
Classifier 2 - ?
| Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) |
| :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: |
| 100 | | | | |
| 200 | EXAMPLE | | | |
| 300 | | | | EXAMPLE |
Classifier 3 - ?
| Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) |
| :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: |
| 100 | | | | |
| 200 | EXAMPLE | | | |
| 300 | | | | EXAMPLE |
Choosing the Best Model
In this final section, you will choose the best of the three supervised learning models to use on the student data. You will then run a grid search on the best model over the entire training set (X_train and y_train), tuning at least one parameter in order to improve the model's F<sub>1</sub> score compared with the untuned model.
Question 3 - Choosing the Best Model
Based on the experiments you performed above, explain in one to two paragraphs to the (school) board of supervisors which model you chose as the best. Which model is the best choice overall, considering the available data, limited resources, cost, and model performance?
Answer:
Question 4 - Model in Layman's Terms
In one to two paragraphs, explain to the (school) board of supervisors, in terms a layperson would understand, how the final model works. You should describe the key qualities of the chosen model, for example how the model is trained and how it makes a prediction. Avoid advanced mathematical or technical jargon, and do not use formulas or specific algorithm names.
Answer:
Exercise: Model Tuning
Fine-tune the parameters of the chosen model. Use grid search (GridSearchCV) to tune at least one important parameter of the model, trying at least 3 different values for it. You will need to use the entire training set for this. In the following code cell, you will need to implement the following:
- Import sklearn.model_selection.GridSearchCV and sklearn.metrics.make_scorer.
- Create a dictionary of the parameters you wish to tune for this model.
- Example: parameters = {'parameter' : [list of values]}.
- Initialize the classifier you chose and store it in clf.
- Use make_scorer to create an F<sub>1</sub> scoring function and store it in f1_scorer.
- Make sure to set the pos_label parameter correctly!
- Run a grid search on the classifier clf using f1_scorer as the scoring function, and store the result in grid_obj.
- Fit the grid search object to the training data (X_train, y_train) and store it in grid_obj.
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.14/_downloads/plot_stats_cluster_time_frequency.ipynb
|
bsd-3-clause
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
"""
Explanation: Non-parametric between conditions cluster statistic on single trial power
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
extracting epochs for 2 conditions
computing single trial power estimates
baseline correcting the power estimates (power ratios)
computing stats to see if the power estimates are significantly different
between conditions.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332' # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
"""
Explanation: Set parameters
End of explanation
"""
decim = 2
frequencies = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, frequencies,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, frequencies,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :] # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :] # only 1 channel as 3D matrix
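# Illustrative check (added): decim=2 keeps every 2nd time sample, so the TFR time axis has
# roughly half as many points as the original epochs.
print('%d epoch samples -> %d TFR samples (decim=2)' % (len(epochs_condition_1.times), len(tfr_epochs_1.times)))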
"""
Explanation: Factor to downsample the temporal dimension of the TFR computed by
tfr_morlet. Decimation occurs after frequency decomposition and can
be used to reduce memory usage (and possibly computational time of downstream
operations such as nonparametric statistics) if you don't need high
spectrotemporal resolution.
End of explanation
"""
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2],
n_permutations=100, threshold=threshold, tail=0)
"""
Explanation: Compute statistic
End of explanation
"""
times = 1e3 * epochs_condition_1.times # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
weights=[1, -1])
evoked_contrast.plot(axes=ax2)
plt.show()
"""
Explanation: View time-frequency plots
End of explanation
"""
|
zhuanxuhit/deep-learning
|
autoencoder/Simple_Autoencoder.ipynb
|
mit
|
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
"""
Explanation: A Simple Autoencoder
We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.
In this notebook, we'll build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.
End of explanation
"""
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
"""
Explanation: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
End of explanation
"""
# mnist.train.images[0]
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
image_shape = mnist.train.images.shape[1]
inputs_ = tf.placeholder(tf.float32,shape=(None,image_shape),name='inputs')
targets_ = tf.placeholder(tf.float32,shape=(None,image_shape),name='targets')
# Output of hidden layer
encoded = tf.layers.dense(inputs_,encoding_dim,activation=tf.nn.relu)
# Output layer logits
logits = tf.layers.dense(encoded,image_shape) # linear activation
# Sigmoid output from logits
decoded = tf.sigmoid(logits)
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=targets_)
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
"""
Explanation: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Exercise: Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this tf.nn.sigmoid_cross_entropy_with_logits (documentation). You should note that tf.nn.sigmoid_cross_entropy_with_logits takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function.
End of explanation
"""
# Create the session
sess = tf.Session()
"""
Explanation: Training
End of explanation
"""
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
"""
Explanation: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Calling mnist.train.next_batch(batch_size) will return a tuple of (images, labels). We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightforward training with TensorFlow. We initialize the variables with sess.run(tf.global_variables_initializer()). Then, run the optimizer and get the loss with batch_cost, _ = sess.run([cost, opt], feed_dict=feed).
End of explanation
"""
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
"""
Explanation: Checking out the results
Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.
End of explanation
"""
|
weikang9009/pysal
|
tools/gitcount-tables.ipynb
|
bsd-3-clause
|
from __future__ import print_function
import os
import json
import re
import sys
import pandas
import subprocess
from subprocess import check_output
#import yaml
from datetime import datetime, timedelta
from dateutil.parser import parse
import pytz
utc=pytz.UTC
from datetime import datetime, timedelta
from time import sleep
from subprocess import check_output
try:
from urllib import urlopen
except:
from urllib.request import urlopen
import ssl
#import yaml
context = ssl._create_unverified_context()
"""
Explanation: PySAL Change Log Statistics: Table Generation
This notebook generates the summary statistics for use in the 6-month releases of PySAL, which is now a meta package.
It assumes the subpackages have been git cloned in a directory below the location of this notebook. It also requires network connectivity for some of the reporting.
Run this notebook after gitcount.ipynb
End of explanation
"""
CWD = os.path.abspath(os.path.curdir)
CWD
"""
Explanation: with open('../packages.yml') as package_file:
packages = yaml.load(package_file)
End of explanation
"""
start_date = '2019-07-29'
since_date = '--since="{start}"'.format(start=start_date)
since_date
since = datetime.strptime(start_date+" 0:0:0", "%Y-%m-%d %H:%M:%S")
since
with open('package_versions.txt', 'r') as package_list:
packages = dict([line.strip().split() for line in package_list.readlines()])
import pickle
issue_details = pickle.load( open( "issue_details.p", "rb" ) )
pull_details = pickle.load( open( "pull_details.p", "rb" ) )
packages
"""
Explanation: Our last main release was 2019-01-30:
End of explanation
"""
tag_dates = {}
#root = '/home/serge/Dropbox/p/pysal/src/pysal/tmp/'
root = CWD + "/tmp/"
#for record in tags:
for pkg in packages:
#pkg, tag = record.strip().split()
tag = packages[pkg]
print(pkg, tag)
if pkg=='spvcm':
tag = '0.2.1post1'
#tag = tag.split('/')[-1]
pkdir = root+pkg
try:
cmd = "git log -1 --format=%ai v{tag}".format(tag=tag)
os.chdir(pkdir)
result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)
except:
cmd = "git log -1 --format=%ai {tag}".format(tag=tag)
os.chdir(pkdir)
result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)
tag_string = result.stdout.decode('utf-8')
tag_date = tag_string.split()[0]
tag_dates[pkg] = tag_date
print(pkg, tag, tag_date)
os.chdir(CWD)
# get issues for a package and filter on tag date
for pkg in tag_dates.keys():
issues = issue_details[pkg]
tag_date = utc.localize(parse(tag_dates[pkg]))
keep = []
for issue in issues:
closed = parse(issue['closed_at'])
if closed <= tag_date:
keep.append(issue)
print(pkg, len(issues), len(keep))
issue_details[pkg] = keep
keep = []
pulls = pull_details[pkg]
for pull in pulls:
closed = parse(pull['closed_at'])
if closed <= tag_date:
keep.append(pull)
print(pkg, len(pulls), len(keep))
pull_details[pkg] = keep
# commits
cmd = ['git', 'log', '--oneline', since_date]
activity = {}
total_commits = 0
for subpackage in packages:
tag_date = tag_dates[subpackage]
os.chdir(CWD)
os.chdir('tmp/{subpackage}'.format(subpackage=subpackage))
cmd_until = cmd + ['--until="{tag_date}"'.format(tag_date=tag_date)]
ncommits = len(check_output(cmd_until).splitlines())
ncommits_total = len(check_output(cmd).splitlines())
print(subpackage, ncommits_total, ncommits, tag_date)
total_commits += ncommits
activity[subpackage] = ncommits
cmd_until
identities = {'Levi John Wolf': ('ljwolf', 'Levi John Wolf'),
'Serge Rey': ('Serge Rey', 'Sergio Rey', 'sjsrey', 'serge'),
'Wei Kang': ('Wei Kang', 'weikang9009'),
'Dani Arribas-Bel': ('Dani Arribas-Bel', 'darribas'),
'Antti Härkönen': ( 'antth', 'Antti Härkönen', 'Antti Härkönen', 'Antth' ),
'Juan C Duque': ('Juan C Duque', "Juan Duque"),
'Renan Xavier Cortes': ('Renan Xavier Cortes', 'renanxcortes', 'Renan Xavier Cortes' ),
'Taylor Oshan': ('Tayloroshan', 'Taylor Oshan', 'TaylorOshan'),
'Tom Gertin': ('@Tomgertin', 'Tom Gertin', '@tomgertin')
}
def regularize_identity(string):
string = string.decode()
for name, aliases in identities.items():
for alias in aliases:
if alias in string:
string = string.replace(alias, name)
if len(string.split(' '))>1:
string = string.title()
return string.lstrip('* ')
author_cmd = ['git', 'log', '--format=* %aN', since_date]
author_cmd.append('blank')
author_cmd
from collections import Counter
authors_global = set()
authors = {}
global_counter = Counter()
counters = dict()
cmd = ['git', 'log', '--oneline', since_date]
total_commits = 0
activity = {}
for subpackage in packages:
os.chdir(CWD)
os.chdir('tmp/{subpackage}'.format(subpackage=subpackage))
ncommits = len(check_output(cmd).splitlines())
tag_date = tag_dates[subpackage]
author_cmd[-1] = '--until="{tag_date}"'.format(tag_date=tag_date)
#cmd_until = cmd + ['--until="{tag_date}"'.format(tag_date=tag_date)]
all_authors = check_output(author_cmd).splitlines()
counter = Counter([regularize_identity(author) for author in all_authors])
global_counter += counter
counters.update({'.'.join((package,subpackage)): counter})
unique_authors = sorted(set(all_authors))
authors[subpackage] = unique_authors
authors_global.update(unique_authors)
total_commits += ncommits
activity[subpackage] = ncommits
authors_global
activity
counters
counters
def get_tag(title, level="##", as_string=True):
words = title.split()
tag = "-".join([word.lower() for word in words])
heading = level+" "+title
line = "\n\n<a name=\"{}\"></a>".format(tag)
lines = [line]
lines.append(heading)
if as_string:
return "\n".join(lines)
else:
return lines
subs = issue_details.keys()
table = []
txt = []
lines = get_tag("Changes by Package", as_string=False)
for sub in subs:
total= issue_details[sub]
pr = pull_details[sub]
row = [sub, activity[sub], len(total), len(pr)]
table.append(row)
#line = "\n<a name=\"{sub}\"></a>".format(sub=sub)
#lines.append(line)
#line = "### {sub}".format(sub=sub)
#lines.append(line)
lines.extend(get_tag(sub.lower(), "###", as_string=False))
for issue in total:
url = issue['html_url']
title = issue['title']
number = issue['number']
line = "* [#{number}:]({url}) {title} ".format(title=title,
number=number,
url=url)
lines.append(line)
line
table
os.chdir(CWD)
import pandas
df = pandas.DataFrame(table, columns=['package', 'commits', 'total issues', 'pulls'])
df.sort_values(['commits','pulls'], ascending=False)\
.to_html('./commit_table.html', index=None)
df.sum()
contributor_table = pandas.DataFrame.from_dict(counters).fillna(0).astype(int).T
contributor_table.to_html('./contributor_table.html')
totals = contributor_table.sum(axis=0).T
totals.sort_index().to_frame('commits')
totals = contributor_table.sum(axis=0).T
totals.sort_index().to_frame('commits').to_html('./commits_by_person.html')
totals
n_commits = df.commits.sum()
n_issues = df['total issues'].sum()
n_pulls = df.pulls.sum()
n_commits
#Overall, there were 719 commits that closed 240 issues, together with 105 pull requests across 12 packages since our last release on 2017-11-03.
#('{0} Here is a really long '
# 'sentence with {1}').format(3, 5))
line = ('Overall, there were {n_commits} commits that closed {n_issues} issues,'
' together with {n_pulls} pull requests since our last release'
' on {since_date}.\n'.format(n_commits=n_commits, n_issues=n_issues,
n_pulls=n_pulls, since_date = start_date))
line
"""
Explanation: get dates of tags
with open('subtags', 'r') as tag_name:
tags = tag_name.readlines()
End of explanation
"""
with open('changes.md', 'w') as fout:
fout.write(line)
fout.write("\n".join(lines))
fout.write(get_tag("Summary Statistics"))
with open('commit_table.html') as table:
table_lines = table.readlines()
title = "Package Activity"
fout.write(get_tag(title,"###"))
fout.write("\n")
fout.write("".join(table_lines))
with open('commits_by_person.html') as table:
table_lines = table.readlines()
title = "Contributor Activity"
fout.write(get_tag(title,"###"))
fout.write("\n")
fout.write("".join(table_lines))
with open('contributor_table.html') as table:
table_lines = table.readlines()
title = "Contributor by Package Activity"
fout.write(get_tag(title,"###"))
fout.write("\n")
fout.write("".join(table_lines))
"""
Explanation: append html files to end of changes.md with tags for toc
End of explanation
"""
|
tbrx/compiled-inference
|
notebooks/Linear-Regression.ipynb
|
gpl-3.0
|
import numpy as np
import torch
from torch.autograd import Variable
import sys, inspect
sys.path.insert(0, '..')
%matplotlib inline
import pymc
import matplotlib.pyplot as plt
from learn_smc_proposals import cde
from learn_smc_proposals.utils import systematic_resample
import seaborn as sns
sns.set_context("notebook", font_scale=1.5, rc={"lines.markersize": 12})
sns.set_style('ticks')
"""
Explanation: Learn how to perform regression
We're going to train a neural network that knows how to perform inference in robust linear regression models.
The network will have as input:
$x$, a vector of input values, and
$y$, a vector of output values.
It will learn how to perform posterior inference for the parameter vector $w$, in a linear regression model.
End of explanation
"""
num_points = 10 # number of points in the synthetic dataset we train on
def robust_regression(x, t, sigma_0=np.array([10.0, 1.0, .1]), epsilon=1.0):
""" X: input (NxD matrix)
t: output (N vector)
sigma_0: prior std hyperparameter for weights
epsilon: std hyperparameter for output noise """
if x is not None:
N, D = x.shape
assert D == 1
else:
N = num_points
D = 1
# assume our input variable is bounded by some constant
const = 10.0
x = pymc.Uniform('x', lower=-const, upper=const, value=x, size=(N, D), observed=(x is not None))
# create design matrix (add intercept)
@pymc.deterministic(plot=False)
def X(x=x, N=N):
return np.hstack((np.ones((N,1)), x, x**2))
w = pymc.Laplace('w', mu=np.zeros((D+2,)), tau=sigma_0**(-1.0))
@pymc.deterministic(plot=False, trace=False)
def mu(X=X, w=w):
return np.dot(X, w)
y = pymc.NoncentralT('y', mu=mu, lam=epsilon**(-2.0), nu=4, value=t, observed=(t is not None))
return locals()
"""
Explanation: First step: let's define linear regression as a PyMC model. This model has:
an intercept term, a linear term, and a quadratic term
Laplace distribution (double exponential) priors on the weights
T-distributed (heavy-tailed) likelihoods
End of explanation
"""
M_train = pymc.Model(robust_regression(None, None))
def get_observed(model):
return np.atleast_2d(np.concatenate((model.x.value.ravel(), model.y.value.ravel())))
def get_latent(model):
return np.atleast_2d(model.w.value)
def generate_synthetic(model, size=100):
observed, latent = get_observed(model), get_latent(model)
for i in xrange(size-1):
model.draw_from_prior()
observed = np.vstack((observed, get_observed(model)))
latent = np.vstack((latent, get_latent(model)))
return observed, latent
gen_data = lambda num_samples: generate_synthetic(M_train, num_samples)
example_minibatch = gen_data(100)
"""
Explanation: Instantiate the generative model
We'll define M_train, a PyMC model which doesn't have any data attached.
We can use M_train.draw_from_prior() to construct synthetic datasets to use for training.
End of explanation
"""
observed_dim = num_points*2
latent_dim = 3
hidden_units = 300
hidden_layers = 2
mixture_components = 3
dist_est = cde.ConditionalRealValueMADE(observed_dim, latent_dim, hidden_units, hidden_layers, mixture_components)
if torch.cuda.is_available():
dist_est.cuda()
dist_est
"""
Explanation: Define a network which will invert this model
Each of the 3 dimensions of the latent space will be modeled by a mixture of Gaussians.
End of explanation
"""
example_parents = Variable(torch.FloatTensor(example_minibatch[0][:5]))
example_latents = Variable(torch.FloatTensor(example_minibatch[1][:5]))
if torch.cuda.is_available():
example_parents = example_parents.cuda()
example_latents = example_latents.cuda()
print "Sampled from p(latent|parents):\n\n", dist_est.sample(example_parents)
print "Evaluate log p(latent|parents):\n\n", dist_est.logpdf(example_parents, example_latents)
"""
Explanation: We can use our network to sample and to compute logpdfs.
The primary interface is through .sample(parents), and .logpdf(parents, latent).
Both of these expect pytorch tensors as inputs.
End of explanation
"""
def _iterate_minibatches(inputs, outputs, batchsize):
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
excerpt = slice(start_idx, start_idx + batchsize)
yield Variable(torch.FloatTensor(inputs[excerpt])), Variable(torch.FloatTensor(outputs[excerpt]))
def training_step(optimizer, dist_est, gen_data, dataset_size, batch_size, max_local_iters=10, misstep_tolerance=0, verbose=False):
""" Training function for fitting density estimator to simulator output """
# Train
synthetic_ins, synthetic_outs = gen_data(dataset_size)
validation_size = dataset_size/10
validation_ins, validation_outs = [Variable(torch.FloatTensor(t)) for t in gen_data(validation_size)]
missteps = 0
num_batches = float(dataset_size)/batch_size
USE_GPU = dist_est.parameters().next().is_cuda
if USE_GPU:
validation_ins = validation_ins.cuda()
validation_outs = validation_outs.cuda()
validation_err = -torch.mean(dist_est.logpdf(validation_ins, validation_outs)).data[0]
for local_iter in xrange(max_local_iters):
train_err = 0
for inputs, outputs in _iterate_minibatches(synthetic_ins, synthetic_outs, batch_size):
optimizer.zero_grad()
if USE_GPU:
loss = -torch.mean(dist_est.logpdf(inputs.cuda(), outputs.cuda()))
else:
loss = -torch.mean(dist_est.logpdf(inputs, outputs))
loss.backward()
optimizer.step()
train_err += loss.data[0]/num_batches
next_validation_err = -torch.mean(dist_est.logpdf(validation_ins, validation_outs)).data[0]
if next_validation_err > validation_err:
missteps += 1
validation_err = next_validation_err
if missteps > misstep_tolerance:
break
if verbose:
print train_err, validation_err, "(", local_iter+1, ")"
return train_err, validation_err, local_iter+1
optimizer = torch.optim.Adam(dist_est.parameters())
trace_train = []
trace_validation = []
trace_local_iters = []
num_iterations = 500
dataset_size = 2500
batch_size = 250
for i in xrange(num_iterations):
verbose = (i+1) % 25 == 0
if verbose:
print "["+str(1+len(trace_train))+"]",
t,v,l = training_step(optimizer, dist_est, gen_data, dataset_size, batch_size, verbose=verbose)
trace_train.append(t)
trace_validation.append(v)
trace_local_iters.append(l)
plt.figure(figsize=(10,3.5))
plt.plot(np.array(trace_train))
plt.plot(np.array(trace_validation))
plt.legend(['train error', 'validation error']);
plt.plot(np.array(trace_local_iters))
plt.legend(['iterations per dataset'])
"""
Explanation: Optimize network parameters
The training_step code samples a synthetic dataset, and performs minibatch updates on it for a while. Optionally, it can decide when to stop by examining synthetic validation data.
End of explanation
"""
def gen_example_pair(model):
model.draw_from_prior()
data_x = model.X.value
data_y = model.y.value
true_w = model.w.value
return data_x, data_y, true_w
def estimate_MCMC(data_x, data_y, ns, iters=10000, burn=0.5):
""" MCMC estimate of weight distribution """
mcmc_est = pymc.MCMC(robust_regression(data_x[:,1:2], data_y))
mcmc_est.sample(iters, burn=burn*iters, thin=np.ceil(burn*iters/ns))
trace_w = mcmc_est.trace('w').gettrace()[:ns]
return trace_w
def estimate_NN(network, data_x, data_y, ns):
""" NN proposal density for weights """
nn_input = Variable(torch.FloatTensor(np.concatenate((data_x[:,1], data_y[:]))))
print nn_input.size()
nn_input = nn_input.unsqueeze(0).repeat(ns,1)
if network.parameters().next().is_cuda:
nn_input = nn_input.cuda()
values, log_q = network.propose(nn_input)
return values.cpu().data.numpy(), log_q.squeeze().cpu().data.numpy()
def sample_prior_proposals(model, ns):
samples = []
for n in xrange(ns):
model.draw_from_prior()
samples.append(model.w.value)
return np.array(samples)
def compare_and_plot(ns=100, alpha=0.05, data_x=None, data_y=None, true_w=None):
model = pymc.Model(robust_regression(None, None))
prior_proposals = sample_prior_proposals(model, ns*10)
if data_x is None:
data_x, data_y, true_w = gen_example_pair(model)
mcmc_trace = estimate_MCMC(data_x, data_y, ns)
nn_proposals, logq = estimate_NN(dist_est, data_x, data_y, ns*10)
mcmc_mean = mcmc_trace.mean(0)
nn_mean = nn_proposals.mean(0)
print
print "True (generating) w:", true_w
print "MCMC weight mean:", mcmc_mean
print "NN weight proposal mean:", nn_mean
domain = np.linspace(min(data_x[:,1])-2, max(data_x[:,1])+2, 50)
plt.figure(figsize=(14,3))
plt.subplot(141)
plt.plot(domain, mcmc_mean[0] + mcmc_mean[1]*domain + mcmc_mean[2]*domain**2, "b--")
for i in range(ns):
plt.plot(domain, mcmc_trace[i,0] + mcmc_trace[i,1]*domain + mcmc_trace[i,2]*domain**2, "b-", alpha=alpha)
plt.plot(data_x[:,1], data_y, "k.")
plt.xlim(np.min(domain),np.max(domain))
limy = plt.ylim()
plt.legend(["MH posterior"])
ax = plt.subplot(143)
plt.plot(domain, nn_mean[0] + nn_mean[1]*domain + nn_mean[2]*domain**2, "r--")
for i in range(ns):
plt.plot(domain, nn_proposals[i,0] + nn_proposals[i,1]*domain + nn_proposals[i,2]*domain**2, "r-", alpha=alpha)
plt.plot(data_x[:,1], data_y, "k.")
plt.legend(["NN proposal"])
plt.ylim(limy)
plt.xlim(min(domain),max(domain));
ax.yaxis.set_ticklabels([])
ax = plt.subplot(142)
prior_samples_mean = prior_proposals.mean(0)
prior_proposals = prior_proposals[::10]
plt.plot(domain, prior_samples_mean[0] + prior_samples_mean[1]*domain + prior_samples_mean[2]*domain**2, "c--")
for i in range(ns):
plt.plot(domain, prior_proposals[i,0] + prior_proposals[i,1]*domain + prior_proposals[i,2]*domain**2, "c-", alpha=alpha)
plt.plot(data_x[:,1], data_y, "k.")
plt.legend(["Prior"])
plt.ylim(limy)
plt.xlim(min(domain),max(domain));
ax.yaxis.set_ticklabels([])
# compute NN-IS estimate
logp = []
nn_test_model = pymc.Model(robust_regression(data_x[:,1:2], data_y))
for nnp in nn_proposals:
nn_test_model.w.value = nnp
try:
next_logp = nn_test_model.logp
except:
next_logp = -np.Inf
logp.append(next_logp)
logp = np.array(logp)
w = np.exp(logp - logq) / np.sum(np.exp(logp - logq))
nnis_mean = np.sum(w*nn_proposals.T,1)
print "NN-IS estimated mean:", nnis_mean
print "NN-IS ESS:", 1.0/np.sum(w**2), w.shape[0]
ax = plt.subplot(144)
plt.plot(domain, nnis_mean[0] + nnis_mean[1]*domain + nnis_mean[2]*domain**2, "g--")
nn_resampled = nn_proposals[systematic_resample(np.log(w))][::10]
for i in range(ns):
plt.plot(domain, nn_resampled[i,0] + nn_resampled[i,1]*domain + nn_resampled[i,2]*domain**2, "g-", alpha=alpha)
plt.plot(data_x[:,1], data_y, "k.")
plt.legend(["NN-IS posterior"])
plt.ylim(limy)
plt.xlim(min(domain),max(domain));
ax.yaxis.set_ticklabels([])
plt.tight_layout()
compare_and_plot();
compare_and_plot();
compare_and_plot();
compare_and_plot();
compare_and_plot();
"""
Explanation: Define plotting and testing functions
We'll use PyMC's default Metropolis-Hastings as a benchmark, and compare to sampling directly from the learned model, and importance sampling.
End of explanation
"""
|
f-guitart/data_mining
|
notes/01 - Apache Spark Introduction.ipynb
|
gpl-3.0
|
import pyspark
sc = pyspark.SparkContext(appName="my_spark_app")
sc
"""
Explanation: Using Apache Spark
Spark applications run as independent sets of processes on a cluster, coordinated by the SparkContext object in your main program (called the driver program).
The SparkContext connects to a cluster manager, which allocates resources across applications.
Once connected, Spark acquires executors on nodes in the cluster, which are processes that run computations and store data for your application.
Next, it sends your application code (defined by JAR or Python files passed to SparkContext) to the executors.
Finally, SparkContext sends tasks to the executors to run.
End of explanation
"""
## just check that the sc variable has been created
print("is SparkContext loaded?", sc is not None)
"""
Explanation: Interactive programming is the practice of writing parts of a program while it is already running. The Jupyter Notebook will be the frontend for our active program.
For interactive programming we will have:
* A Jupyter/IPython notebook: where we run Python code
* PySparkShell application UI: to monitor Spark Cluster
Monitoring Spark Jobs
Every SparkContext launches its own instance of Web UI which is available at http://[master]:4040 by default.
Web UI comes with the following tabs:
* Jobs
* Stages
* Storage with RDD size and memory use
* Environment
* Executors
* SQL
By default, this information is only available while the application is running.
Jobs
Job id
Description
Submission date
Job Duration
Stages
Tasks
Stages
What is a Stage?:
A stage is a physical unit of execution. It is a step in a physical execution plan.
A stage is a set of parallel tasks, one per partition of an RDD, that compute partial results of a function executed as part of a Spark job.
In other words, a Spark job is a computation with that computation sliced into stages.
A stage is uniquely identified by id. When a stage is created, DAGScheduler increments internal counter nextStageId to track the number of stage submissions.
A stage can only work on the partitions of a single RDD (identified by rdd), but can be associated with many other dependent parent stages (via internal field parents), with the boundary of a stage marked by shuffle dependencies.
Storage
Storage page permit us to see how RDD are partitioned across the cluster.
Environment
This tab shows configuration and variables used in Apache Spark execution.
Executors
In this tab, we can see information about executors available in the cluster.
We can have relevant information about CPU and Memory, as well as RDD storage.
We can also have information about executed tasks.
Main Spark Concepts
Partitions
Spark’s basic abstraction is the Resilient Distributed Dataset, or RDD.
An RDD is split into partitions, and that fragmentation is what enables Spark to execute in parallel; the level of fragmentation is a function of the number of partitions of your RDD.
Caching
You will often hear: "Spark handles all data in memory".
This is tricky, and here's where the magic lies. Most of the time you will be working with metadata rather than with the full data, and computations are only carried out once you actually need the results.
Storing those results, or discarding them and recomputing them later, has a big impact on response times. When you store the results, the RDD is said to be cached.
Shuffling
(from: https://0x0fff.com/spark-architecture-shuffle/)
(more about shuffling: https://spark.apache.org/docs/1.3.1/programming-guide.html#performance-impact)
(best practices: https://robertovitillo.com/2015/06/30/spark-best-practices/)
There are many different tasks that require shuffling of the data across the cluster, for instance table join – to join two tables on the field “id”, you must be sure that all the data for the same values of “id” for both of the tables are stored in the same chunks.
Imagine the tables with integer keys ranging from 1 to 1’000’000. By storing the data in same chunks I mean that for instance for both tables values of the key 1-100 are stored in a single partition/chunk, this way instead of going through the whole second table for each partition of the first one, we can join partition with partition directly, because we know that the key values 1-100 are stored only in these two partitions. To achieve this both tables should have the same number of partitions, this way their join would require much less computations. So now you can understand how important shuffling is.
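A minimal PySpark sketch of these three concepts (assuming the sc SparkContext created above; the sizes are arbitrary):

```python
# Partitions: ask for 8 partitions explicitly when creating the RDD
rdd = sc.parallelize(range(100000), 8)
print(rdd.getNumPartitions())            # -> 8

# Caching: keep a computed RDD around so later actions can reuse it
squares = rdd.map(lambda x: x * x).cache()
print(squares.count())                   # first action computes and caches
print(squares.sum())                     # second action reuses the cached partitions

# Shuffling: reduceByKey must bring equal keys to the same partition,
# which moves data across the cluster
pairs = rdd.map(lambda x: (x % 10, 1))
print(pairs.reduceByKey(lambda a, b: a + b).collect())
```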
Exercises
(from: http://blog.insightdatalabs.com/jupyter-on-apache-spark-step-by-step/)
Exercise 1: Check that SparkContext is loaded in your current environment.
Exercise 2: Create your first RDD with 20 partitions and check WebUI that the RDD has created a job, an stage and 20 partitions. The RDD must contain a list of 1000 integers starting from 0. Get the number of partitions using getNumPartitions().
(Hint 1: you can use sc.parallelize)
(Hint 2: check Spark API docs: http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext.parallelize)
Exercise 3: Get 5 elements of the RDD.
Exercise 4: Name the RDD as "my_rdd" and persist it into memory and disk serialized.
Exercise 5: Perform a transformation to group the numbers into the lowest 100s and count the total frequency for each bin.
Exercise 6: Browse the WebUI. And:
* identify the RDD generated in Exercise X and its job
* identify the job in Exercise X
* check that the RDD has been cached
* identify the job in Exercise X
Answer 1:
End of explanation
"""
rdd = sc.parallelize([x for x in range(1000)],20)
rdd.getNumPartitions()
"""
Explanation: Answer 2:
End of explanation
"""
rdd.take(5)
"""
Explanation: Answer 3:
End of explanation
"""
rdd.setName("my_rdd").persist(pyspark.StorageLevel.MEMORY_AND_DISK_SER)
"""
Explanation: Answer 4:
End of explanation
"""
rdd.map(lambda r: (round(r/100)*100, 1))\
.reduceByKey(lambda x,y: x+y)\
.collect()
"""
Explanation: Answer 5:
End of explanation
"""
|
oditorium/blog
|
iPython/MonteCarlo2-Cholesky.ipynb
|
agpl-3.0
|
import numpy as np
d = 4
R = np.random.uniform(-1,1,(d,d))+np.eye(d)
C = np.dot(R.T, R)
#C, sort(eigvalsh(C))[::-1]
"""
Explanation: iPython Cookbook - Monte Carlo II
Generating a Monte Carlo vector using Cholesky Decomposition
Theory
Before we go into the implementation, a bit of theory on Monte Carlo and linear algebra, and in particular the Cholesky decomposition. Firstly Monte Carlo: assume we have a Standard Gaussian vector $Z=(Z_i)$ and we want a general Gaussian vector $X=(X_i)$ with correlation matrix $C = (C_{ij})$. Because everything is linear we know that $X$ will be of the form $X = Z \cdot M$ with some matrix $M$ (we assume the $X$ have zero expectation for simplicity; also we multiply vectors from the left because we prefer row vectors).
Assuming that we have such matrix M what is the corresponding correlation matrix $C^M$? In order to find out we need to calculate
$$
E[X_iX_j] = \sum_{\mu\nu} M_{\mu i} M_{\nu j} E[Z_\mu Z_\nu] = \sum_{\mu} M_{\mu i} M_{\mu j} = (M^tM)_{ij}
$$
ie we find that the covariance matrix is the square of $M$ (in the matrix sense, ie $M^tM$ where $M^t$ is the transposed matrix, ie with rows and columns interchanged). So if we want to generate a vector $X$ with covariance $C$ what we effectively need is a matrix $M$ such that $C= M^tM$.
Enter Cholesky decomposition: in a nutshell, if a matrix $C$ is symmetric and positive definite (with strictly positive eigenvalues, as every non-degenerate covariance matrix is) then there is a unique decomposition $C = L^tL$ with $L$ a triangular matrix.
Implementation
Generating a covariance matrix
First we generate a covariance matrix. This is not entirely trivial - the matrix must be symmetric and positive definite - and one way of going about it is to simply write $C = R^tR$ where $R$ is any random matrix (note that this is not a particularly good covariance matrix, because it is pretty close to the one-systemic-factor model)
End of explanation
"""
from scipy.linalg import cholesky
M = cholesky(C)
M
"""
Explanation: Decomposing the covariance matrix
We are given a covariance matrix $C$ and we want to find a matrix $M$ such that $M^tM=C$. One such matrix is given by running a Cholesky decomposition that is implemented in Python as scipy.linalg.cholesky()
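As a quick numerical check, the factor should square back to the covariance matrix (a minimal sketch, assuming the C and M from the adjacent cells):

```python
import numpy as np
print(np.allclose(np.dot(M.T, M), C))   # should print True
```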
End of explanation
"""
N = 10000
z = np.random.standard_normal((N, d))
z
"""
Explanation: Generating $z$
We now generate our Standard Gaussian $z$, as usual one row being one observation ($N$ is the number of rows)
End of explanation
"""
x = np.dot(z,M)
x
"""
Explanation: Generating $x$
We now generate our $x$ by multiplying $z \cdot M$
End of explanation
"""
C1 = np.cov(x, rowvar=0, bias=1)
C1, C, np.sort(np.linalg.eigvalsh(C1))[::-1], np.sort(np.linalg.eigvalsh(C))[::-1]
"""
Explanation: Check
We now check that the ex-post covariance matrix $C1$ is reasonably close to the ex-ante matrix $C$
End of explanation
"""
import sys
print(sys.version)
"""
Explanation: Licence and version
(c) Stefan Loesch / oditorium 2014; all rights reserved
(license)
End of explanation
"""
|
ratt-ru/bullseye
|
writing_and_wiki/notebooks/Error relations.ipynb
|
gpl-2.0
|
%install_ext https://raw.githubusercontent.com/mkrphys/ipython-tikzmagic/master/tikzmagic.py
%load_ext tikzmagic
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
"""
Explanation: Setup
End of explanation
"""
%%tikz --scale 2 --size 600,600 -f png
\draw [black, domain=0:180] plot ({2*cos(\x)}, {2*sin(\x)});
\draw [black, ->] (0,0) -- (0,2);
\draw [black, ->] (0,0) -- ({-0.75*2},{sqrt(4-(-0.75*2)*(-0.75*2))});
\node [above] at (0,1*2) {$n=1$};
\node [right] at (0,0.5*2) {$1$};
\node [right] at (-0.5*2,0.5*2) {$1$};
\node [left] at ({-0.75*2},{sqrt(4-(-0.75*2)*(-0.75*2))}) {$n=\sqrt{1-l^2-m^2}$};
\draw [blue,thick] (-2,2) -- (2,2);
\draw [red, ->] ({-0.75*2},{sqrt(4-(-0.75*2)*(-0.75*2))}) -- ({-0.75*2},2);
\node [left] at ({-0.75*2},{sqrt(4-(-0.75*2)*(-0.75*2))+0.15*2}) {$\epsilon$};
\node [right] at ({1*2},{1*2}) {$l$};
\node [right] at ({1*2},{0*2}) {$l=1$};
\node [left] at ({-1*2},{0*2}) {$l=-1$};
"""
Explanation: Background
Recall that the simplistic measurement equation can be defined as follows:
\begin{equation}
V_{measured}(u,v,w) \approx \left<\int_{sources}I(l,m,n)e^{\frac{2\pi i}{\lambda}(ul+vm+w(n-1))}\frac{dldm}{n}\right> \text{ where } n\approx\sqrt{1-l^2-m^2}
\end{equation}
In order to use a 2-dimensional discrete Fourier transform we require $n-1\approx 0$ (i.e. the tangent-plane approximation must stay close to the celestial sphere). This assumption is invalid in widefield imaging and it is necessary to correct for the resulting phase delay.
Let us define the phase error introduced by imaging over wider fields of view with non-coplanar baselines (non-zero w terms) as
\begin{equation}
\xi:=\frac{2{\pi}||\Delta{w}||\epsilon}{{\lambda_{min}}{n_{\text{planes}}}} \text{ and ideally } 0{\leq\xi\ll}1
\end{equation}
Here $\epsilon$ represents the distance between the celestial sphere and the [tangential] planar projection. For simplicity we assume an orthogonal (SIN projection in FITS nomenclature) coordinate projection is used. In other words for each $(l,m,n)$ coordinate $n = 1$ where n is defined to be in the direction of the phase centre, with orthogonal $l$ and $m$ direction cosines. $l$ and $m$ are the direction cosines with respect to $u$ and $v$ respectively.
In order to decrease $\xi$ we need $n_{\text{planes}}{\rightarrow}\infty$. $n_{planes}$ represent the number of w-projection planes needed to drive down the phase error, $\xi$.
End of explanation
"""
%%tikz --scale 2 --size 600,600 -f png
\draw [black,thick] (0,2) -- (3,2);
\draw [black,thick] (0,0) -- (3,0);
\draw [black, domain={180+15}:{360-15}] plot ({(cos(\x)+1)*0.25}, {(sin(\x))*0.25+2});
\draw [black] ({0.25*0.5},{2 - sqrt(0.25*0.25*(1-0.5*0.5))}) -- ({0.25},{2});
\draw [black] ({0.25*1.5},{2 - sqrt(0.25*0.25*(1-0.5*0.5))}) -- ({0.25},{2});
\draw [black, domain={180+15}:{360-15}] plot ({(cos(\x)-1)*0.25+3}, {(sin(\x))*0.25});
\draw [black] ({3-0.25*0.5},{0 - sqrt(0.25*0.25*(1-0.5*0.5))}) -- ({3-0.25},{0});
\draw [black] ({3-0.25*1.5},{0 - sqrt(0.25*0.25*(1-0.5*0.5))}) -- ({3-0.25},{0});
\draw [red,thick,<->] ({3-0.25},{0}) -- ({3-0.25},{2});
\node [right] at ({3-0.25},{1}) {$||\Delta{w}||$};
\node [right] at ({3},{2}) {$w_{max}$};
\node [right] at ({3},{0}) {$w_{min}$};
"""
Explanation: The effect of a large range of w values is apparent from the equation for phase error above. For very long baselines, especially those originating from non-coplanar arrays (e.g VLBI) over extended periods of time this w term has a multiplicative effect on the error:
\begin{equation}
\xi\propto||\Delta{w}||\epsilon
\end{equation}
It must be emphasized that over an extended period of time the baselines of any general non-East-West array will be rotated up into the w-direction
End of explanation
"""
def compute_lmn(phase_centres,image_coordinate):
delta_ra = - phase_centres[0] + image_coordinate[0]
dec0 = phase_centres[1]
dec = image_coordinate[1]
return (np.cos(dec)*np.sin(delta_ra),
np.sin(dec)*np.cos(dec0)-np.cos(dec)*np.sin(dec0)*np.cos(delta_ra),
np.sin(dec)*np.sin(dec0)+np.cos(dec)*np.cos(dec0)*np.cos(delta_ra))
def construct_rot_matrix(ra,dec):
rot_matrix = [[np.sin(ra),np.cos(ra),0],
[-np.sin(dec)*np.cos(ra),np.sin(dec)*np.sin(ra),np.cos(dec)],
[np.cos(dec)*np.cos(ra),-np.cos(dec)*np.sin(ra),np.sin(dec)]]
return np.matrix(rot_matrix)
def compute_new_uvw(old,new,uvw):
rot_matrix = construct_rot_matrix(old[0],old[1])
rot_matrix_new = construct_rot_matrix(new[0],new[1])
return rot_matrix_new * rot_matrix.T * np.matrix(uvw).T #transpose of a Euler rotation matrix is the inverse rotation
"""
Explanation: Epsilon for w-projected images
The maximum error occurs at one of the corners of the facet/image, ie. lets say at:
\begin{equation}
\begin{split}
d &= (\theta_l/2,\theta_m/2)\
\end{split}
\end{equation}
Where $\theta_l=n_xcell_x \text{ rads and } \theta_m=n_ycell_y \text{ rads}$
The following identities relate the directions (assumed to be given in right ascension and declination) $\theta_l/2$ and $\theta_m/2$ to points on the celestial sphere.
\begin{equation}
\begin{split}
l &= \cos{\delta}\sin{\Delta\alpha}\
m &= \sin{\delta}\cos{\delta_0} - \cos{\delta}\sin{\delta_0}\cos{\Delta\alpha}\
n &= \sin{\delta}\sin{\delta_0} + \cos{\delta}\cos{\delta_0}\cos{\Delta\alpha}\
\end{split}
\end{equation}
The difference between a point on the orthogonally projected image and the corresponding point on the celestial sphere is given by:
\begin{equation}
\epsilon = ||n - 1|| = ||\sqrt{1-(\Delta{(l/2)})^2-(\Delta{(m/2)})^2} - 1||
\end{equation}
where $\Delta{(l/2)}$ and $\Delta{(m/2)}$ correspond to the direction cosines of the point at $(\alpha + \theta_l/2,\delta + \theta_m/2)$ where $\alpha$ and $\delta$ correspond to the phase centre at the centre of the facet/image.
The corresponding relation between number of planes and epsilon is given as:
\begin{equation}
n_{\text{planes}}=\frac{2{\pi}||\Delta{w}||\epsilon}{{\lambda_{min}}{\xi}} \text{ and ideally } 0{\leq\xi\ll}1
\end{equation}
Epsilon for faceted images
Here the half facet size (in l and m) is given as $\theta_{l_f}/2 = \theta_l/(2n_{facets})$ and $\theta_{m_f}/2 = \theta_m/(2n_{facets})$ respectively. We know the arc subtended by the angle to the corner of the facet has length $\cos{(\theta_{l_f}/2)}\cos{(\theta_{m_f}/2)}$ using the spherical rule of cosines and assuming a unit celestial sphere and orthogonal u and v bases. This angle to the corner of the image is a small number, so we might as well just use the small angle approximation:
\begin{equation}
\epsilon \approx \sin{(\delta_0 + \theta_l/2)}\sin{\delta_0} + \cos{(\delta_0 + \theta_l/2)}\cos{\delta_0}\cos{(\theta_m/2)} - \cos{\left[max(\theta_{l},\theta_{m})/(2n_{facets})\right]}
\end{equation}
This results in the following relation between half the number of linearly spaced facets (along a single diagonal of the facet image) and $\xi$:
\begin{equation}
n_{facets} = \frac{max(\theta_l,\theta_m)}{2\cos^{-1}{\left[\sin{(\delta_0 + \theta_l/2)}\sin{\delta_0} + \cos{(\delta_0 + \theta_l/2)}\cos{\delta_0}\cos{(\theta_m/2)}-\frac{\lambda_{min}\xi}{2{\pi}||\Delta{w}||}\right]}}
\end{equation}
End of explanation
"""
nx = ny = 1024
cellx = celly = 8 / 60.0 / 60.0 * np.pi / 180.0 #8 arcsec cell size in radians (the 1024-pixel image spans 8*1024 arcsec)
ra = (290 + 25 / 60 + 0 / 60 / 60) * np.pi / 180
dec = (21 + 45 / 60 + 0 / 60 / 60) * np.pi / 180
phase_centre = np.array([ra,dec]) #should be read from MS
max_err = phase_centre + np.array([cellx*nx/2.0,celly*ny/2.0])
lmn_max_err = compute_lmn(phase_centre,max_err)
delta_w = 1031.2111327 #should be read from MS
min_lambda = 0.15762 #should be read from MS
e = np.abs(lmn_max_err[2]-1)
threshold = 0.5
num_planes_needed = np.ceil(2 * np.pi * delta_w * e / (min_lambda * threshold))
(li,mi,ni) = compute_lmn(phase_centre,phase_centre + np.array([cellx*nx/2.0,celly*ny/2.0]))
num_facets_needed = np.ceil(np.sqrt((cellx*nx) / (2*np.arccos(ni - (min_lambda *threshold)/(2*np.pi*delta_w))))*2)
print(num_planes_needed, "planes needed to obtain separation")
print(num_facets_needed, "facets needed along each dimension of the final image to obtain separation")
"""
Explanation: Calculator
End of explanation
"""
|
debsankha/network_course_python
|
talks/01-pythonbasics-builtins.ipynb
|
gpl-2.0
|
# print( # Tab now should display the docstring
# Also works:
print??
"""
Explanation: Table of Contents
1. Introduction to Interactive Network Analysis and Visualization with Python
1.1 What is Python?
1.1.1 How to use Python
1.2 A short intro to jupyter
1.2.1 Markdown is cool
1.2.1.1 This is a heading
1.2.2 Use the TAB
1.2.3 Write your own documentations
1.2.4 Inline plots & interactive widgets
2. Short tutorial on python
2.1 datatypes and operators
2.1.1 integer, float, strings
2.1.2 Assignments
2.1.3 Conversion of datatypes:
2.1.4 Arithmetic and operators
2.2 Conditions and control statements (if, while)
2.2.1 Comparison operators:
2.2.2 is operator
2.2.3 If ... else ...
2.3 While loops:
2.3.1 Warning: Make sure that the condition gets True after a finite number of steps!
2.4 Sequences & for-loops
2.4.1 Sequences
2.4.2 Specialities of lists
2.4.2.1 In contrast to tuples and strings, lists are mutable. Items can be replaced, removed or added.
2.4.2.2 Warning: assigning a list to a new name does not create a copy
2.4.3 Specialities of strings:
2.4.3.1 Strings have some nice methods
2.4.4 For-loops
2.4.4.1 Works on strings, tuples:
2.4.4.2 Did I tell you about string formatting?
2.4.5 List comprehensions: A short way to create a sequence.
2.4.6 What are tuples for, you ask?
2.4.7 dictionaries:
2.5 functions:
2.6 Using modules
1. Introduction to Interactive Network Analysis and Visualization with Python
a GGNB short method course offered by Debsankha Manik and Jana Lassar at the MPI for Dynamics and Self-Organization in September 2014
1.1 What is Python?
a general-purpose, high-level programming language.
free and open source
easy to learn and easy to read
portable to any OS
saves your time rather than CPU time
used by many scientists
-> large set of modules for data processing/analysis/visualization/...
<img
src="http://image.slidesharecdn.com/scientific-python-151121091532-lva1-app6892/95/scientific-python-28-638.jpg?cb=1448101074" />
1.1.1 How to use Python
interactive interpreter (python, ipython)
scripts (with any text editor, there is no standard IDE)
jupyter (aka ipython notebook)
1.2 A short intro to jupyter
1.2.1 Markdown is cool
Markdown is a lightweight markup language.
1.2.1.1 This is a heading
This is a list:
apples
oranges
pears
A numbered list:
apples
oranges
pears
LaTeX equations
\begin{align}
f(x \; | \; \mu, \sigma) = \frac{1}{\sigma\sqrt{2\pi} } \; e^{ -\frac{(x-\mu)^2}{2\sigma^2} }
\end{align}
And this is a link where you find more about markdown syntax.
1.2.2 Use the TAB
End of explanation
"""
def isprime(n):
"""Determine if a given number is a prime number"""
for x in range(2, n-1):
if n % x == 0: #if n modulo x equals 0
return False #then n is not a prime number
return True
isprime??
"""
Explanation: 1.2.3 Write your own documentations
End of explanation
"""
%matplotlib inline
import pylab as pl
import numpy as np
from IPython.html.widgets import interact
def plot(frequency):
x = np.arange(1000)
pl.plot(x, np.sin(2*np.pi*frequency*x/1000))
interact(plot, frequency=(0,20,1))
"""
Explanation: 1.2.4 Inline plots & interactive widgets
End of explanation
"""
#name = object
a = 17
b = 3.14
c = "bla"
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
"""
Explanation: 2. Short tutorial on python
2.1 datatypes and operators
2.1.1 integer, float, strings
End of explanation
"""
a = 17
a = "Used names can always be reassigned to other objects regardles of their data type!"
print(a)
"""
Explanation: 2.1.2 Assignments
End of explanation
"""
print(int(5.5))
print(float('5.23'))
print(str(12))
print(bool('True'))
"""
Explanation: 2.1.3 Conversion of datatypes:
End of explanation
"""
x = 17  # pick two example numbers to try the operators on (values are arbitrary)
y = 5
print(x+y)
print("bla" + "bla")
print(x/y)
print(x*2)
print('bla'*3)
print(x/2)
print(x**3) #the exponentiation operator
print(x%2) #the remainder operator
"""
Explanation: 2.1.4 Arithmetic and operators
+
-
*
/
// (integer division)
% (modulo operator)
** (power)
End of explanation
"""
print(x==y)
print(x==5)
"""
Explanation: 2.2 Conditions and control statements (if, while)
2.2.1 Comparison operators:
| Operator | True, if |
| ------------- |:-------------:|
| a == b | a equals b |
| a > b | a is larger than b |
| a < b | a is smaller than b |
| a >= b | a is larger than b or equals b |
| a <= b | a is smaller than b or equals b |
| a != b | a and b are unequal |
| a is b | a is the same object as b |
| a is not b| a is not the same object as b |
End of explanation
"""
x = 3
y = 3
x is y
"""
Explanation: 2.2.2 is operator
End of explanation
"""
x = [1]
y = [1]
x is y
"""
Explanation: '==' compares values while 'is' compares identities
End of explanation
"""
x==y
"""
Explanation: But:
End of explanation
"""
from math import sin, pi
sin(2*pi)==0
"""
Explanation: Warning: do not check equality of two floats (finite precision!!)
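If you really need to compare floats, compare them up to a tolerance instead of with ==; a small sketch (math.isclose needs Python 3.5+):

```python
from math import sin, pi, isclose

print(sin(2*pi) == 0)                        # False because of finite precision
print(isclose(sin(2*pi), 0, abs_tol=1e-9))   # True: equal up to a tolerance
print(abs(sin(2*pi) - 0) < 1e-9)             # the manual equivalent
```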
End of explanation
"""
number_of_people = 6
if number_of_people < 5:
print('Not enough people to play this game.')
elif number_of_people < 10:
print('More would be better, but its sufficient.')
elif number_of_people < 20:
print('Perfect! Enjoy!')
elif number_of_people < 30:
print('Less would be better, but it will work somehow.')
else:
print('Sorry, but more than 30 is too much.')
"""
Explanation: 2.2.3 If ... else ...
End of explanation
"""
x = 12
#the long version:
if x%2==0:
message = "Even."
else:
message = "Odd."
print(message)
#the short version:
print( "Even." if x%2==0 else "Odd." )
"""
Explanation: Conditional expressions:
End of explanation
"""
value = 17
while value < 21:
print(value)
value = value + 1
value = 17
max_value = 30
while True:
value = value + 1
if value > max_value:
break #stop here and escape the while loop
elif value%2==0:
continue #stop here and continue the while loop
print(value)
"""
Explanation: 2.3 While loops:
End of explanation
"""
a = [1,2,3,4,5] #a list
b = (1,2,3,4,5) #a tuple
c = '12345' #a string
"""
Explanation: 2.3.1 Warning: Make sure that the condition gets True after a finite number of steps!
2.4 Sequences & for-loops
2.4.1 Sequences
| Sequence | mutable? | data type |
| ------------- |:-------------:|:--------:|
| list | yes | arbitrary |
| tuple | no | arbitrary |
| string | no | Unicode symbols |
End of explanation
"""
nested_list = [[1,2,3],[4,5,6],[7,8,9]]
"""
Explanation: Since lists and tuples can contain arbitrary data types, they can be 'nested':
End of explanation
"""
print(len(a),len(b),len(c))
print( a + a )
print( b + b )
print( c + c )
"""
Explanation: All three sequence types (tuples, strings and lists) share much of their syntax and functionality.
End of explanation
"""
print( a[0], b[1], c[2] )
"""
Explanation: single items are accessible by their index (starting from 0):
End of explanation
"""
print ( a[-1], b[-3] )
"""
Explanation: Negative indices are counted from the end (starting with -1)
End of explanation
"""
print( a[1:4] ) #get items from 1 to 4
print( a[3:5] ) #get items from 3 to 5
print( a[:4] ) #get items from 0 to 4
print( a[3:] ) #get items from 3 to the end
print( a[::2] ) #get every second item
"""
Explanation: A subset of items can be accessed by "slices".
Syntax: [I:J:K] means start from index I, stop at index J and take every K'th item. If I is omitted, start from the first item, if J is omitted, stop at the last item, and if K is omitted, take every item.
End of explanation
"""
print((2,3) in (1,2,3,4,5))
print('cde' in 'abcdefgh')
"""
Explanation: The in-operator checks whether an item is in the sequence:
End of explanation
"""
a = [1,2,3,4] #create list
a[2] = 12 #replace item 2 by value 12
a.append(34) #add value 34 to the end
a.extend([0,0,0]) #add several values to the end
a.pop() #remove last item
a.insert(3, 'blub')#insert object before index 3
a.reverse() #reverse list
print(a)
"""
Explanation: 2.4.2 Specialities of lists
2.4.2.1 In contrast to tuples and strings, lists are mutable. Items can be replaced, removed or added.
End of explanation
"""
s = [1,2]
t = s
t.append(99)
print(s, t)
"""
Explanation: 2.4.2.2 Warning: assigning a list to a new name does not create a copy
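If you want an independent copy, create one explicitly; a short sketch:

```python
s = [1, 2]
t = list(s)        # or s[:] or s.copy() -- all build a new list
t.append(99)
print(s, t)        # s is unchanged: [1, 2] [1, 2, 99]
# note: these are shallow copies; for nested lists use copy.deepcopy
```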
End of explanation
"""
# Strings can be enlosed by both single quote and double quote.
s='My home'
t="My home"
s==t
"""
Explanation: 2.4.3 Specialities of strings:
End of explanation
"""
newstring="This is Mary's home"
anotherstring='And he said: "Let there be light"'
# And if you **really** need it, both single and double quotes:
huge_string="""This 'string' contains "both" types of quote"""
"""
Explanation: strings can contain quotation marks themselves. single/double quotes become important then:
End of explanation
"""
print(huge_string.upper())
print(huge_string.startswith('a'))
print(huge_string.find('contain'))
print(huge_string.split(' '))
print(huge_string.count('s'))
"""
Explanation: 2.4.3.1 Strings have some nice methods
End of explanation
"""
i = 1
while i < 50:
if i%7 == 0:
print(i)
i += 1
for i in range(1, 50, 1):
if i%7 == 0:
print(i)
"""
Explanation: 2.4.4 For-loops
Remember how while loops were prone to infinite loop bugs?
Python gives you a chance to avoid them in most cases:
End of explanation
"""
count = 0
st = "home, sweet home"
for char in st:
if char == 'h':
count += 1
print("%c appears %d times in \"%s\""%('h', count, st))
"""
Explanation: 2.4.4.1 Works on strings, tuples:
End of explanation
"""
print("%s is %d years old"%('John', 34))
print("{name} is {age} years old, {name} has 3 children".format(name='John', age=34))
"""
Explanation: 2.4.4.2 Did I tell you about string formatting?
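Since Python 3.6 there is a third option, f-strings, which embed the expressions directly in the string; a small sketch:

```python
name = 'John'
age = 34
print(f"{name} is {age} years old")
print(f"next year {name} will be {age + 1}")
```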
End of explanation
"""
#Example: We want to sum up all numbers betwen 0 and 100.
#Instead of manually typing a list of all numbers, we can use range:
s = 0
for i in range(101):
s = s + i
print(s)
"""
Explanation: For-loops can not only iterate through sequences, but also through 'iterable' objects, like range().
End of explanation
"""
#long version: "for-loop"
li = []
for i in range(101):
li.append(i*2)
#short version:
li = [2*i for i in range(101)]
print(li)
"""
Explanation: 2.4.5 List comprehensions: A short way to create a sequence.
End of explanation
"""
li = [1/i for i in range(101) if i != 0]
print(li)
"""
Explanation: List comprehensions can be used as a filter:
End of explanation
"""
li = [1/i if i!=0 else None for i in range(101) ]
print(li)
"""
Explanation: Also to place a default value
End of explanation
"""
nested_list = [[1,2], [3], [4,5,6]]
flattened_list = [item for sublist in nested_list for item in sublist]
print(flattened_list)
"""
Explanation: And to write insanely obtuse code and mess with your collaborators :D
End of explanation
"""
l = [1,2,3,4,5]
t = [1,2,3,4,5]
#or, simply:
t = tuple(l)
print(l[2:5])
print(t[2:5])
print(2 in l)
print(2 in t)
l[2] = 99
print(l)
t[2] = 99
print(t)
"""
Explanation: 2.4.6 What are tuples for, you ask?
They are almost like lists, but immutable:
End of explanation
"""
d={'germany':'berlin', 'france':'paris', 'poland':'warsaw', 'denmark':'copenhagen'}
d['germany']
"""
Explanation: What's the use of immutability? glad you asked
2.4.7 dictionaries:
stores key->value pairs:
End of explanation
"""
d['belgium']='brussels'
"""
Explanation: You can add keys anytime:
End of explanation
"""
d.keys()
d.values()
"""
Explanation: useful things you can do with dictionaries:
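You can also loop over a dictionary directly, or over its key/value pairs; a short sketch using the d defined above:

```python
for country in d:                    # iterates over the keys
    print(country, d[country])

for country, capital in d.items():   # iterates over (key, value) pairs
    print(country, capital)

print(d.get('spain', 'unknown'))     # .get returns a default instead of raising KeyError
```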
End of explanation
"""
mycrazydict = {(1,3,7):'odd!', [2,4,6]: 'even!'}
"""
Explanation: Any datatype can be used as a key in dictionary, so long as it is hashable
Rule of thumb: integers, floats, tuples and strings are OK; ~~lists~~ and ~~dictionaries~~ are NOT OK
End of explanation
"""
def triple(x):
return x*3
triple(4)
"""
Explanation: 2.5 functions:
End of explanation
"""
def triple(x):
"""
This function accepts a number, returns 3 times that number.
"""
return x*3
triple #Shift+tab shows documentation in ipython
help(triple) #this also works
"""
Explanation: Very important: writing documentation for your code
End of explanation
"""
def modify(l):
l.append(23)
return 0
l=[1,2,3]
modify(l)
print(l)
"""
Explanation: when you pass any object to a function, any change done to the object will affect it globally:
End of explanation
"""
def change(l):
l=[1,2,34]
return 0
mylist=[3,6]
change(mylist)
print(mylist)
"""
Explanation: but reassignments inside the function are not reflected globally
End of explanation
"""
import math
math.sin(0.1)
help(math)
"""
Explanation: 2.6 Using modules
One of the most useful things about python is that there are tons of modules available, that do many of the tasks you might need to do:
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.17/_downloads/64973b551d79441db82e99316267b5b7/plot_whitened.ipynb
|
bsd-3-clause
|
import mne
from mne.datasets import sample
"""
Explanation: Plotting whitened data
This tutorial demonstrates how to plot whitened evoked data.
Data are whitened for many processes, including dipole fitting, source
localization and some decoding algorithms. Viewing whitened data thus gives
a different perspective on the data that these algorithms operate on.
Let's start by loading some data and computing a signal (spatial) covariance
that we'll consider to be noise.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'button': 32}
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id=event_id, reject=reject)
# baseline noise cov, not a lot of samples
noise_cov = mne.compute_covariance(epochs, tmax=0., method='shrunk', rank=None,
verbose='error')
# butterfly mode shows the differences most clearly
raw.plot(events=events, butterfly=True)
raw.plot(noise_cov=noise_cov, events=events, butterfly=True)
"""
Explanation: Raw data with whitening
<div class="alert alert-info"><h4>Note</h4><p>In the :meth:`mne.io.Raw.plot` with ``noise_cov`` supplied,
you can press the "w" key to turn whitening on and off.</p></div>
End of explanation
"""
epochs.plot()
epochs.plot(noise_cov=noise_cov)
"""
Explanation: Epochs with whitening
End of explanation
"""
evoked = epochs.average()
evoked.plot(time_unit='s')
evoked.plot(noise_cov=noise_cov, time_unit='s')
"""
Explanation: Evoked data with whitening
End of explanation
"""
evoked.plot_white(noise_cov=noise_cov, time_unit='s')
"""
Explanation: Evoked data with scaled whitening
The :meth:mne.Evoked.plot_white function takes an additional step of
scaling the whitened plots to show how well the assumption of Gaussian
noise is satisfied by the data:
End of explanation
"""
evoked.comment = 'All trials'
evoked.plot_topo(title='Evoked data')
evoked.plot_topo(noise_cov=noise_cov, title='Whitened evoked data')
"""
Explanation: Topographic plot with whitening
End of explanation
"""
|
supergis/git_notebook
|
geospatial/openstreetmap/osm-json2geometry.ipynb
|
gpl-3.0
|
from pprint import *
import pyspark
from pyspark import SparkConf, SparkContext
sc = None
print(pyspark.status)
"""
Explanation: Processing OpenStreetMap .osm files with Spark.
Spark DataFrame reference: https://spark.apache.org/docs/1.3.0/sql-programming-guide.html#interoperating-with-rdds
by openthings@163.com, 2016-4-23. License: GPL, MUST include this header.
Overview:
Use sqlContext.read.json() to read the JSON files (converted from .osm by osm-all2json) and create Spark DataFrame objects.
Query the DataFrames created from the JSON files to build new DataFrames.
Read each way's nd references (node IDs) and construct the way's geometry object.
Next steps:
Save the data to other storage systems such as MongoDB/HBase/HDFS.
Split the data into chunks and save them as per-region DataFrame collections.
Convert the DataFrames to GeoPandas DataFrames and save them as shapefiles.
Convert the DataFrames directly to GIScript.Dataset objects and save them as UDB files.
End of explanation
"""
conf = (SparkConf()
.setMaster("local")
.setAppName("MyApp")
.set("spark.executor.memory", "1g"))
if sc is None:
sc = SparkContext(conf = conf)
print(type(sc))
print(sc)
print(sc.applicationId)
"""
Explanation: Configure SparkConf and create the SparkContext runtime object.
End of explanation
"""
print(conf)
conf_kv = conf.getAll()
pprint(conf_kv)
"""
Explanation: Display the Spark configuration.
End of explanation
"""
fl = sc.textFile("../data/muenchen.osm_node.json")
for node in fl.collect()[0:2]:
node_dict = eval(node)
pprint(node_dict)
"""
Explanation: Text RDD operations in Spark.
Read the OSM JSON file as plain text and convert each JSON string into a dict object.
End of explanation
"""
lines = fl.filter(lambda line: "soemisch" in line)
print(lines.count())
print(lines.collect()[0])
"""
Explanation: Run a keyword search over the RDD as plain text.
End of explanation
"""
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
nodeDF = sqlContext.read.json("../data/muenchen.osm_node.json")
#print(nodeDF)
nodeDF.printSchema()
"""
Explanation: Spark DataFrame operations.
Use the SQL engine to create Spark DataFrame objects directly, which support queries and other operations.
Read the OSM node table.
End of explanation
"""
nodeDF.select("id","lat","lon","timestamp").show(10,True)
#help(nodeDF.show)
"""
Explanation: The Spark DataFrame select() operation. The show() method lets you cap the number of records displayed.
End of explanation
"""
wayDF = sqlContext.read.json("../data/muenchen.osm_way.json")
wayDF.printSchema()
"""
Explanation: Read the OSM way table.
End of explanation
"""
wayDF.select("id","tag","nd").show(10,True)
"""
Explanation: Inspect the data in the way table.
End of explanation
"""
def sepator():
print("===============================================================")
#### Extract the node ID list from a given way's nd entries and build a filter string for the query.
def nodelist_way(nd_list):
print("WayID:",nd_list["id"],"\tNode count:",len(nd_list["nd"]))
ndFilter = "("
for nd in nd_list["nd"]:
ndFilter = ndFilter + nd["ref"] + ","
ndFilter = ndFilter.strip(',') + ")"
print(ndFilter)
return ndFilter
#### Use the way's node IDs to pull the node information (including the latitude/longitude fields) from nodeDF.
def nodecoord_way(nodeID_list):
nodeDF.registerTempTable("nodeDF")
nodeset = sqlContext.sql("select id,lat,lon,timestamp from nodeDF where nodeDF.id in " + nodeID_list)
nodeset.show(10,True)
"""
Explanation: Building the way geometry objects.
For each way record, generate a string list of node IDs, used in the next step to query the node coordinate table.
End of explanation
"""
for wayset in wayDF.select("id","nd").collect()[4:6]:
ndFilter = nodelist_way(wayset)
nodecoord_way(ndFilter)
#pprint(nd_list["nd"])
#sepator()
"""
Explanation: Query the node information for several ways.
End of explanation
"""
relationDF = sqlContext.read.json("../data/muenchen.osm_relation.json")
#print(relationDF)
relationDF.printSchema()
relationDF.show(10,True)
"""
Explanation: Convert the latitude/longitude coordinates into a GeoJSON geometry representation and save it back to the way's geometry field.
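A minimal sketch of how this step could look, reusing nodeDF and sqlContext from the cells above (the lat/lon field names and the LineString choice are assumptions):

```python
def way_geometry(nodeID_list):
    # query the coordinates of one way's nodes and build a GeoJSON LineString
    nodeDF.registerTempTable("nodeDF")
    rows = sqlContext.sql(
        "select id, lat, lon from nodeDF where nodeDF.id in " + nodeID_list).collect()
    # a full implementation would re-order the rows to follow the way's nd sequence
    coords = [[float(r.lon), float(r.lat)] for r in rows]
    return {"type": "LineString", "coordinates": coords}
```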
End of explanation
"""
def myFunc(s):
words = s.split()
return len(words)
#wc = fl.map(myFunc).collect()
wc = fl.map(myFunc).collect()
wc
#df = sqlContext.read.format("com.databricks.spark.xml").option("rowTag", "result").load("../data/muenchen.osm")
#df
"""
Explanation: Search for a given keyword.
Process it with a user-defined function.
End of explanation
"""
|
hunterherrin/phys202-2015-work
|
assignments/assignment05/InteractEx01.ipynb
|
mit
|
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 01
Import
End of explanation
"""
def print_sum(a, b):
"""Print the sum of the arguments a and b."""
print(a+b)
"""
Explanation: Interact basics
Write a print_sum function that prints the sum of its arguments a and b.
End of explanation
"""
interact(print_sum, a=(-10.0,10.0,0.1), b=(-8,8,2));
assert True # leave this for grading the print_sum exercise
"""
Explanation: Use the interact function to interact with the print_sum function.
a should be a floating point slider over the interval [-10., 10.] with step sizes of 0.1
b should be an integer slider the interval [-8, 8] with step sizes of 2.
End of explanation
"""
def print_string(s, length=False):
"""Print the string s and optionally its length."""
print(s)
if length == True:
print(len(s))
"""
Explanation: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
End of explanation
"""
interact(print_string, length=True, s='Hi')
assert True # leave this for grading the print_string exercise
"""
Explanation: Use the interact function to interact with the print_string function.
s should be a textbox with the initial value "Hello World!".
length should be a checkbox with an initial value of True.
End of explanation
"""
|
chetan51/nupic.research
|
projects/sdr_math/sdr_math_neuron_paper.ipynb
|
gpl-3.0
|
from sympy import Symbol, binomial, Sum, Min, floor
from IPython.display import display
from tabulate import tabulate

oxp = Symbol("Omega_x'")
b = Symbol("b")
n = Symbol("n")
theta = Symbol("theta")
s = Symbol("s")
a = Symbol("a")
subsampledOmega = (binomial(s, b) * binomial(n - s, a - b)) / binomial(n, a)
subsampledFpF = Sum(subsampledOmega, (b, theta, s))
subsampledOmegaSlow = (binomial(s, b) * binomial(n - s, a - b))
subsampledFpFSlow = Sum(subsampledOmegaSlow, (b, theta, s))/ binomial(n, a)
display(subsampledFpF)
display(subsampledFpFSlow)
"""
Explanation: Equation for Neuron Paper
A dendritic segment can robustly classify a pattern by subsampling a small number of cells from a larger population. Assuming a random distribution of patterns, the exact probability of a false match is given by the following equation:
End of explanation
"""
display("n=10000, a=64, s=24, theta=12", subsampledFpF.subs(s,24).subs(n, 10000).subs(a, 64).subs(theta, 12).evalf())
display("n=10000, a=300, s=24, theta=12", subsampledFpFSlow.subs(theta, 12).subs(s, 24).subs(n, 10000).subs(a, 300).evalf())
display("n=2000, a=40, s=20, theta=10", subsampledFpF.subs(theta, 15).subs(s, 20).subs(n, 2000).subs(a, 40).evalf(100))
"""
Explanation: where n refers to the size of the population of cells, a is the number of active cells at any instant in time, s is the number of actual synapses on a dendritic segment, and θ is the threshold for NMDA spikes. Following (Ahmad & Hawkins, 2015), the numerator counts the number of possible ways θ or more cells can match a fixed set of s synapses. The denominator counts the number of ways a cells out of n can be active.
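As a plain-Python cross-check of the symbolic expression, a minimal sketch (math.comb needs Python 3.8+):

```python
from math import comb

def false_match_prob(n, a, s, theta):
    # sum_{b=theta}^{s} C(s, b) * C(n - s, a - b), divided by C(n, a)
    numerator = sum(comb(s, b) * comb(n - s, a - b) for b in range(theta, s + 1))
    return numerator / comb(n, a)

print(false_match_prob(n=10000, a=64, s=24, theta=12))
```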
Example usage
End of explanation
"""
T1B = subsampledFpFSlow.subs(n, 100000).subs(a, 2000).subs(theta,s).evalf()
print("n=100000, a=2000, theta=s")
display("s=6",T1B.subs(s,6).evalf())
display("s=8",T1B.subs(s,8).evalf())
display("s=10",T1B.subs(s,10).evalf())
"""
Explanation: Table 1B
End of explanation
"""
T1C = subsampledFpFSlow.subs(n, 100000).subs(a, 2000).subs(s,2*theta).evalf()
print("n=10000, a=300, s=2*theta")
display("theta=6",T1C.subs(theta,6).evalf())
display("theta=8",T1C.subs(theta,8).evalf())
display("theta=10",T1C.subs(theta,10).evalf())
display("theta=12",T1C.subs(theta,12).evalf())
"""
Explanation: Table 1C
End of explanation
"""
m = Symbol("m")
T1D = subsampledFpF.subs(n, 100000).subs(a, 2000).subs(s,2*m*theta).evalf()
print("n=100000, a=2000, s=2*m*theta")
display("theta=10, m=2",T1D.subs(theta,10).subs(m,2).evalf())
display("theta=10, m=4",T1D.subs(theta,10).subs(m,4).evalf())
display("theta=10, m=6",T1D.subs(theta,10).subs(m,6).evalf())
display("theta=20, m=6",T1D.subs(theta,20).subs(m,6).evalf())
"""
Explanation: Table 1D
End of explanation
"""
eq1 = subsampledFpFSlow.subs(s, 64).subs(theta, 12)
print("a=64 cells active, s=16 synapses on segment, dendritic threshold is theta=8\n")
errorList = []
nList = []
table = [["n", "sparsity", "prob(fp)"]]
for n0 in range(300,20100,200):
error = eq1.subs(n, n0).subs(a,64).evalf()
errorList += [error]
nList += [n0]
table.append([n0, 64/n0, error])
print (tabulate(table, headers="firstrow", tablefmt="grid"))
print(errorList)
print(nList)
"""
Explanation: Charts for SDR Paper
The following sections calculates the numbers for some of the SDR paper charts.
Importance of large n
End of explanation
"""
eq1 = subsampledFpFSlow.subs(s, 24).subs(theta, 12)
print ("a=n/2 cells active, s=24 synapses on segment, dendritic threshold is theta=12\n")
errorList = []
nList = []
table = [["n", "sparsity", "prob(fp)"]]
for n0 in range(300,3700,200):
    error = eq1.subs(n, n0).subs(a, n0 // 2).evalf()
errorList += [error]
nList += [n0]
table.append([n0, (n0/2)/n0, error])
print (tabulate(table, headers="firstrow", tablefmt="grid"))
print (errorList)
print (nList)
"""
Explanation: a = n/2
End of explanation
"""
print ("2% sparsity with n=400")
print(subsampledFpFSlow.subs(s, 4).subs(a, 8).subs(theta, 2).subs(n,400).evalf())
print ("2% sparsity with n=4000")
print(subsampledFpFSlow.subs(s, 4).subs(a, 400).subs(theta, 2).subs(n,4000).evalf())
"""
Explanation: Small sparsity is insufficient
End of explanation
"""
eq2 = subsampledFpFSlow.subs(n, 10000).subs(a, 300)
print("a=200 cells active out of population of n=10000 cells\n")
errorList = []
sList = []
for s0 in range(2,31,1):
print "synapses s = %3d, theta = s/2 = %3d, probability of false match = "%(s0,s0/2), eq2.subs(s, s0).subs(theta,s0/2).evalf()
errorList += [eq2.subs(s, s0).subs(theta,s0/2).evalf()]
sList += [s0]
print (errorList)
print (sList)
"""
Explanation: A small subsample can be very reliable (but not too small)
End of explanation
"""
b = Symbol("b")
v = Symbol("v")
theta = Symbol("theta")
s = Symbol("s")
a = Symbol("a")
overlapSetNoise = (binomial(s, b) * binomial(a - s, v - b)) / binomial(a, v)
noiseFN = Sum(overlapSetNoise, (b, s-theta+1, s))
eqn = noiseFN.subs(s, 30).subs(a, 128)
print ("a=128 cells active with segment containing s=30 synapses (n doesn't matter here)\n")
for t in range(8,20,4):
print ("theta = ",t)
errorList = []
noiseList = []
noisePct = 0.05
while noisePct <= 0.85:
noise = int(round(noisePct*128,0))
errorList += [eqn.subs(v, noise).subs(theta,t).evalf()]
noiseList += [noise/128.0]
noisePct += 0.05
print (errorList)
print (noiseList)
"""
Explanation: Impact of noise on false negatives
End of explanation
"""
eqn = noiseFN
eqn = eqn.subs(s, 20).subs(a, 40)
for t in range(8, 20, 4):
print "theta = ",t
errorList = []
jaccardSimilarityList = []
noiseList = []
noisePct = 0.00
while noisePct <= 1:
noise = int(round(noisePct*40,0))
error = eqn.subs(v, noise).subs(theta,t).evalf()
errorList.append(error)
jaccardSimilarity = 1 - error
jaccardSimilarityList.append(jaccardSimilarity)
noiseList += [noise/40.0]
noisePct += 0.05
    print(errorList)
    print(jaccardSimilarityList)
    print(noiseList)
"""
Explanation: Impact of noise on first-order TMs trained on one sequence
We assume that false positives are impossible -- this is in fact not strictly true, but the number of segments is so low that it might as well be.
End of explanation
"""
w0 = 32
print ("a=%d cells active, s=%d synapses on segment, dendritic threshold is s/2\n" % (w0,w0))
errorList = []
nList = []
for n0 in range(50,500,50):
    w0 = n0 // 2
    eq1 = subsampledFpFSlow.subs(s, w0).subs(theta, w0 // 2)
error = eq1.subs(n, n0).subs(a,w0).evalf()
errorList += [error]
nList += [n0]
print ("population n = %5d, sparsity = %7.4f%%, probability of false match = "%(n0, float(w0)/n0), error)
print (errorList)
print (nList)
"""
Explanation: Charts for BAMI
End of explanation
"""
oxd = Symbol("omega")
n = Symbol("n")
a = Symbol("a")
b = Symbol("b")
theta = Symbol("theta")
s = Symbol("s")
m = Symbol("m")
q = Symbol("q")
p = (1 - a/n) ** m
ss = Min(floor((1 - (1 - s/n)**m)*n), a)
expectedUnionOverlap = binomial(((1 - p)*n), b) * binomial(((n - (1 - p)*n)), a - b) / binomial(n, a)
expectedUnionFP = Sum(expectedUnionOverlap, (b, theta, ss))
display(expectedUnionFP)
eq1 = expectedUnionFP.subs(a, 40).subs(n, 4000).subs(theta, 15).subs(s, 30)
display(eq1)
for num_patterns in range(100):
eq2 = eq1.subs(m, num_patterns)
#display(eq2)
error_prob = eq2.evalf(100)
    print(num_patterns, error_prob)
eq1 = p.subs(a, 40).subs(n, 4000)
for num_patterns in range(100):
expected_distinct = eq1.subs(m, num_patterns).evalf(10)
print (num_patterns, expected_distinct, (1 - expected_distinct))
"""
Explanation: Union Property Math
Here, we calculate the expected error rates for unions of various sizes, with varying dimensions and sparsities.
This is used in plots for the Neuron paper.
End of explanation
"""
eq1 = subsampledFpF
expected_num_segments_per_cell = (19*1000)*a/n
eq2 = 1 - ((1 - subsampledFpF)**expected_num_segments_per_cell)
jaccard = a/(a + eq2*(n - a))
display(jaccard)
jaccard2 = jaccard.subs(a, 64).subs(theta, 10).subs(s, 25)
display(jaccard2)
print ("[")
for i in range(300, 4100, 100):
eq4 = jaccard2.subs(n, i)
print (i, str(eq4.evalf(10)), expected_num_segments_per_cell.subs(a, 64).subs(n, i).evalf(), eq2.subs(a, 64).subs(theta, 10).subs(s, 25).subs(n, i).evalf())
print ("]")
"""
Explanation: Expected performance for first-order TMs with varying sparsity
End of explanation
"""
|
ThyrixYang/LearningNotes
|
MOOC/stanford_cnn_cs231n/assignment3(without_extra)/.ipynb_checkpoints/StyleTransfer-TensorFlow-checkpoint.ipynb
|
gpl-3.0
|
%load_ext autoreload
%autoreload 2
from scipy.misc import imread, imresize
import numpy as np
import matplotlib.pyplot as plt
# Helper functions to deal with image preprocessing
from cs231n.image_utils import load_image, preprocess_image, deprocess_image
%matplotlib inline
def get_session():
"""Create a session that dynamically allocates memory."""
# See: https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
return session
def rel_error(x,y):
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Older versions of scipy.misc.imresize yield different results
# from newer versions, so we check to make sure scipy is up to date.
def check_scipy():
import scipy
vnum = int(scipy.__version__.split('.')[1])
assert vnum >= 16, "You must install SciPy >= 0.16.0 to complete this notebook."
check_scipy()
"""
Explanation: Style Transfer
In this notebook we will implement the style transfer technique from "Image Style Transfer Using Convolutional Neural Networks" (Gatys et al., CVPR 2015).
The general idea is to take two images, and produce a new image that reflects the content of one but the artistic "style" of the other. We will do this by first formulating a loss function that matches the content and style of each respective image in the feature space of a deep network, and then performing gradient descent on the pixels of the image itself.
The deep network we use as a feature extractor is SqueezeNet, a small model that has been trained on ImageNet. You could use any network, but we chose SqueezeNet here for its small size and efficiency.
Here's an example of the images you'll be able to produce by the end of this notebook:
Setup
End of explanation
"""
import os

from cs231n.classifiers.squeezenet import SqueezeNet
import tensorflow as tf
tf.reset_default_graph() # remove all existing variables in the graph
sess = get_session() # start a new Session
# Load pretrained SqueezeNet model
SAVE_PATH = 'cs231n/datasets/squeezenet.ckpt'
if not os.path.exists(SAVE_PATH):
raise ValueError("You need to download SqueezeNet!")
model = SqueezeNet(save_path=SAVE_PATH, sess=sess)
# Load data for testing
content_img_test = preprocess_image(load_image('styles/tubingen.jpg', size=192))[None]
style_img_test = preprocess_image(load_image('styles/starry_night.jpg', size=192))[None]
answers = np.load('style-transfer-checks-tf.npz')
"""
Explanation: Load the pretrained SqueezeNet model. This model has been ported from PyTorch, see cs231n/classifiers/squeezenet.py for the model architecture.
To use SqueezeNet, you will need to first download the weights by changing into the cs231n/datasets directory and running get_squeezenet_tf.sh . Note that if you ran get_assignment3_data.sh then SqueezeNet will already be downloaded.
End of explanation
"""
def content_loss(content_weight, content_current, content_original):
"""
Compute the content loss for style transfer.
Inputs:
- content_weight: scalar constant we multiply the content_loss by.
- content_current: features of the current image, Tensor with shape [1, height, width, channels]
- content_target: features of the content image, Tensor with shape [1, height, width, channels]
Returns:
- scalar content loss
"""
pass
"""
Explanation: Computing Loss
We're going to compute the three components of our loss function now. The loss function is a weighted sum of three terms: content loss + style loss + total variation loss. You'll fill in the functions that compute these weighted terms below.
Content loss
We can generate an image that reflects the content of one image and the style of another by incorporating both in our loss function. We want to penalize deviations from the content of the content image and deviations from the style of the style image. We can then use this hybrid loss function to perform gradient descent not on the parameters of the model, but instead on the pixel values of our original image.
Let's first write the content loss function. Content loss measures how much the feature map of the generated image differs from the feature map of the source image. We only care about the content representation of one layer of the network (say, layer $\ell$), that has feature maps $A^\ell \in \mathbb{R}^{1 \times C_\ell \times H_\ell \times W_\ell}$. $C_\ell$ is the number of filters/channels in layer $\ell$, $H_\ell$ and $W_\ell$ are the height and width. We will work with reshaped versions of these feature maps that combine all spatial positions into one dimension. Let $F^\ell \in \mathbb{R}^{N_\ell \times M_\ell}$ be the feature map for the current image and $P^\ell \in \mathbb{R}^{N_\ell \times M_\ell}$ be the feature map for the content source image where $M_\ell=H_\ell\times W_\ell$ is the number of elements in each feature map. Each row of $F^\ell$ or $P^\ell$ represents the vectorized activations of a particular filter, convolved over all positions of the image. Finally, let $w_c$ be the weight of the content loss term in the loss function.
Then the content loss is given by:
$L_c = w_c \times \sum_{i,j} (F_{ij}^{\ell} - P_{ij}^{\ell})^2$
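One possible sketch of content_loss in the TF 1.x style used by this notebook (one way to fill in the stub, not necessarily the reference solution):

```python
def content_loss(content_weight, content_current, content_original):
    # L_c = w_c * sum over all positions and channels of (F - P)^2
    return content_weight * tf.reduce_sum(
        tf.square(content_current - content_original))
```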
End of explanation
"""
def content_loss_test(correct):
content_layer = 3
content_weight = 6e-2
c_feats = sess.run(model.extract_features()[content_layer], {model.image: content_img_test})
bad_img = tf.zeros(content_img_test.shape)
feats = model.extract_features(bad_img)[content_layer]
student_output = sess.run(content_loss(content_weight, c_feats, feats))
error = rel_error(correct, student_output)
print('Maximum error is {:.3f}'.format(error))
content_loss_test(answers['cl_out'])
"""
Explanation: Test your content loss. You should see errors less than 0.001.
End of explanation
"""
def gram_matrix(features, normalize=True):
"""
Compute the Gram matrix from features.
Inputs:
- features: Tensor of shape (1, H, W, C) giving features for
a single image.
- normalize: optional, whether to normalize the Gram matrix
If True, divide the Gram matrix by the number of neurons (H * W * C)
Returns:
- gram: Tensor of shape (C, C) giving the (optionally normalized)
Gram matrices for the input image.
"""
pass
"""
Explanation: Style loss
Now we can tackle the style loss. For a given layer $\ell$, the style loss is defined as follows:
First, compute the Gram matrix G which represents the correlations between the responses of each filter, where F is as above. The Gram matrix is an approximation to the covariance matrix -- we want the activation statistics of our generated image to match the activation statistics of our style image, and matching the (approximate) covariance is one way to do that. There are a variety of ways you could do this, but the Gram matrix is nice because it's easy to compute and in practice shows good results.
Given a feature map $F^\ell$ of shape $(1, C_\ell, M_\ell)$, the Gram matrix has shape $(1, C_\ell, C_\ell)$ and its elements are given by:
$$G_{ij}^\ell = \sum_k F^{\ell}_{ik} F^{\ell}_{jk}$$
Assuming $G^\ell$ is the Gram matrix from the feature map of the current image, $A^\ell$ is the Gram Matrix from the feature map of the source style image, and $w_\ell$ a scalar weight term, then the style loss for the layer $\ell$ is simply the weighted Euclidean distance between the two Gram matrices:
$$L_s^\ell = w_\ell \sum_{i, j} \left(G^\ell_{ij} - A^\ell_{ij}\right)^2$$
In practice we usually compute the style loss at a set of layers $\mathcal{L}$ rather than just a single layer $\ell$; then the total style loss is the sum of style losses at each layer:
$$L_s = \sum_{\ell \in \mathcal{L}} L_s^\ell$$
Begin by implementing the Gram matrix computation below:
End of explanation
"""
def gram_matrix_test(correct):
gram = gram_matrix(model.extract_features()[5])
student_output = sess.run(gram, {model.image: style_img_test})
error = rel_error(correct, student_output)
print('Maximum error is {:.3f}'.format(error))
gram_matrix_test(answers['gm_out'])
"""
Explanation: Test your Gram matrix code. You should see errors less than 0.001.
End of explanation
"""
def style_loss(feats, style_layers, style_targets, style_weights):
"""
Computes the style loss at a set of layers.
Inputs:
- feats: list of the features at every layer of the current image, as produced by
the extract_features function.
- style_layers: List of layer indices into feats giving the layers to include in the
style loss.
- style_targets: List of the same length as style_layers, where style_targets[i] is
a Tensor giving the Gram matrix the source style image computed at
layer style_layers[i].
- style_weights: List of the same length as style_layers, where style_weights[i]
is a scalar giving the weight for the style loss at layer style_layers[i].
Returns:
- style_loss: A Tensor containing the scalar style loss.
"""
# Hint: you can do this with one for loop over the style layers, and should
# not be very much code (~5 lines). You will need to use your gram_matrix function.
pass
"""
Explanation: Next, implement the style loss:
End of explanation
"""
def style_loss_test(correct):
style_layers = [1, 4, 6, 7]
style_weights = [300000, 1000, 15, 3]
feats = model.extract_features()
style_target_vars = []
for idx in style_layers:
style_target_vars.append(gram_matrix(feats[idx]))
style_targets = sess.run(style_target_vars,
{model.image: style_img_test})
s_loss = style_loss(feats, style_layers, style_targets, style_weights)
student_output = sess.run(s_loss, {model.image: content_img_test})
error = rel_error(correct, student_output)
print('Error is {:.3f}'.format(error))
style_loss_test(answers['sl_out'])
"""
Explanation: Test your style loss implementation. The error should be less than 0.001.
End of explanation
"""
def tv_loss(img, tv_weight):
"""
Compute total variation loss.
Inputs:
- img: Tensor of shape (1, H, W, 3) holding an input image.
- tv_weight: Scalar giving the weight w_t to use for the TV loss.
Returns:
- loss: Tensor holding a scalar giving the total variation loss
for img weighted by tv_weight.
"""
# Your implementation should be vectorized and not require any loops!
pass
"""
Explanation: Total-variation regularization
It turns out that it's helpful to also encourage smoothness in the image. We can do this by adding another term to our loss that penalizes wiggles or "total variation" in the pixel values.
You can compute the "total variation" as the sum of the squares of differences in the pixel values for all pairs of pixels that are next to each other (horizontally or vertically). Here we sum the total-variation regualarization for each of the 3 input channels (RGB), and weight the total summed loss by the total variation weight, $w_t$:
$L_{tv} = w_t \times \sum_{c=1}^3\sum_{i=1}^{H-1} \sum_{j=1}^{W-1} \left( (x_{i,j+1, c} - x_{i,j,c})^2 + (x_{i+1, j,c} - x_{i,j,c})^2 \right)$
In the next cell, fill in the definition for the TV loss term. To receive full credit, your implementation should not have any loops.
End of explanation
"""
def tv_loss_test(correct):
tv_weight = 2e-2
t_loss = tv_loss(model.image, tv_weight)
student_output = sess.run(t_loss, {model.image: content_img_test})
error = rel_error(correct, student_output)
print('Error is {:.3f}'.format(error))
tv_loss_test(answers['tv_out'])
"""
Explanation: Test your TV loss implementation. Error should be less than 0.001.
End of explanation
"""
def style_transfer(content_image, style_image, image_size, style_size, content_layer, content_weight,
style_layers, style_weights, tv_weight, init_random = False):
"""Run style transfer!
Inputs:
- content_image: filename of content image
- style_image: filename of style image
- image_size: size of smallest image dimension (used for content loss and generated image)
- style_size: size of smallest style image dimension
- content_layer: layer to use for content loss
- content_weight: weighting on content loss
- style_layers: list of layers to use for style loss
- style_weights: list of weights to use for each layer in style_layers
- tv_weight: weight of total variation regularization term
- init_random: initialize the starting image to uniform random noise
"""
# Extract features from the content image
content_img = preprocess_image(load_image(content_image, size=image_size))
feats = model.extract_features(model.image)
content_target = sess.run(feats[content_layer],
{model.image: content_img[None]})
# Extract features from the style image
style_img = preprocess_image(load_image(style_image, size=style_size))
style_feat_vars = [feats[idx] for idx in style_layers]
style_target_vars = []
# Compute list of TensorFlow Gram matrices
for style_feat_var in style_feat_vars:
style_target_vars.append(gram_matrix(style_feat_var))
# Compute list of NumPy Gram matrices by evaluating the TensorFlow graph on the style image
style_targets = sess.run(style_target_vars, {model.image: style_img[None]})
# Initialize generated image to content image
if init_random:
img_var = tf.Variable(tf.random_uniform(content_img[None].shape, 0, 1), name="image")
else:
img_var = tf.Variable(content_img[None], name="image")
# Extract features on generated image
feats = model.extract_features(img_var)
# Compute loss
c_loss = content_loss(content_weight, feats[content_layer], content_target)
s_loss = style_loss(feats, style_layers, style_targets, style_weights)
t_loss = tv_loss(img_var, tv_weight)
loss = c_loss + s_loss + t_loss
# Set up optimization hyperparameters
initial_lr = 3.0
decayed_lr = 0.1
decay_lr_at = 180
max_iter = 200
# Create and initialize the Adam optimizer
lr_var = tf.Variable(initial_lr, name="lr")
# Create train_op that updates the generated image when run
with tf.variable_scope("optimizer") as opt_scope:
train_op = tf.train.AdamOptimizer(lr_var).minimize(loss, var_list=[img_var])
# Initialize the generated image and optimization variables
opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)
sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
# Create an op that will clamp the image values when run
clamp_image_op = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))
f, axarr = plt.subplots(1,2)
axarr[0].axis('off')
axarr[1].axis('off')
axarr[0].set_title('Content Source Img.')
axarr[1].set_title('Style Source Img.')
axarr[0].imshow(deprocess_image(content_img))
axarr[1].imshow(deprocess_image(style_img))
plt.show()
plt.figure()
# Optimization loop using the hand-tuned schedule above
for t in range(max_iter):
# Take an optimization step to update img_var
sess.run(train_op)
if t < decay_lr_at:
sess.run(clamp_image_op)
if t == decay_lr_at:
sess.run(tf.assign(lr_var, decayed_lr))
if t % 100 == 0:
print('Iteration {}'.format(t))
img = sess.run(img_var)
plt.imshow(deprocess_image(img[0], rescale=True))
plt.axis('off')
plt.show()
print('Iteration {}'.format(t))
img = sess.run(img_var)
plt.imshow(deprocess_image(img[0], rescale=True))
plt.axis('off')
plt.show()
"""
Explanation: Style Transfer
Let's put it all together and make some beautiful images! The style_transfer function below combines all the losses you coded up above and optimizes for an image that minimizes the total loss.
End of explanation
"""
# Composition VII + Tubingen
params1 = {
'content_image' : 'styles/tubingen.jpg',
'style_image' : 'styles/composition_vii.jpg',
'image_size' : 192,
'style_size' : 512,
'content_layer' : 3,
'content_weight' : 5e-2,
'style_layers' : (1, 4, 6, 7),
'style_weights' : (20000, 500, 12, 1),
'tv_weight' : 5e-2
}
style_transfer(**params1)
# Scream + Tubingen
params2 = {
'content_image':'styles/tubingen.jpg',
'style_image':'styles/the_scream.jpg',
'image_size':192,
'style_size':224,
'content_layer':3,
'content_weight':3e-2,
'style_layers':[1, 4, 6, 7],
'style_weights':[200000, 800, 12, 1],
'tv_weight':2e-2
}
style_transfer(**params2)
# Starry Night + Tubingen
params3 = {
'content_image' : 'styles/tubingen.jpg',
'style_image' : 'styles/starry_night.jpg',
'image_size' : 192,
'style_size' : 192,
'content_layer' : 3,
'content_weight' : 6e-2,
'style_layers' : [1, 4, 6, 7],
'style_weights' : [300000, 1000, 15, 3],
'tv_weight' : 2e-2
}
style_transfer(**params3)
"""
Explanation: Generate some pretty pictures!
Try out style_transfer on the three different parameter sets below. Make sure to run all three cells. Feel free to add your own, but make sure to include the results of style transfer on the third parameter set (starry night) in your submitted notebook.
The content_image is the filename of the content image.
The style_image is the filename of the style image.
The image_size is the size of the smallest dimension of the content image (used for the content loss and the generated image).
The style_size is the size of the smallest dimension of the style image.
The content_layer specifies which layer to use for content loss.
The content_weight gives weighting on content loss in the overall loss function. Increasing the value of this parameter will make the final image look more realistic (closer to the original content).
style_layers specifies a list of which layers to use for style loss.
style_weights specifies a list of weights to use for each layer in style_layers (each of which will contribute a term to the overall style loss). We generally use higher weights for the earlier style layers because they describe more local/smaller scale features, which are more important to texture than features over larger receptive fields. In general, increasing these weights will make the resulting image look less like the original content and more distorted towards the appearance of the style image.
tv_weight specifies the weighting of total variation regularization in the overall loss function. Increasing this value makes the resulting image look smoother and less jagged, at the cost of lower fidelity to style and content.
Below the next three cells of code (in which you shouldn't change the hyperparameters), feel free to copy and paste the parameters to play around with them and see how the resulting image changes.
End of explanation
"""
# Feature Inversion -- Starry Night + Tubingen
params_inv = {
'content_image' : 'styles/tubingen.jpg',
'style_image' : 'styles/starry_night.jpg',
'image_size' : 192,
'style_size' : 192,
'content_layer' : 3,
'content_weight' : 6e-2,
'style_layers' : [1, 4, 6, 7],
'style_weights' : [0, 0, 0, 0], # we discard any contributions from style to the loss
'tv_weight' : 2e-2,
'init_random': True # we want to initialize our image to be random
}
style_transfer(**params_inv)
"""
Explanation: Feature Inversion
The code you've written can do another cool thing. In an attempt to understand the types of features that convolutional networks learn to recognize, a recent paper [1] attempts to reconstruct an image from its feature representation. We can easily implement this idea using image gradients from the pretrained network, which is exactly what we did above (but with two different feature representations).
Now, if you set the style weights to all be 0 and initialize the starting image to random noise instead of the content source image, you'll reconstruct an image from the feature representation of the content source image. You're starting with total noise, but you should end up with something that looks quite a bit like your original image.
(Similarly, you could do "texture synthesis" from scratch if you set the content weight to 0 and initialize the starting image to random noise, but we won't ask you to do that here.)
[1] Aravindh Mahendran, Andrea Vedaldi, "Understanding Deep Image Representations by Inverting them", CVPR 2015
End of explanation
"""
|
marcinofulus/PR2014
|
CUDA/iCSE_PR_Rownanie_Logistyczne.ipynb
|
gpl-3.0
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pycuda.gpuarray as gpuarray
from pycuda.curandom import rand as curand
from pycuda.compiler import SourceModule
import pycuda.driver as cuda
try:
ctx.pop()
ctx.detach()
except:
print ("No CTX!")
cuda.init()
device = cuda.Device(0)
ctx = device.make_context()
print (device.name(), device.compute_capability(),device.total_memory()/1024.**3,"GB")
print ("a tak wogóle to mamy tu:",cuda.Device.count(), " urządzenia")
"""
Explanation: Bifurcation diagram for the logistic map $x \to a x (1-x)$
The logistic map is an extremely simple iterative equation that exhibits surprisingly complex behavior. Its properties have been the subject of serious mathematical work since the 1970s. Even so, many of its properties remain unexplored, and the behavior of its solutions can only be studied numerically.
The example below uses PyCUDA to quickly compute the so-called bifurcation diagram of the logistic map. Obtaining such a diagram requires simulating many copies of the equation simultaneously, with different initial conditions and different parameter values. This is an ideal task for a parallel computer.
Implementation approach
Our first implementation of the algorithm will use the kernel template called
ElementwiseKernel
It is a simple way to perform the same operation on every element of a large data vector.
End of explanation
"""
import numpy as np
Nx = 1024
Na = 1024
a = np.linspace(3.255,4,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
from pycuda.elementwise import ElementwiseKernel
iterate = ElementwiseKernel(
"float *a, float *x",
"x[i] = a[i]*x[i]*(1.0f-x[i])",
"iterate")
%%time
Niter = 1000
for i in range(Niter):
iterate(a_gpu,x_gpu)
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
plt.figure(num=1, figsize=(10, 6))
every = 10
plt.plot(a[::every],x[::every],'.',markersize=1)
plt.plot([3.83,3.83],[0,1])
"""
Explanation: Elementwise kernel
We define a kernel that, given a vector of initial states, performs the logistic-map iteration element by element. Since we want to run these iterations for different values of the parameter $a$, we define the kernel so that it takes both a vector of parameter values $a$ and a vector of initial values. Because many initial values share the same value of $a$, we use a numpy function that is handy in this case:
a = np.repeat(a,Nx)
End of explanation
"""
import pycuda.gpuarray as gpuarray
from pycuda.curandom import rand as curand
from pycuda.compiler import SourceModule
import pycuda.driver as cuda
try:
ctx.pop()
ctx.detach()
except:
print( "No CTX!")
cuda.init()
device = cuda.Device(0)
ctx = device.make_context()
mod = SourceModule("""
__global__ void logistic_iterations(float *a,float *x,int Niter)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
float a_ = a[idx];
float x_ = x[idx];
int i;
for (i=0;i<Niter;i++){
x_ = a_*x_*(1-x_);
}
x[idx] = x_;
}
""")
logistic_iterations = mod.get_function("logistic_iterations")
block_size=128
Nx = 10240
Na = 1024*2
blocks = Nx*Na//block_size
a = np.linspace(3.255,4,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
%%time
logistic_iterations(a_gpu,x_gpu, np.int32(10000),block=(block_size,1,1), grid=(blocks,1,1))
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
plt.figure(num=1, figsize=(9, 8))
every = 100
plt.plot(a[::every],x[::every],'.',markersize=1,alpha=1)
plt.plot([3.83,3.83],[0,1])
H, xedges, yedges = np.histogram2d(a,x,bins=(1024,1024))
plt.figure(num=1, figsize=(10,10))
plt.imshow(1-np.log(H.T+5e-1),origin='lower',cmap='gray')
"""
Explanation: Algorithm with a loop inside the CUDA kernel
Let us now write a version of the algorithm that iterates the equation Niter times within a single CUDA kernel invocation.
End of explanation
"""
%load_ext Cython
%%cython
def logistic_cpu(double a = 3.56994):
cdef double x
cdef int i
x = 0.1
for i in range(1000*1024*1024):
x = a*x*(1.0-x)
return x
%%time
logistic_cpu(1.235)
print("OK")
"""
Explanation: Comparison with a CPU version
For comparison, we write a simple program that computes the logistic-map iterations on the CPU. We use Cython, which automatically compiles the function into efficient code whose performance is comparable to code written in C or a similar language.
The timing shows that our GPU kernel performs the computation considerably faster.
End of explanation
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
a1,a2 = 3,3.56994567
Nx = 1024
Na = 1024
a = np.linspace(a1,a2,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
x = x_gpu.get()
fig = plt.figure()
every = 1
Niter = 10000
for i in range(Niter):
if i%every==0:
plt.cla()
plt.xlim(a1,a2)
plt.ylim(0,1)
fig.suptitle("iteracja: %05d"%i)
plt.plot(a,x,'.',markersize=1)
plt.savefig("/tmp/%05d.png"%i)
if i>10:
every=2
if i>30:
every=10
if i>100:
every=50
if i>1000:
every=500
iterate(a_gpu,x_gpu)
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
%%sh
cd /tmp
time convert -delay 20 -loop 0 *.png anim_double.gif && rm *.png
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
block_size=128
Nx = 1024*5
Na = 1024*3
blocks = Nx*Na//block_size
nframes = 22
for i,(a1,a2) in enumerate(zip(np.linspace(3,3.77,nframes),np.linspace(4,3.83,nframes))):
a = np.linspace(a1,a2,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
x = x_gpu.get()
logistic_iterations(a_gpu,x_gpu, np.int32(10000),block=(block_size,1,1), grid=(blocks,1,1))
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
H, xedges, yedges = np.histogram2d(a,x,bins=(np.linspace(a1,a2,1024),np.linspace(0,1,1024)))
fig, ax = plt.subplots(figsize=[10,7])
ax.imshow(1-np.log(H.T+5e-1),origin='lower',cmap='gray',extent=[a1,a2,0,1])
#plt.xlim(a1,a2)
#plt.ylim(0,1)
ax.set_aspect(7/10*(a2-a1))
#fig.set_size_inches(8, 5)
fig.savefig("/tmp/zoom%05d.png"%i)
plt.close(fig)
%%sh
cd /tmp
time convert -delay 30 -loop 0 *.png anim_zoom.gif && rm *.png
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
block_size=128
Nx = 1024*5
Na = 1024*3
blocks = Nx*Na//block_size
a1,a2 = 1,4
x1,x2 = 0., 1
a = np.linspace(a1,a2,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
x = x_gpu.get()
logistic_iterations(a_gpu,x_gpu, np.int32(10000),block=(block_size,1,1), grid=(blocks,1,1))
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
H, xedges, yedges = np.histogram2d(a,x,bins=(np.linspace(a1,a2,1024),np.linspace(x1,x2,1024)))
fig, ax = plt.subplots(figsize=[10,7])
ax.imshow(1-np.log(H.T+5e-1),origin='lower',cmap='gray',extent=[a1,a2,x1,x2])
#plt.xlim(a1,a2)
#plt.ylim(0,1)
ax.set_aspect(7/10*(a2-a1)/(x2-x1))
#fig.set_size_inches(8, 5)
fig.savefig("/tmp/zoom.png")
plt.close(fig)
"""
Explanation: Visualizing the results
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive/10_recommend/cf_softmax_model/solution/cfmodel_softmax_model_solution.ipynb
|
apache-2.0
|
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.6
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
#Let's install Altair for interactive visualizations
!pip install git+git://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
#alt.renderers.enable('colab')
"""
Explanation: Recommendation Systems with TensorFlow
Introduction
In this lab, we will create a movie recommendation system based on the MovieLens dataset available here. The data consists of movie ratings (on a scale of 1 to 5).
Specifically, we'll be using matrix factorization to learn user and movie embeddings. Concepts highlighted here are also available in the course on Recommendation Systems.
Objectives
Explore the MovieLens Data
Train a matrix factorization model
Inspect the Embeddings
Perform Softmax model training
End of explanation
"""
# Download MovieLens data.
print("Downloading movielens data...")
from urllib.request import urlretrieve
import zipfile
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, ratings, and movies).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0. This will make handling of the
# indices easier later
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
"""
Explanation: We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
End of explanation
"""
users.describe()
"""
Explanation: Exploring the Movielens Data
Before we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset.
Users
We start by printing some basic statistics describing the numeric user features.
End of explanation
"""
users.describe(include=[np.object])
"""
Explanation: We can also print some basic statistics describing the categorical user features
End of explanation
"""
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
"""
Explanation: We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
End of explanation
"""
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
"""
Explanation: Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.
What do you observe, and how might this affect the recommendations?
End of explanation
"""
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
"""
Explanation: Movies
It is also useful to look at information about the movies and their ratings.
End of explanation
"""
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
"""
Explanation: Finally, the last chart shows the distribution of the number of ratings and average rating.
End of explanation
"""
#Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
"""
Explanation: Preliminaries
Our goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with
$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and
$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.
Here
- $N$ is the number of users,
- $M$ is the number of movies,
- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,
- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,
- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,
- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$.
Sparse Representation of the Rating Matrix
The rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For an efficient representation, we will use a tf.SparseTensor. A SparseTensor uses three tensors to represent the matrix: tf.SparseTensor(indices, values, dense_shape) represents a tensor, where a value $A_{ij} = a$ is encoded by setting indices[k] = [i, j] and values[k] = a. The last tensor dense_shape is used to specify the shape of the full underlying matrix.
Toy example
Assume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings,
user_id | movie_id | rating
--:|--:|--:
0 | 0 | 5.0
0 | 1 | 3.0
1 | 3 | 1.0
The corresponding rating matrix is
$$
A =
\begin{bmatrix}
5.0 & 3.0 & 0 & 0 \\
0 & 0 & 0 & 1.0
\end{bmatrix}
$$
And the SparseTensor representation is,
python
SparseTensor(
indices=[[0, 0], [0, 1], [1,3]],
values=[5.0, 3.0, 1.0],
dense_shape=[2, 4])
Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.
In this exercise, we'll write a function that maps from our ratings DataFrame to a tf.SparseTensor.
Hint: you can select the values of a given column of a Dataframe df using df['column_name'].values.
End of explanation
"""
#Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
"""
Explanation: Calculating the error
The model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as
$$
\begin{align}
\text{MSE}(A, UV^\top)
&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\
&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}
\end{align}
$$
where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$.
Exercise 2: Mean Squared Error
Write a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.
Hints:
* in this section, we only consider observed entries when calculating the loss.
* a SparseTensor sp_x is a tuple of three Tensors: sp_x.indices, sp_x.values and sp_x.dense_shape.
* you may find tf.gather_nd and tf.losses.mean_squared_error helpful.
End of explanation
"""
#Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
"""
Explanation: Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).
Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
End of explanation
"""
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
#tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
metrics_vals = [collections.defaultdict(list) for _ in self._metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
"""
Explanation: Training a Matrix Factorization model
CFModel (Collaborative Filtering Model) helper class
This is a simple class to train a matrix factorization model using stochastic gradient descent.
The class constructor takes
- the user embeddings U (a tf.Variable).
- the movie embeddings V, (a tf.Variable).
- a loss to optimize (a tf.Tensor).
- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).
After training, one can access the trained embeddings using the model.embeddings dictionary.
Example usage:
U_var = ...
V_var = ...
loss = ...
model = CFModel(U_var, V_var, loss)
model.train(iterations=100, learning_rate=1.0)
user_embeddings = model.embeddings['user_id']
movie_embeddings = model.embeddings['movie_id']
End of explanation
"""
#Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random.normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random.normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
"""
Explanation: Exercise 3: Build a Matrix Factorization model and train it
Using your sparse_mean_square_error function, write a function that builds a CFModel by creating the embedding variables and the train and test losses.
End of explanation
"""
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
"""
Explanation: Great, now it's time to train the model!
Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.
Note: by calling model.train again, the model will continue training starting from the current values of the embeddings.
End of explanation
"""
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
"""
Explanation: The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings.
Inspecting the Embeddings
In this section, we take a closer look at the learned embeddings, by
- computing your recommendations
- looking at the nearest neighbors of some movies,
- looking at the norms of the movie embeddings,
- visualizing the embedding in a projected embedding space.
Exercise 4: Write a function that computes the scores of the candidates
We start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.
As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:
- dot product: the score of item j is $\langle u, V_j \rangle$.
- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.
Hints:
- you can use np.dot to compute the product of two np.Arrays.
- you can use np.linalg.norm to compute the norm of a np.Array.
End of explanation
"""
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
"""
Explanation: Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
End of explanation
"""
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
"""
Explanation: Movie Nearest neighbors
Let's look at the nearest neighbors for some of the movies.
End of explanation
"""
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
models: A CFModel object, or a list of CFModel objects.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
model: A CFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
"""
Explanation: It seems that the quality of learned embeddings may not be very good. Can you think of potential techniques that could be used to improve them? We can start by inspecting the embeddings.
Movie Embedding Norm
We can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
End of explanation
"""
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
"""
Explanation: Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This can be alleviated by using regularization.
Try changing the value of the hyperparameter init_stddev. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$.
How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
End of explanation
"""
tsne_movie_embeddings(model_lowinit)
"""
Explanation: Embedding visualization
Since it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. T-SNE (T-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see How to Use t-SNE Effectively.
End of explanation
"""
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
"""
Explanation: You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).
We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons is that we only trained the model on observed pairs, and without regularization.
Softmax model
In this section, we will train a simple softmax model that predicts whether a given user has rated a movie.
The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
End of explanation
"""
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
"""
Explanation: We then create a function that generates an example batch, such that each example contains the following features:
- movie_id: A tensor of strings of the movie ids that the user rated.
- genre: A tensor of strings of the genres of those movies
- year: A tensor of strings of the release year.
End of explanation
"""
#Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
# Verify that the embeddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
"""
Explanation: Loss function
Recall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product
$$
\hat p(x) = \text{softmax}(\psi(x) V^\top).
$$
Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$.
Exercise 5: Write a loss function for the softmax model.
In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.
Hint: You can use the function tf.nn.sparse_softmax_cross_entropy_with_logits, which takes logits as input, where logits refers to the product $\psi(x) V^\top$.
End of explanation
"""
# Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
"""
Explanation: Exercise 6: Build a softmax model, train it, and inspect its embeddings.
We are now ready to build a softmax CFModel. Complete the build_softmax_model function in the next cell. The architecture of the model is defined in the function create_network and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the hidden_dims argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.
Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the softmax_loss function of the previous exercise).
End of explanation
"""
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
# default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
"""
Explanation: Train the Softmax model
We are now ready to train the softmax model. You can set the following hyperparameters:
- learning rate
- number of iterations. Note: you can run softmax_model.train() again to continue training the model from its current state.
- input embedding dimensions (the input_dims argument)
- number of hidden layers and size of each layer (the hidden_dims argument)
Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using tf.feature_column.categorical_column_with_vocabulary_list, which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using tf.feature_column.embedding_column.
End of explanation
"""
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm(softmax_model)
tsne_movie_embeddings(softmax_model)
"""
Explanation: Inspect the embeddings
We can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
End of explanation
"""
|
google/earthengine-api
|
python/examples/ipynb/AI_platform_demo.ipynb
|
apache-2.0
|
# Cloud authentication.
from google.colab import auth
auth.authenticate_user()
# Import and initialize the Earth Engine library.
import ee
ee.Authenticate()
ee.Initialize()
# Tensorflow setup.
import tensorflow as tf
print(tf.__version__)
# Folium setup.
import folium
print(folium.__version__)
"""
Explanation: <table class="ee-notebook-buttons" align="left"><td>
<a target="_blank" href="http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/AI_platform_demo.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/AI_platform_demo.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td></table>
Introduction
This is a demonstration notebook. Suppose you have developed a model whose training is constrained by the resources available to the notebook VM. In that case, you may want to use Google AI Platform to train your model, since long-running or resource-intensive training jobs can be performed in the background. Also, to use your trained model in Earth Engine, it needs to be deployed as a hosted model on AI Platform. This notebook uses previously created training data (see this example notebook) and AI Platform to train a model, deploy it, and use it to make predictions in Earth Engine. To do that, the code needs to be structured as a Python package that can be uploaded to AI Platform. The following cells produce that package programmatically.
Setup software libraries
Install needed libraries to the notebook VM. Authenticate as necessary.
End of explanation
"""
PACKAGE_PATH = 'ai_platform_demo'
!ls -l
!mkdir {PACKAGE_PATH}
!touch {PACKAGE_PATH}/__init__.py
!ls -l {PACKAGE_PATH}
"""
Explanation: Training code package setup
It's necessary to create a Python package to hold the training code. Here we're going to get started with that by creating a folder for the package and adding an empty __init__.py file.
End of explanation
"""
%%writefile {PACKAGE_PATH}/config.py
import tensorflow as tf
# INSERT YOUR PROJECT HERE!
PROJECT = 'your-project'
# INSERT YOUR BUCKET HERE!
BUCKET = 'your-bucket'
# This is a good region for hosting AI models.
REGION = 'us-central1'
# Specify names of output locations in Cloud Storage.
FOLDER = 'fcnn-demo'
JOB_DIR = 'gs://' + BUCKET + '/' + FOLDER + '/trainer'
MODEL_DIR = JOB_DIR + '/model'
LOGS_DIR = JOB_DIR + '/logs'
# Put the EEified model next to the trained model directory.
EEIFIED_DIR = JOB_DIR + '/eeified'
# Pre-computed training and eval data.
DATA_BUCKET = 'ee-docs-demos'
TRAINING_BASE = 'training_patches'
EVAL_BASE = 'eval_patches'
# Specify inputs (Landsat bands) to the model and the response variable.
opticalBands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7']
thermalBands = ['B10', 'B11']
BANDS = opticalBands + thermalBands
RESPONSE = 'impervious'
FEATURES = BANDS + [RESPONSE]
# Specify the size and shape of patches expected by the model.
KERNEL_SIZE = 256
KERNEL_SHAPE = [KERNEL_SIZE, KERNEL_SIZE]
COLUMNS = [
tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES
]
FEATURES_DICT = dict(zip(FEATURES, COLUMNS))
# Sizes of the training and evaluation datasets.
TRAIN_SIZE = 16000
EVAL_SIZE = 8000
# Specify model training parameters.
BATCH_SIZE = 16
EPOCHS = 50
BUFFER_SIZE = 3000
OPTIMIZER = 'SGD'
LOSS = 'MeanSquaredError'
METRICS = ['RootMeanSquaredError']
"""
Explanation: Variables
These variables need to be stored in a place where other code can access them. There are a variety of ways of accomplishing that, but here we'll use the %%writefile command to write the contents of the code cell to a file called config.py.
Note: You need to insert the name of a bucket (below) to which you have write access!
End of explanation
"""
!cat {PACKAGE_PATH}/config.py
from ai_platform_demo import config
print('\n\n', config.BATCH_SIZE)
"""
Explanation: Verify that the written file has the expected contents and is working as intended.
End of explanation
"""
%%writefile {PACKAGE_PATH}/model.py
from . import config
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
# Dataset loading functions
def parse_tfrecord(example_proto):
return tf.io.parse_single_example(example_proto, config.FEATURES_DICT)
def to_tuple(inputs):
inputsList = [inputs.get(key) for key in config.FEATURES]
stacked = tf.stack(inputsList, axis=0)
stacked = tf.transpose(stacked, [1, 2, 0])
return stacked[:,:,:len(config.BANDS)], stacked[:,:,len(config.BANDS):]
def get_dataset(pattern):
glob = tf.io.gfile.glob(pattern)
dataset = tf.data.TFRecordDataset(glob, compression_type='GZIP')
dataset = dataset.map(parse_tfrecord)
dataset = dataset.map(to_tuple)
return dataset
def get_training_dataset():
glob = 'gs://' + config.DATA_BUCKET + '/' + config.FOLDER + '/' + config.TRAINING_BASE + '*'
dataset = get_dataset(glob)
dataset = dataset.shuffle(config.BUFFER_SIZE).batch(config.BATCH_SIZE).repeat()
return dataset
def get_eval_dataset():
glob = 'gs://' + config.DATA_BUCKET + '/' + config.FOLDER + '/' + config.EVAL_BASE + '*'
dataset = get_dataset(glob)
dataset = dataset.batch(1).repeat()
return dataset
# A variant of the UNET model.
def conv_block(input_tensor, num_filters):
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
encoder = layers.BatchNormalization()(encoder)
encoder = layers.Activation('relu')(encoder)
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
encoder = layers.BatchNormalization()(encoder)
encoder = layers.Activation('relu')(encoder)
return encoder
def encoder_block(input_tensor, num_filters):
encoder = conv_block(input_tensor, num_filters)
encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)
return encoder_pool, encoder
def decoder_block(input_tensor, concat_tensor, num_filters):
decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
decoder = layers.BatchNormalization()(decoder)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.BatchNormalization()(decoder)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.BatchNormalization()(decoder)
decoder = layers.Activation('relu')(decoder)
return decoder
def get_model():
inputs = layers.Input(shape=[None, None, len(config.BANDS)]) # 256
encoder0_pool, encoder0 = encoder_block(inputs, 32) # 128
encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64
encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32
encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16
encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512) # 8
center = conv_block(encoder4_pool, 1024) # center
decoder4 = decoder_block(center, encoder4, 512) # 16
decoder3 = decoder_block(decoder4, encoder3, 256) # 32
decoder2 = decoder_block(decoder3, encoder2, 128) # 64
decoder1 = decoder_block(decoder2, encoder1, 64) # 128
decoder0 = decoder_block(decoder1, encoder0, 32) # 256
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
model = models.Model(inputs=[inputs], outputs=[outputs])
model.compile(
optimizer=optimizers.get(config.OPTIMIZER),
loss=losses.get(config.LOSS),
metrics=[metrics.get(metric) for metric in config.METRICS])
return model
"""
Explanation: Training data, evaluation data and model
The following is code to load training/evaluation data and the model. Write this into model.py. Note that these functions are developed and explained in this example notebook.
End of explanation
"""
from ai_platform_demo import model
eval = model.get_eval_dataset()
print(iter(eval.take(1)).next())
model = model.get_model()
print(model.summary())
"""
Explanation: Verify that model.py is functioning as intended.
End of explanation
"""
%%writefile {PACKAGE_PATH}/task.py
from . import config
from . import model
import tensorflow as tf
if __name__ == '__main__':
training = model.get_training_dataset()
evaluation = model.get_eval_dataset()
m = model.get_model()
m.fit(
x=training,
epochs=config.EPOCHS,
steps_per_epoch=int(config.TRAIN_SIZE / config.BATCH_SIZE),
validation_data=evaluation,
validation_steps=int(config.EVAL_SIZE),
callbacks=[tf.keras.callbacks.TensorBoard(config.LOGS_DIR)])
m.save(config.MODEL_DIR, save_format='tf')
"""
Explanation: Training task
At this stage, there should be config.py storing variables and model.py which has code for getting the training/evaluation data and the model. All that's left is code for training the model. The following will create task.py, which will get the training and eval data, train the model and save it when it's done in a Cloud Storage bucket.
End of explanation
"""
import time
JOB_NAME = 'demo_training_job_' + str(int(time.time()))
TRAINER_PACKAGE_PATH = 'ai_platform_demo'
MAIN_TRAINER_MODULE = 'ai_platform_demo.task'
REGION = 'us-central1'
# Later cells reference PROJECT directly; default it to the project from config.py
# (replace with your own Cloud project ID if it differs).
PROJECT = config.PROJECT
"""
Explanation: Submit the package to AI Platform for training
Now there's everything to submit this job, which can be done from the command line. First, define some needed variables.
Note: You need to insert the name of a Cloud project (below) you own!
End of explanation
"""
!gcloud ai-platform jobs submit training {JOB_NAME} \
--job-dir {config.JOB_DIR} \
--package-path {TRAINER_PACKAGE_PATH} \
--module-name {MAIN_TRAINER_MODULE} \
--region {REGION} \
--project {config.PROJECT} \
--runtime-version 2.3 \
--python-version 3.7 \
--scale-tier basic-gpu
"""
Explanation: Now the training job is ready to be started. First, you need to enable the ML API for your project. This can be done from this link to the Cloud Console. See this guide for details. Note that the Python and Tensorflow versions should match what is used in the Colab notebook.
End of explanation
"""
desc = !gcloud ai-platform jobs describe {JOB_NAME} --project {PROJECT}
state = desc.grep('state:')[0].split(':')[1].strip()
print(state)
"""
Explanation: Monitor the training job
There's not much more to do until the model is finished training (roughly 24 hours), but it's useful to monitor its progress. You can do that programmatically with another gcloud command: its output can be read into an IPython.utils.text.SList, from which the job state is extracted and checked until it reaches SUCCEEDED. Alternatively, you can monitor the job from the AI Platform jobs page on the Cloud Console.
End of explanation
"""
%load_ext tensorboard
%tensorboard --logdir {config.LOGS_DIR}
"""
Explanation: Inspect the trained model
Once the training job has finished, verify that you can load the trained model and print a summary of the fitted parameters. It's also useful to examine the logs with TensorBoard. There's a convenient notebook extension that will launch TensorBoard in the Colab notebook. Examine the training and testing learning curves to ensure that the training process has converged.
End of explanation
"""
from tensorflow.python.tools import saved_model_utils
meta_graph_def = saved_model_utils.get_meta_graph_def(config.MODEL_DIR, 'serve')
inputs = meta_graph_def.signature_def['serving_default'].inputs
outputs = meta_graph_def.signature_def['serving_default'].outputs
# Just get the first thing(s) from the serving signature def. i.e. this
# model only has a single input and a single output.
input_name = None
for k,v in inputs.items():
input_name = v.name
break
output_name = None
for k,v in outputs.items():
output_name = v.name
break
# Make a dictionary that maps Earth Engine outputs and inputs to
# AI Platform inputs and outputs, respectively.
import json
input_dict = "'" + json.dumps({input_name: "array"}) + "'"
output_dict = "'" + json.dumps({output_name: "impervious"}) + "'"
# You need to set the project before using the model prepare command.
!earthengine set_project {PROJECT}
!earthengine model prepare --source_dir {config.MODEL_DIR} --dest_dir {config.EEIFIED_DIR} --input {input_dict} --output {output_dict}
"""
Explanation: Prepare the model for making predictions in Earth Engine
Before we can use the model in Earth Engine, it needs to be hosted by AI Platform. But before we can host the model on AI Platform, we need to EEify (a new word!) it. The EEification process merely appends some extra operations to the inputs and outputs of the model in order to accommodate the interchange format between pixels from Earth Engine (float32) and inputs to AI Platform (base64). (See this doc for details.)
earthengine model prepare
The EEification process is handled for you using the Earth Engine command earthengine model prepare. To use that command, we need to specify the input and output model directories and the name of the input and output nodes in the TensorFlow computation graph. We can do all that programmatically:
End of explanation
"""
%%writefile config.yaml
autoScaling:
minNodes: 10
MODEL_NAME = 'fcnn_demo_model'
VERSION_NAME = 'v' + str(int(time.time()))
print('Creating version: ' + VERSION_NAME)
!gcloud ai-platform models create {MODEL_NAME} \
--project {PROJECT} \
--region {REGION}
!gcloud ai-platform versions create {VERSION_NAME} \
--project {config.PROJECT} \
--model {MODEL_NAME} \
--region {REGION} \
--origin {config.EEIFIED_DIR} \
--framework "TENSORFLOW" \
--runtime-version 2.3 \
--python-version 3.7 \
--config=config.yaml
"""
Explanation: Note that you can also use the TensorFlow saved model command line tool to do this manually. See this doc for details. Also note the names we've specified for the new inputs and outputs: array and impervious, respectively.
Perform inference using the trained model in Earth Engine
Before it's possible to get predictions from the trained and EEified model, it needs to be deployed on AI Platform. The first step is to create the model. The second step is to create a version. See this guide for details. Note that models and versions can be monitored from the AI Platform models page of the Cloud Console.
To ensure that the model is ready for predictions without having to warm up nodes, you can use a configuration YAML file to set the scaling type of this version to autoScaling and set a minimum number of nodes for the version. This ensures there are always nodes on stand-by; however, you will be charged as long as they are running. For this example, we'll set minNodes to 10, meaning that at least 10 nodes are always up, running, and waiting for predictions. The number of nodes will also scale up automatically if needed.
End of explanation
"""
# Use Landsat 8 surface reflectance data.
l8sr = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Cloud masking function.
def maskL8sr(image):
cloudShadowBitMask = ee.Number(2).pow(3).int()
cloudsBitMask = ee.Number(2).pow(5).int()
qa = image.select('pixel_qa')
mask1 = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And(
qa.bitwiseAnd(cloudsBitMask).eq(0))
mask2 = image.mask().reduce('min')
mask3 = image.select(config.opticalBands).gt(0).And(
image.select(config.opticalBands).lt(10000)).reduce('min')
mask = mask1.And(mask2).And(mask3)
return image.select(config.opticalBands).divide(10000).addBands(
image.select(config.thermalBands).divide(10).clamp(273.15, 373.15)
.subtract(273.15).divide(100)).updateMask(mask)
# The image input data is a cloud-masked median composite.
image = l8sr.filterDate(
'2015-01-01', '2017-12-31').map(maskL8sr).median().select(config.BANDS).float()
# Load the trained model and use it for prediction. If you specified a region
# other than the default (us-central1) at model creation, specify it here.
model = ee.Model.fromAiPlatformPredictor(
projectName = config.PROJECT,
modelName = MODEL_NAME,
version = VERSION_NAME,
inputTileSize = [144, 144],
inputOverlapSize = [8, 8],
proj = ee.Projection('EPSG:4326').atScale(30),
fixInputProj = True,
outputBands = {'impervious': {
'type': ee.PixelType.float()
}
}
)
predictions = model.predictImage(image.toArray())
# Use folium to visualize the input imagery and the predictions.
mapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3})
map = folium.Map(location=[38., -122.5], zoom_start=13)
folium.TileLayer(
tiles=mapid['tile_fetcher'].url_format,
attr='Google Earth Engine',
overlay=True,
name='median composite',
).add_to(map)
mapid = predictions.getMapId({'min': 0, 'max': 1})
folium.TileLayer(
tiles=mapid['tile_fetcher'].url_format,
attr='Google Earth Engine',
overlay=True,
name='predictions',
).add_to(map)
map.add_child(folium.LayerControl())
map
"""
Explanation: There is now a trained model, prepared for serving to Earth Engine, hosted and versioned on AI Platform. We can now connect Earth Engine directly to the trained model for inference. You do that with the ee.Model.fromAiPlatformPredictor command.
ee.Model.fromAiPlatformPredictor
For this command to work, we need to know a lot about the model. To connect to the model, you need to know the name and version.
Inputs
You need to be able to recreate the imagery on which it was trained in order to perform inference. Specifically, you need to create an array-valued input from the scaled data and use that for input. (Recall that the new input node is named array, which is convenient because the array image has one band, named array by default.) The inputs will be provided as 144x144 patches (inputTileSize), at 30-meter resolution (proj), but 8 pixels will be thrown out (inputOverlapSize) to minimize boundary effects.
Outputs
The output (which you also need to know), is a single float band named impervious.
End of explanation
"""
|
hetaodie/hetaodie.github.io
|
assets/media/uda-ml/qinghua/shijianchafenfangfa/迷你项目:时间差分方法(第 0 部分和第 1 部分)/Temporal_Difference_Solution-zh.ipynb
|
mit
|
import gym
env = gym.make('CliffWalking-v0')
"""
Explanation: Mini Project: Temporal-Difference Methods
In this notebook, you will write your own implementations of many temporal-difference (TD) methods.
While we have provided some starter code, you are welcome to remove these hints and write your code from scratch.
Part 0: Explore CliffWalkingEnv
Use the code cell below to create an instance of the CliffWalking environment.
End of explanation
"""
[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
[36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]
"""
Explanation: The agent moves through a $4\times 12$ gridworld, with states numbered as follows:
End of explanation
"""
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
"""
Explanation: At the beginning of any episode, the initial state is state 36. State 47 is the only terminal state, and the cliff corresponds to states 37 through 46.
The agent has 4 potential actions:
End of explanation
"""
print(env.action_space)
print(env.observation_space)
Discrete(4)
Discrete(48)
"""
Explanation: Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$ and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below.
End of explanation
"""
import numpy as np
from plot_utils import plot_values
# define the optimal state-value function
V_opt = np.zeros((4,12))
V_opt[0:13][0] = -np.arange(3, 15)[::-1]
V_opt[0:13][1] = -np.arange(3, 15)[::-1] + 1
V_opt[0:13][2] = -np.arange(3, 15)[::-1] + 2
V_opt[3][0] = -13
plot_values(V_opt)
<matplotlib.figure.Figure at 0x7f4415081588>
"""
Explanation: In this mini project, we will gradually discover the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. Please take the time now to make sure that you understand _why_ this is the optimal state-value function.
End of explanation
"""
policy = np.hstack([1*np.ones(11), 2, 0, np.zeros(10), 2, 0, np.zeros(10), 2, 0, -1*np.ones(11)])
print("\nPolicy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy.reshape(4,12))
Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):
[[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 2.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 2.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 2.]
[ 0. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1.]]
"""
Explanation: Part 1: TD Prediction - State Values
In this part, you will write your own implementation of TD prediction (for estimating the state-value function).
We will begin by investigating a policy where the agent moves:
- RIGHT in states 0 through 10, inclusive,
- DOWN in states 11, 23, and 35, and
- UP in states 12 through 22 (inclusive), states 24 through 34 (inclusive), and state 36.
The policy is specified and printed below. Note that states where the agent does not choose an action are marked with -1.
End of explanation
"""
V_true = np.zeros((4,12))
for i in range(3):
V_true[0:12][i] = -np.arange(3, 15)[::-1] - i
V_true[1][11] = -2
V_true[2][11] = -1
V_true[3][0] = -17
plot_values(V_true)
"""
Explanation: Run the next cell to visualize the state-value function that corresponds to this policy. Make sure that you take the time to understand why this is the corresponding value function!
End of explanation
"""
from collections import defaultdict, deque
import sys
def td_prediction(env, num_episodes, policy, alpha, gamma=1.0):
# initialize empty dictionaries of floats
V = defaultdict(float)
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# begin an episode, observe S
state = env.reset()
while True:
# choose action A
action = policy[state]
# take action A, observe R, S'
next_state, reward, done, info = env.step(action)
# perform updates
V[state] = V[state] + (alpha * (reward + (gamma * V[next_state]) - V[state]))
# S <- S'
state = next_state
# end episode if reached terminal state
if done:
break
return V
"""
Explanation: You will use the TD prediction algorithm to try to approximate the results shown in the figure above.
Your TD prediction algorithm will have 5 arguments:
- env: This is an instance of an OpenAI Gym environment.
- num_episodes: This is the number of episodes that are generated through agent-environment interaction.
- policy: This is a 1D numpy array with policy.shape equal to the number of states (env.nS). policy[s] returns the action that the agent chooses when in state s.
- alpha: This is the step-size parameter for the update step.
- gamma: This is the discount rate. It must be a value between 0 and 1, inclusive, with a default value of 1.
The algorithm returns the following output:
V: This is a dictionary where V[s] is the estimated value of state s.
Complete the function in the code cell below.
End of explanation
"""
import check_test
# evaluate the policy and reshape the state-value function
V_pred = td_prediction(env, 5000, policy, .01)
# please do not change the code below this line
V_pred_plot = np.reshape([V_pred[key] if key in V_pred else 0 for key in np.arange(48)], (4,12))
check_test.run_check('td_prediction_check', V_pred_plot)
plot_values(V_pred_plot)
Episode 5000/5000
"""
Explanation: Run the code cell below to test your implementation and visualize the estimated state-value function. If the code cell returns PASSED, you have implemented the function correctly! Feel free to change the num_episodes and alpha arguments that are supplied to the function. However, if you'd like to keep the accuracy of the unit test, please do not change the default value of gamma.
End of explanation
"""
def update_Q(Qsa, Qsa_next, reward, alpha, gamma):
""" updates the action-value function estimate using the most recent time step """
return Qsa + (alpha * (reward + (gamma * Qsa_next) - Qsa))
def epsilon_greedy_probs(env, Q_s, i_episode, eps=None):
""" obtains the action probabilities corresponding to epsilon-greedy policy """
epsilon = 1.0 / i_episode
if eps is not None:
epsilon = eps
policy_s = np.ones(env.nA) * epsilon / env.nA
policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / env.nA)
return policy_s
import matplotlib.pyplot as plt
%matplotlib inline
def sarsa(env, num_episodes, alpha, gamma=1.0):
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# initialize performance monitor
plot_every = 100
tmp_scores = deque(maxlen=plot_every)
scores = deque(maxlen=num_episodes)
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# initialize score
score = 0
# begin an episode, observe S
state = env.reset()
# get epsilon-greedy action probabilities
policy_s = epsilon_greedy_probs(env, Q[state], i_episode)
# pick action A
action = np.random.choice(np.arange(env.nA), p=policy_s)
# limit number of time steps per episode
for t_step in np.arange(300):
# take action A, observe R, S'
next_state, reward, done, info = env.step(action)
# add reward to score
score += reward
if not done:
# get epsilon-greedy action probabilities
policy_s = epsilon_greedy_probs(env, Q[next_state], i_episode)
# pick next action A'
next_action = np.random.choice(np.arange(env.nA), p=policy_s)
# update TD estimate of Q
Q[state][action] = update_Q(Q[state][action], Q[next_state][next_action],
reward, alpha, gamma)
# S <- S'
state = next_state
# A <- A'
action = next_action
if done:
# update TD estimate of Q
Q[state][action] = update_Q(Q[state][action], 0, reward, alpha, gamma)
# append score
tmp_scores.append(score)
break
if (i_episode % plot_every == 0):
scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(scores),endpoint=False),np.asarray(scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
return Q
"""
Explanation: <span style="color: green;">PASSED</span>
How close is your estimated state-value function to the true state-value function that corresponds to this policy?
You might notice that some of the state values are not estimated by the agent. This is because, under this policy, the agent does not visit every state. In the TD prediction algorithm, the agent can only estimate the values corresponding to states that it visits.
Part 2: TD Control - Sarsa
In this part, you will write your own implementation of the Sarsa control algorithm.
Your algorithm will have four arguments:
- env: This is an instance of an OpenAI Gym environment.
- num_episodes: This is the number of episodes that are generated through agent-environment interaction.
- alpha: This is the step-size parameter for the update step.
- gamma: This is the discount rate. It must be a value between 0 and 1, inclusive, with a default value of 1.
The algorithm returns the following output:
Q: This is a dictionary (of one-dimensional arrays) where Q[s][a] is the estimated action value corresponding to state s and action a.
Complete the function in the code cell below.
(Feel free to define additional functions to help you organize your code.)
End of explanation
"""
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 5000, .01)
# print the estimated optimal policy
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
Episode 5000/5000
"""
Explanation: Use the next code cell to visualize the estimated optimal policy and the corresponding state-value function.
If the code cell returns PASSED, you have implemented the function correctly! Feel free to change the num_episodes and alpha arguments that are supplied to the function. However, if you'd like to keep the accuracy of the unit test, please do not change the default value of gamma.
End of explanation
"""
Best Average Reward over 100 Episodes: -13.0
"""
Explanation:
End of explanation
"""
Estimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):
[[ 3 1 0 1 1 1 1 2 0 1 1 2]
[ 2 1 2 2 1 0 1 0 1 3 2 2]
[ 1 1 1 1 1 1 1 1 1 1 1 2]
[ 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]]
"""
Explanation: <span style="color: green;">PASSED</span>
End of explanation
"""
def q_learning(env, num_episodes, alpha, gamma=1.0):
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# initialize performance monitor
plot_every = 100
tmp_scores = deque(maxlen=plot_every)
scores = deque(maxlen=num_episodes)
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# initialize score
score = 0
# begin an episode, observe S
state = env.reset()
while True:
# get epsilon-greedy action probabilities
policy_s = epsilon_greedy_probs(env, Q[state], i_episode)
# pick next action A
action = np.random.choice(np.arange(env.nA), p=policy_s)
# take action A, observe R, S'
next_state, reward, done, info = env.step(action)
# add reward to score
score += reward
# update Q
Q[state][action] = update_Q(Q[state][action], np.max(Q[next_state]), \
reward, alpha, gamma)
# S <- S'
state = next_state
# until S is terminal
if done:
# append score
tmp_scores.append(score)
break
if (i_episode % plot_every == 0):
scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(scores),endpoint=False),np.asarray(scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
return Q
"""
Explanation: Part 3: TD Control - Q-learning
In this part, you will write your own implementation of the Q-learning control algorithm.
Your algorithm will have four arguments:
- env: This is an instance of an OpenAI Gym environment.
- num_episodes: This is the number of episodes that are generated through agent-environment interaction.
- alpha: This is the step-size parameter for the update step.
- gamma: This is the discount rate. It must be a value between 0 and 1, inclusive, with a default value of 1.
The algorithm returns the following output:
- Q: This is a dictionary (of one-dimensional arrays) where Q[s][a] is the estimated action value corresponding to state s and action a.
Complete the function in the code cell below.
(Feel free to define additional functions to help you organize your code.)
End of explanation
"""
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = q_learning(env, 5000, .01)
# print the estimated optimal policy
policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
Episode 5000/5000
"""
Explanation: Use the next code cell to visualize the estimated optimal policy and the corresponding state-value function.
If the code cell returns PASSED, you have implemented the function correctly! Feel free to change the num_episodes and alpha arguments that are supplied to the function. However, if you'd like to keep the accuracy of the unit test, please do not change the default value of gamma.
End of explanation
"""
Best Average Reward over 100 Episodes: -13.0
"""
Explanation:
End of explanation
"""
Estimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):
[[ 1 0 2 1 1 1 0 1 0 1 1 1]
[ 0 1 1 1 3 1 1 1 0 2 2 2]
[ 1 1 1 1 1 1 1 1 1 1 1 2]
[ 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 0]]
"""
Explanation: <span style="color: green;">PASSED</span>
End of explanation
"""
def expected_sarsa(env, num_episodes, alpha, gamma=1.0):
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# initialize performance monitor
plot_every = 100
tmp_scores = deque(maxlen=plot_every)
scores = deque(maxlen=num_episodes)
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# initialize score
score = 0
# begin an episode
state = env.reset()
# get epsilon-greedy action probabilities
policy_s = epsilon_greedy_probs(env, Q[state], i_episode, 0.005)
while True:
# pick next action
action = np.random.choice(np.arange(env.nA), p=policy_s)
# take action A, observe R, S'
next_state, reward, done, info = env.step(action)
# add reward to score
score += reward
# get epsilon-greedy action probabilities (for S')
policy_s = epsilon_greedy_probs(env, Q[next_state], i_episode, 0.005)
# update Q
Q[state][action] = update_Q(Q[state][action], np.dot(Q[next_state], policy_s), \
reward, alpha, gamma)
# S <- S'
state = next_state
# until S is terminal
if done:
# append score
tmp_scores.append(score)
break
if (i_episode % plot_every == 0):
scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(scores),endpoint=False),np.asarray(scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
return Q
"""
Explanation: Part 4: TD Control - Expected Sarsa
In this part, you will write your own implementation of the Expected Sarsa control algorithm.
Your algorithm will have four arguments:
- env: This is an instance of an OpenAI Gym environment.
- num_episodes: This is the number of episodes that are generated through agent-environment interaction.
- alpha: This is the step-size parameter for the update step.
- gamma: This is the discount rate. It must be a value between 0 and 1, inclusive, with a default value of 1.
The algorithm returns the following output:
- Q: This is a dictionary (of one-dimensional arrays) where Q[s][a] is the estimated action value corresponding to state s and action a.
Complete the function in the code cell below.
(Feel free to define additional functions to help you organize your code.)
End of explanation
"""
# obtain the estimated optimal policy and corresponding action-value function
Q_expsarsa = expected_sarsa(env, 10000, 1)
# print the estimated optimal policy
policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_expsarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_expsarsa)
# plot the estimated optimal state-value function
plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])
Episode 10000/10000
"""
Explanation: Use the next code cell to visualize the estimated optimal policy and the corresponding state-value function.
If the code cell returns PASSED, you have implemented the function correctly! Feel free to change the num_episodes and alpha arguments that are supplied to the function. However, if you'd like to keep the accuracy of the unit test, please do not change the default value of gamma.
End of explanation
"""
Best Average Reward over 100 Episodes: -13.03
"""
Explanation:
End of explanation
"""
Estimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):
[[ 1 1 1 3 1 1 1 2 1 1 2 2]
[ 1 1 1 1 1 1 1 1 1 1 1 2]
[ 1 1 1 1 1 1 1 1 1 1 1 2]
[ 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 0]]
"""
Explanation: <span style="color: green;">PASSED</span>
End of explanation
"""
|
saashimi/code_guild
|
wk4/notebooks/wk4.2.ipynb
|
mit
|
"""
def flatten(lst):
res = []
for elem in lst:
if type(elem) == type([]):
res += flatten(elem)
print(res)
else:
res.append(elem)
return res
"""
def flatten(lst):
res = []
def f(lst):
for elem in lst:
if type(elem) == type([]):
f(elem)
else:
res.append(elem)
return res
return f(lst)
def recursive_min(lst):
return min(flatten(lst))
def count(val, lst):
return flatten(lst).count(val)
def f(lst=[]):
lst.append(1)
return lst
f()
assert flatten([2,9,[2,"Hi Skip!",13,2],8,[2,6]]) == [2,9,2,"Hi Skip!",13,2,8,2,6]
assert flatten([[9,[7,1,13,2],8],[7,6]]) == [9,7,1,13,2,8,7,6]
assert flatten([[9,[7,1,13,2],8],[2,6]]) == [9,7,1,13,2,8,2,6]
assert flatten([["this",["a",["thing"],"a"],"is"],["a","easy"]]) == ["this","a","thing","a","is","a","easy"]
assert flatten([]) == []
assert count(2, []) == 0
assert count(2, [2, 9, [2, 1, 13, 2], 8, [2, 6]]) == 4
assert count(7, [[9, [7, 1, 13, 2], 8], [7, 6]]) == 2
assert count(15, [[9, [7, 1, 13, 2], 8], [2, 6]]) == 0
assert count(5, [[5, [5, [1, 5], 5], 5], [5, 6]]) == 6
assert count("a", [["this",["a",["thing","a"],"a"],"is"], ["a","easy"]]) == 4
assert recursive_min([2, 9, [1, 13], 8, 6]) == 1
assert recursive_min([2, [[100, 1], 90], [10, 13], 8, 6]) == 1
assert recursive_min([2, [[13, -7], 90], [1, 100], 8, 6]) == -7
assert recursive_min([[[-13, 7], 90], 2, [1, 100], 8, 6]) == -13
"""
Explanation: Warm-Up Exercises
Some recursion problems
Write a function, recursive_min, that returns the smallest value in a nested number list. Assume there are no empty lists or sublists:
test(recursive_min([2, 9, [1, 13], 8, 6]) == 1)
test(recursive_min([2, [[100, 1], 90], [10, 13], 8, 6]) == 1)
test(recursive_min([2, [[13, -7], 90], [1, 100], 8, 6]) == -7)
test(recursive_min([[[-13, 7], 90], 2, [1, 100], 8, 6]) == -13)
Write a function count that returns the number of occurrences of target in a nested list:
test(count(2, []), 0)
test(count(2, [2, 9, [2, 1, 13, 2], 8, [2, 6]]) == 4)
test(count(7, [[9, [7, 1, 13, 2], 8], [7, 6]]) == 2)
test(count(15, [[9, [7, 1, 13, 2], 8], [2, 6]]) == 0)
test(count(5, [[5, [5, [1, 5], 5], 5], [5, 6]]) == 6)
test(count("a",
[["this",["a",["thing","a"],"a"],"is"], ["a","easy"]]) == 4)
Write a function flatten that returns a simple list containing all the values in a nested list:
test(flatten([2,9,[2,1,13,2],8,[2,6]]) == [2,9,2,1,13,2,8,2,6])
test(flatten([[9,[7,1,13,2],8],[7,6]]) == [9,7,1,13,2,8,7,6])
test(flatten([[9,[7,1,13,2],8],[2,6]]) == [9,7,1,13,2,8,2,6])
test(flatten([["this",["a",["thing"],"a"],"is"],["a","easy"]]) ==
["this","a","thing","a","is","a","easy"])
test(flatten([]) == [])
End of explanation
"""
import xml.etree.ElementTree as ET
data = """
<person>
<name>Chuck</name>
<phone type="intl">
+1 734 303 4456
</phone>
<email hide="yes"/>
</person>"""
tree = ET.fromstring(data)
print('Name:', tree.find('name').text)
print('Attr:', tree.find('email').get('hide'))
import xml.etree.ElementTree as ET
input = """
<stuff>
<users>
<user x="2">
<id>001</id>
<name>Chuck</name>
</user>
<user x="7">
<id>009</id>
<name>Brent</name>
</user>
</users>
</stuff>"""
stuff = ET.fromstring(input)
lst = stuff.findall('users/user')
print('User count:', len(lst))
for item in lst:
    print('Name', item.find('name').text)
    print('Id', item.find('id').text)
    print('Attribute', item.get('x'))
import json
input = """
[
{ "id" : "001",
"x" : "2",
"name" : "Chuck"
} ,
{ "id" : "009",
"x" : "7",
"name" : "Chuck"
}
]"""
info = json.loads(input)
print('User count:', len(info))
for item in info:
    print('Name', item['name'])
    print('Id', item['id'])
    print('Attribute', item['x'])
"""
Explanation: XML and JSON
End of explanation
"""
import urllib.request, urllib.parse
import json
serviceurl = 'http://maps.googleapis.com/maps/api/geocode/json?'
while True:
    address = input('Enter location: ')
    if len(address) < 1: break
    url = serviceurl + urllib.parse.urlencode({'sensor': 'false',
                                               'address': address})
    print('Retrieving', url)
    uh = urllib.request.urlopen(url)
    data = uh.read()
    print('Retrieved', len(data), 'characters')
    try: js = json.loads(data)
    except: js = None
    if js is None or 'status' not in js or js['status'] != 'OK':
        print('==== Failure To Retrieve ====')
        print(data)
        continue
    print(json.dumps(js, indent=4))
    lat = js["results"][0]["geometry"]["location"]["lat"]
    lng = js["results"][0]["geometry"]["location"]["lng"]
    print('lat', lat, 'lng', lng)
    location = js['results'][0]['formatted_address']
    print(location)
"""
Explanation: Exercises
Make some memes using a meme API. Use requests.
Take a look at the Google geocoding API. Try to send it a location (like Portland!) and get the latitude and longitude back.
APIs
Google Geocoding API
End of explanation
"""
|
karenlmasters/ComputationalPhysicsUnit
|
StochasticMethods/In Class Exercises - Random Processes Lab 2.ipynb
|
apache-2.0
|
from astropy import constants as const
import numpy as np
import matplotlib.pyplot as plt
#This just needed for the Notebook to show plots inline.
%matplotlib inline
print(const.e.value)
print(const.e)
#Atomic Number of Gold
Z = 79
e = const.e.value
E = 7.7e6*e
eps0 = const.eps0.value
sigma = const.a0.value/100.
#print(Z,e,E,eps0,sigma)
N = 1000000 #Start small, and increase to 1 million when you're sure the code runs correctly.
#Function to generate two sets of random Gaussian numbers.
def gaussian():
r = np.sqrt(-2*sigma*sigma*np.log(1-np.random.random()))
theta=2*np.pi*np.random.random()
x=r*np.cos(theta)
y=r*np.sin(theta)
return x,y
#Main Programme
count = 0 #Initate count of particles bounced back
for i in range(N):
x,y=gaussian()
b=np.sqrt(x*x+y*y)
#If this is true the particle is bounced back
if b<Z*e*e/(2*np.pi*eps0*E):
count +=1
print(count, "particles were reflected out of ", N, "incident")
print("this is a bounce fraction of {0:.5f} +/- {1:.5f}".format(count/N,np.sqrt(count)/N))
"""
Explanation: In Class Exercise - Rutherford Scattering of a Gaussian Beam of Particles
When a positively charged particle passes close to an atom, its path will be deflected (or scatter) by an angle $\theta$ which obeys the relation:
$$\tan (\theta/2) = \frac{Z e^2}{2 \pi \epsilon_0 E b} $$
where $Z$ is the atomic number, $e$ is the electric charge, $\epsilon_0$ is the permittivity of free space, $E$ is the kinetic energy of the incident particle, and $b$ is the impact parameter (see diagram).
This process is called "Rutherford Scattering" after Ernest Rutherford, who was among the first physicists to explore this process.
We will model a beam of 1 million $\alpha$ particles with a 2D Gaussian profile incident on a single atom of Gold. We would like to calculate the fraction of these particles which "bounce back" (ie. scatter through angles greater than $\theta = 90^\circ$.
When bounce back happens, therefore $\tan (\theta/2) \gt 1$, so
$$ b \lt \frac{Z e^2}{2 \pi \epsilon_0 E} $$
Write a programme which simulates the incident Gaussian beam and calculates the fraction of particles which bounce back.
Please write your own function to calculate Gaussian random numbers using this format:
def gaussian():
YOUR ALGORTHIM HERE
return x,y
Make use of these parameters for the incident beam:
$E = $7.7 MeV
$\sigma = a_0/100$ (where $a_0$ is the Bohr radius).
Hint: you can make use of the astropy.constants module to import various constants you need for this problem.
End of explanation
"""
#Atomic Number of Gold
Z = 79
e = const.e.value
E = 7.7e6*e
eps0 = const.eps0.value
sigma = const.a0.value/100.
#print(Z,e,E,eps0,sigma)
N = 1000000 #Start small, and increase to 1 million when you're sure the code runs correctly.
#Main Programme
count = 0 #Initate count of particles bounced back
for i in range(N):
b= np.sqrt(-2*sigma*sigma*np.log(1-np.random.random()))
#If this is true the particle is bounced back
if b<Z*e*e/(2*np.pi*eps0*E):
count +=1
print(count, "particles were reflected out of ", N, "incident")
print("this is a bounce fraction of {0:.5f} +/- {1:.5f}".format(count/N,np.sqrt(count)/N))
?np.random.normal
#Atomic Number of Gold
Z = 79
e = const.e.value
E = 7.7e6*e
eps0 = const.eps0.value
sigma = const.a0.value/100.
print(Z,e,E,eps0,sigma)
N = 1000 #Start small, and increase to 1 million when you're sure the code runs correctly.
#Main Programme
count = 0 #Initate count of particles bounced back
for i in range(N):
x=np.random.normal(0,sigma,1)
y=np.random.normal(0,sigma,1)
b=np.sqrt(x*x+y*y)
#If this is true the particle is bounced back
if b<Z*e*e/(2*np.pi*eps0*E):
count +=1
print(count, "particles were reflected out of ", N, "incident")
print("this is a bounce fraction of {0:.5f} +/- {1:.5f}".format(count/N,np.sqrt(count)/N))
"""
Explanation: Notice something about $b$?
End of explanation
"""
#Define the function
def f(x):
fx = (np.sin(1/(x*(2-x))))**2
return fx
#Integrate the function from x=0-2
#Note that you need to know the maximum value of the function
#over this range (which is y=1), and therefore the area of the box
#from which we draw random number is A=2.
N=1000000
k=0
for i in range(N):
x=2*np.random.random()
y=np.random.random()
if y<f(x):
k+=1
A=2.
I=A*k/N
print("The integral is equal to I = ",I)
"""
Explanation: In Class Exercise: Monte Carlo Integration
Write a programme following the method of Monte Carlo Integration to calculate
$$ I = \int_0^2 \sin^2 [\frac{1}{x(2-x)}] dx. $$
As you will need to calculate $f(x) = \sin^2 [\frac{1}{x(2-x)}]$ many times please write a user defined function for this part of your programme.
End of explanation
"""
|
tensorflow/docs-l10n
|
site/zh-cn/guide/random_numbers.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
# Creates 2 virtual devices cpu:0 and cpu:1 for using distribution strategy
physical_devices = tf.config.experimental.list_physical_devices("CPU")
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0], [
tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration()
])
"""
Explanation: Random number generation
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://tensorflow.google.cn/guide/random_numbers" class=""><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" class="">View on TensorFlow.org</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/random_numbers.ipynb" class=""><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" class="">Run in Google Colab</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/random_numbers.ipynb" class=""><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" class="">View source on GitHub</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/guide/random_numbers.ipynb" class=""><img src="https://tensorflow.google.cn/images/download_logo_32px.png" class="">Download notebook</a></td>
</table>
TensorFlow provides a set of pseudo-random number generators (RNGs) in the tf.random module. This document describes how you can control the random number generators, and how these generators interact with other TensorFlow subsystems.
TensorFlow provides two approaches for controlling the random number generation process:
Through the explicit use of tf.random.Generator objects. Each such object maintains a state (in tf.Variable) that will be changed after each random number generation.
Through the purely-functional stateless random functions such as tf.random.stateless_uniform. Calling these functions with the same arguments (which include the seed) on the same device will always produce the same results.
Warning: The old RNGs from TF 1.x (such as tf.random.uniform and tf.random.normal) are not yet deprecated, but their use is strongly discouraged.
Warning: Random numbers are not guaranteed to be consistent across TensorFlow versions. See: Version Compatibility
Setup
End of explanation
"""
g1 = tf.random.Generator.from_seed(1)
print(g1.normal(shape=[2, 3]))
g2 = tf.random.get_global_generator()
print(g2.normal(shape=[2, 3]))
"""
Explanation: The tf.random.Generator class
The tf.random.Generator class is used when you want each RNG call to produce different results. It maintains an internal state (managed by a tf.Variable object) which is updated every time random numbers are generated. Because the state is managed by tf.Variable, it enjoys all the facilities provided by tf.Variable, such as easy checkpointing, automatic control dependencies, and thread safety.
You can get a generator by manually creating an object of the tf.random.Generator class, or call tf.random.get_global_generator() to get the default global generator:
End of explanation
"""
g1 = tf.random.Generator.from_seed(1, alg='philox')
print(g1.normal(shape=[2, 3]))
"""
Explanation: There are multiple ways to create a generator object. The easiest is Generator.from_seed (as shown above), which creates a generator from a seed. A seed can be any non-negative integer. from_seed also takes an optional argument alg, which is the RNG algorithm that the generator will use.
End of explanation
"""
g = tf.random.Generator.from_non_deterministic_state()
print(g.normal(shape=[2, 3]))
"""
Explanation: See the Algorithms section later in this document for more details.
Another way to create a generator is with Generator.from_non_deterministic_state. A generator created this way starts from a non-deterministic state, depending on factors such as the time and the operating system.
End of explanation
"""
g = tf.random.Generator.from_seed(1)
print(g.normal([]))
print(g.normal([]))
g.reset_from_seed(1)
print(g.normal([]))
"""
Explanation: There are yet other ways to create generators, such as from explicit states, which are not covered in this guide.
When using tf.random.get_global_generator to get the global generator, you need to be careful about device placement. The global generator is created (from a non-deterministic state) the first time tf.random.get_global_generator is called, and placed on the default device at that call site. For example, if the first call to tf.random.get_global_generator happens within a tf.device("gpu") scope, the global generator is placed on the GPU, and using it later from the CPU will incur a GPU-to-CPU copy.
There is also a function tf.random.set_global_generator for replacing the global generator with another generator object. Use it with care, though, because a tf.function may have captured the old global generator (as a weak reference), and replacing it will cause it to be garbage collected, breaking the tf.function. A better way to reset the global generator is to use one of the "reset" functions such as Generator.reset_from_seed, which does not create a new generator object.
End of explanation
"""
g = tf.random.Generator.from_seed(1)
print(g.normal([]))
new_gs = g.split(3)
for new_g in new_gs:
print(new_g.normal([]))
print(g.normal([]))
"""
Explanation: Creating independent random-number streams
Many applications need multiple independent random-number streams -- independent in the sense that they do not overlap and have no statistically detectable correlations. This is achieved by using Generator.split to create multiple generators that are guaranteed to be independent of each other (i.e., to generate independent streams).
End of explanation
"""
with tf.device("cpu"): # change "cpu" to the device you want
g = tf.random.get_global_generator().split(1)[0]
print(g.normal([])) # use of g won't cause cross-device copy, unlike the global generator
"""
Explanation: Like an RNG method such as normal, split changes the state of the generator it is called on (g in the example above). Besides being independent of each other, the new generators (new_gs) are also guaranteed to be independent of the old one (g).
Spawning new generators is also useful when you want to make sure the generator you use is on the same device as other computations, to avoid the overhead of a cross-device copy. For example:
End of explanation
"""
g = tf.random.Generator.from_seed(1)
@tf.function
def foo():
return g.normal([])
print(foo())
"""
Explanation: Note: In theory, you could use constructors such as from_seed (instead of split) here to obtain a new generator, but doing so loses the guarantee that the new generator is independent of the global generator. It also carries the risk of accidentally creating two generators with the same seed, or with seeds that lead to overlapping random-number streams.
You can perform splitting recursively by calling split on split generators. There is no limit (barring integer overflow) on the depth of recursion.
Interaction with tf.function
tf.random.Generator obeys the same rules as tf.Variable when used with tf.function. This includes three aspects:
Creating generators outside tf.function
A tf.function can use a generator created outside of it.
End of explanation
"""
g = None
@tf.function
def foo():
global g
if g is None:
g = tf.random.Generator.from_seed(1)
return g.normal([])
print(foo())
print(foo())
"""
Explanation: When the function is called, the user needs to make sure that the generator object is still alive (has not been garbage collected).
Creating generators inside tf.function
Creating a generator inside a tf.function can only happen during the first run of the function.
End of explanation
"""
num_traces = 0
@tf.function
def foo(g):
global num_traces
num_traces += 1
return g.normal([])
foo(tf.random.Generator.from_seed(1))
foo(tf.random.Generator.from_seed(2))
print(num_traces)
"""
Explanation: Passing generators as arguments to tf.function
When used as an argument to a tf.function, different generator objects with the same state size (the state size is determined by the RNG algorithm) do not cause the tf.function to be retraced, while generator objects with different state sizes do.
End of explanation
"""
g = tf.random.Generator.from_seed(1)
strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"])
with strat.scope():
def f():
print(g.normal([]))
results = strat.run(f)
"""
Explanation: Interaction with distribution strategies
There are three ways in which Generator interacts with distribution strategies.
Creating generators outside distribution strategies
If a generator is created outside strategy scopes, access to the generator from all replicas is serialized, so each replica gets different random numbers.
End of explanation
"""
strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"])
with strat.scope():
try:
tf.random.Generator.from_seed(1)
except ValueError as e:
print("ValueError:", e)
"""
Explanation: Note that this usage may have performance issues, because the generator's device is different from the replicas.
Creating generators inside distribution strategies
Creating a generator inside a strategy scope is not allowed, because it is ambiguous how the generator should be replicated -- for example, whether it should be copied so that each replica gets the same random numbers, or "split" so that each replica gets different random numbers.
End of explanation
"""
strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"])
def f():
tf.random.Generator.from_seed(1)
try:
strat.run(f)
except ValueError as e:
print("ValueError:", e)
"""
Explanation: Note that Strategy.run runs its argument function inside a strategy scope implicitly:
End of explanation
"""
strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"])
gs = tf.random.get_global_generator().split(2)
# to_args is a workaround for the absence of APIs to create arguments for
# run. It will be replaced when such APIs are available.
def to_args(gs):
with strat.scope():
def f():
return [gs[tf.distribute.get_replica_context().replica_id_in_sync_group]]
return strat.run(f)
args = to_args(gs)
def f(g):
print(g.normal([]))
results = strat.run(f, args=args)
"""
Explanation: Passing generators as arguments to Strategy.run
If you want each replica to use its own generator, you need to create n generators (where n is the number of replicas), either by copying or by splitting, and then pass them as arguments to Strategy.run.
End of explanation
"""
print(tf.random.stateless_normal(shape=[2, 3], seed=[1, 2]))
print(tf.random.stateless_normal(shape=[2, 3], seed=[1, 2]))
"""
Explanation: Stateless RNGs
Usage of stateless RNGs is simple. Because they are pure functions, no state or side effects are involved.
End of explanation
"""
|
mercybenzaquen/foundations-homework
|
databases_hw/db04/Homework_4.ipynb
|
mit
|
numbers_str = '496,258,332,550,506,699,7,985,171,581,436,804,736,528,65,855,68,279,721,120'
"""
Explanation: Homework #4
These problem sets focus on list comprehensions, string operations and regular expressions.
Problem set #1: List slices and list comprehensions
Let's start with some data. The following cell contains a string with comma-separated integers, assigned to a variable called numbers_str:
End of explanation
"""
new_list = numbers_str.split(",")
numbers = [int(item) for item in new_list]
max(numbers)
"""
Explanation: In the following cell, complete the code with an expression that evaluates to a list of integers derived from the raw numbers in numbers_str, assigning the value of this expression to a variable numbers. If you do everything correctly, executing the cell should produce the output 985 (not '985').
End of explanation
"""
#len(numbers)
sorted(numbers)[10:]
"""
Explanation: Great! We'll be using the numbers list you created above in the next few problems.
In the cell below, fill in the square brackets so that the expression evaluates to a list of the ten largest values in numbers. Expected output:
[506, 528, 550, 581, 699, 721, 736, 804, 855, 985]
(Hint: use a slice.)
End of explanation
"""
sorted([item for item in numbers if item % 3 == 0])
"""
Explanation: In the cell below, write an expression that evaluates to a list of the integers from numbers that are evenly divisible by three, sorted in numerical order. Expected output:
[120, 171, 258, 279, 528, 699, 804, 855]
End of explanation
"""
from math import sqrt
# your code here
squared = []
for item in numbers:
if item < 100:
squared_numbers = sqrt(item)
squared.append(squared_numbers)
squared
"""
Explanation: Okay. You're doing great. Now, in the cell below, write an expression that evaluates to a list of the square roots of all the integers in numbers that are less than 100. In order to do this, you'll need to use the sqrt function from the math module, which I've already imported for you. Expected output:
[2.6457513110645907, 8.06225774829855, 8.246211251235321]
(These outputs might vary slightly depending on your platform.)
End of explanation
"""
planets = [
{'diameter': 0.382,
'mass': 0.06,
'moons': 0,
'name': 'Mercury',
'orbital_period': 0.24,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 0.949,
'mass': 0.82,
'moons': 0,
'name': 'Venus',
'orbital_period': 0.62,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 1.00,
'mass': 1.00,
'moons': 1,
'name': 'Earth',
'orbital_period': 1.00,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 0.532,
'mass': 0.11,
'moons': 2,
'name': 'Mars',
'orbital_period': 1.88,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 11.209,
'mass': 317.8,
'moons': 67,
'name': 'Jupiter',
'orbital_period': 11.86,
'rings': 'yes',
'type': 'gas giant'},
{'diameter': 9.449,
'mass': 95.2,
'moons': 62,
'name': 'Saturn',
'orbital_period': 29.46,
'rings': 'yes',
'type': 'gas giant'},
{'diameter': 4.007,
'mass': 14.6,
'moons': 27,
'name': 'Uranus',
'orbital_period': 84.01,
'rings': 'yes',
'type': 'ice giant'},
{'diameter': 3.883,
'mass': 17.2,
'moons': 14,
'name': 'Neptune',
'orbital_period': 164.8,
'rings': 'yes',
'type': 'ice giant'}]
"""
Explanation: Problem set #2: Still more list comprehensions
Still looking good. Let's do a few more with some different data. In the cell below, I've defined a data structure and assigned it to a variable planets. It's a list of dictionaries, with each dictionary describing the characteristics of a planet in the solar system. Make sure to run the cell before you proceed.
End of explanation
"""
[item['name'] for item in planets if item['diameter'] > 2]
#I got one more planet!
"""
Explanation: Now, in the cell below, write a list comprehension that evaluates to a list of names of the planets that have a diameter greater than four earth radii. Expected output:
['Jupiter', 'Saturn', 'Uranus']
End of explanation
"""
#sum([int(item['mass']) for item in planets])
sum([item['mass'] for item in planets])
"""
Explanation: In the cell below, write a single expression that evaluates to the sum of the mass of all planets in the solar system. Expected output: 446.79
End of explanation
"""
import re
planet_with_giant= [item['name'] for item in planets if re.search(r'\bgiant\b', item['type'])]
planet_with_giant
"""
Explanation: Good work. Last one with the planets. Write an expression that evaluates to the names of the planets that have the word giant anywhere in the value for their type key. Expected output:
['Jupiter', 'Saturn', 'Uranus', 'Neptune']
End of explanation
"""
import re
poem_lines = ['Two roads diverged in a yellow wood,',
'And sorry I could not travel both',
'And be one traveler, long I stood',
'And looked down one as far as I could',
'To where it bent in the undergrowth;',
'',
'Then took the other, as just as fair,',
'And having perhaps the better claim,',
'Because it was grassy and wanted wear;',
'Though as for that the passing there',
'Had worn them really about the same,',
'',
'And both that morning equally lay',
'In leaves no step had trodden black.',
'Oh, I kept the first for another day!',
'Yet knowing how way leads on to way,',
'I doubted if I should ever come back.',
'',
'I shall be telling this with a sigh',
'Somewhere ages and ages hence:',
'Two roads diverged in a wood, and I---',
'I took the one less travelled by,',
'And that has made all the difference.']
"""
Explanation: EXTREME BONUS ROUND: Write an expression below that evaluates to a list of the names of the planets in ascending order by their number of moons. (The easiest way to do this involves using the key parameter of the sorted function, which we haven't yet discussed in class! That's why this is an EXTREME BONUS question.) Expected output:
['Mercury', 'Venus', 'Earth', 'Mars', 'Neptune', 'Uranus', 'Saturn', 'Jupiter']
Problem set #3: Regular expressions
In the following section, we're going to do a bit of digital humanities. (I guess this could also be journalism if you were... writing an investigative piece about... early 20th century American poetry?) We'll be working with the following text, Robert Frost's The Road Not Taken. Make sure to run the following cell before you proceed.
End of explanation
"""
[item for item in poem_lines if re.search(r'\b[a-zA-Z]{4}\b \b[a-zA-Z]{4}\b', item)]
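# One possible answer to the EXTREME BONUS from Problem set #2, using the key
# parameter of sorted() as hinted (it assumes the planets list defined earlier).
[planet['name'] for planet in sorted(planets, key=lambda planet: planet['moons'])]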
"""
Explanation: In the cell above, I defined a variable poem_lines which has a list of lines in the poem, and imported the re library.
In the cell below, write a list comprehension (using re.search()) that evaluates to a list of lines that contain two words next to each other (separated by a space) that have exactly four characters. (Hint: use the \b anchor. Don't overthink the "two words in a row" requirement.)
Expected result:
['Then took the other, as just as fair,',
'Had worn them really about the same,',
'And both that morning equally lay',
'I doubted if I should ever come back.',
'I shall be telling this with a sigh']
End of explanation
"""
[item for item in poem_lines if re.search(r'\b[a-zA-Z]{5}\b.?$',item)]
"""
Explanation: Good! Now, in the following cell, write a list comprehension that evaluates to a list of lines in the poem that end with a five-letter word, regardless of whether or not there is punctuation following the word at the end of the line. (Hint: Try using the ? quantifier. Is there an existing character class, or a way to write a character class, that matches non-alphanumeric characters?) Expected output:
['And be one traveler, long I stood',
'And looked down one as far as I could',
'And having perhaps the better claim,',
'Though as for that the passing there',
'In leaves no step had trodden black.',
'Somewhere ages and ages hence:']
End of explanation
"""
all_lines = " ".join(poem_lines)
"""
Explanation: Okay, now a slightly trickier one. In the cell below, I've created a string all_lines which evaluates to the entire text of the poem in one string. Execute this cell.
End of explanation
"""
re.findall(r'[I] (\b\w+\b)', all_lines)
"""
Explanation: Now, write an expression that evaluates to all of the words in the poem that follow the word 'I'. (The strings in the resulting list should not include the I.) Hint: Use re.findall() and grouping! Expected output:
['could', 'stood', 'could', 'kept', 'doubted', 'should', 'shall', 'took']
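As an aside (not required by the assignment), a slightly more defensive version of the same pattern anchors the I as a whole word, so a capital I inside another word can never match; it should return the same list for this poem:
```python
import re

# \bI\b matches "I" only as a standalone word; the group captures the word that follows
re.findall(r'\bI\b (\w+)', all_lines)
```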
End of explanation
"""
entrees = [
"Yam, Rosemary and Chicken Bowl with Hot Sauce $10.95",
"Lavender and Pepperoni Sandwich $8.49",
"Water Chestnuts and Peas Power Lunch (with mayonnaise) $12.95 - v",
"Artichoke, Mustard Green and Arugula with Sesame Oil over noodles $9.95 - v",
"Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce $19.95",
"Rutabaga And Cucumber Wrap $8.49 - v"
]
"""
Explanation: Finally, something super tricky. Here's a list of strings that contains a restaurant menu. Your job is to wrangle this plain text, slightly-structured data into a list of dictionaries.
End of explanation
"""
menu = []
for item in entrees:
entrees_dictionary= {}
match = re.search(r'(.*) .(\d*\d\.\d{2})\ ?( - v+)?$', item)
if match:
name = match.group(1)
price = float(match.group(2))  # convert to float so the result matches the expected output
#vegetarian= match.group(3)
if match.group(3):
entrees_dictionary['vegetarian']= True
else:
entrees_dictionary['vegetarian']= False
entrees_dictionary['name']= name
entrees_dictionary['price']= price
menu.append(entrees_dictionary)
menu
"""
Explanation: You'll need to pull out the name of the dish and the price of the dish. The v after the hyphen indicates that the dish is vegetarian---you'll need to include that information in your dictionary as well. I've included the basic framework; you just need to fill in the contents of the for loop.
Expected output:
[{'name': 'Yam, Rosemary and Chicken Bowl with Hot Sauce ',
'price': 10.95,
'vegetarian': False},
{'name': 'Lavender and Pepperoni Sandwich ',
'price': 8.49,
'vegetarian': False},
{'name': 'Water Chestnuts and Peas Power Lunch (with mayonnaise) ',
'price': 12.95,
'vegetarian': True},
{'name': 'Artichoke, Mustard Green and Arugula with Sesame Oil over noodles ',
'price': 9.95,
'vegetarian': True},
{'name': 'Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce ',
'price': 19.95,
'vegetarian': False},
{'name': 'Rutabaga And Cucumber Wrap ', 'price': 8.49, 'vegetarian': True}]
Great work! You are done. Go cavort in the sun, or whatever it is you students do when you're done with your homework.
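If you want to push the last exercise a little further, the same parse can be written with named groups, which makes the group bookkeeping easier to read; a sketch (note the names come out without the trailing space that the plain-group version leaves in):
```python
import re

menu2 = []
for item in entrees:
    m = re.search(r'(?P<name>.*) \$(?P<price>\d+\.\d{2})\s*(?P<veg>- v)?$', item)
    if m:
        menu2.append({
            'name': m.group('name'),
            'price': float(m.group('price')),
            'vegetarian': m.group('veg') is not None,
        })
menu2
```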
End of explanation
"""
|
ucsd-ccbb/mali-dual-crispr-pipeline
|
dual_crispr/distributed_files/notebooks/Dual CRISPR 6-Scoring Preparation.ipynb
|
mit
|
g_dataset_name = "Notebook6Test"
g_library_fp = '~/dual_crispr/library_definitions/test_library_2.txt'
g_count_fps_or_dirs = '/home/ec2-user/dual_crispr/test_data/test_set_6a,/home/ec2-user/dual_crispr/test_data/test_set_6b'
g_time_prefixes = "T,D"
g_prepped_counts_run_prefix = ""
g_prepped_counts_dir = '~/dual_crispr/test_outputs/test_set_6'
"""
Explanation: Dual CRISPR Screen Analysis
Step 6: Scoring Preparation
Amanda Birmingham, CCBB, UCSD (abirmingham@ucsd.edu)
Instructions
To run this notebook reproducibly, follow these steps:
1. Click Kernel > Restart & Clear Output
2. When prompted, click the red Restart & clear all outputs button
3. Fill in the values for your analysis for each of the variables in the Input Parameters section
4. Click Cell > Run All
Input Parameters
End of explanation
"""
import inspect
import ccbb_pyutils.analysis_run_prefixes as ns_runs
import ccbb_pyutils.files_and_paths as ns_files
import ccbb_pyutils.notebook_logging as ns_logs
def describe_var_list(input_var_name_list):
description_list = ["{0}: {1}\n".format(name, eval(name)) for name in input_var_name_list]
return "".join(description_list)
ns_logs.set_stdout_info_logger()
import dual_crispr.count_combination as ns_combine
print(inspect.getsource(ns_combine.get_combined_counts_file_suffix))
import ccbb_pyutils.string_utils as ns_string
print(inspect.getsource(ns_string.split_delimited_string_to_list))
import os
def get_count_file_fps(comma_sep_fps_or_dirs_str):
result = []
fps_or_dirs = comma_sep_fps_or_dirs_str.split(",")
for curr_fp_or_dir in fps_or_dirs:
trimmed_curr = curr_fp_or_dir.strip()
trimmed_curr = ns_files.expand_path(trimmed_curr)
if os.path.isdir(trimmed_curr):
combined_counts_fps = ns_files.get_filepaths_from_wildcard(trimmed_curr,
ns_combine.get_combined_counts_file_suffix())
result.extend(combined_counts_fps)
else:
result.append(trimmed_curr)
return result
g_library_fp = ns_files.expand_path(g_library_fp)
g_count_file_fps = get_count_file_fps(g_count_fps_or_dirs)
g_prepped_counts_run_prefix = ns_runs.check_or_set(g_prepped_counts_run_prefix,
ns_runs.generate_run_prefix(g_dataset_name))
g_time_prefixes_list = ns_string.split_delimited_string_to_list(g_time_prefixes)
g_prepped_counts_dir = ns_files.expand_path(g_prepped_counts_dir)
print(describe_var_list(['g_library_fp', 'g_count_file_fps', 'g_prepped_counts_run_prefix', 'g_time_prefixes_list']))
ns_files.verify_or_make_dir(g_prepped_counts_dir)
"""
Explanation: Automated Set-Up
End of explanation
"""
import dual_crispr.scoring_prep as ns_prep
print(inspect.getsource(ns_prep))
def merge_and_write_timepoint_counts(count_file_fps, constructs_fp, run_prefix, dataset_name, time_prefixes_list,
output_dir, disregard_order=True):
joined_df = ns_prep.merge_and_annotate_counts(count_file_fps, constructs_fp, dataset_name,
time_prefixes_list, disregard_order=disregard_order)
prepped_file_suffix = ns_prep.get_prepped_file_suffix()
output_fp = ns_files.build_multipart_fp(output_dir, [run_prefix, prepped_file_suffix])
joined_df.to_csv(output_fp, index=False, sep='\t')
merge_and_write_timepoint_counts(g_count_file_fps, g_library_fp, g_prepped_counts_run_prefix, g_dataset_name,
g_time_prefixes_list, g_prepped_counts_dir, True)
print(ns_files.check_file_presence(g_prepped_counts_dir, g_prepped_counts_run_prefix,
ns_prep.get_prepped_file_suffix(),
check_failure_msg="Scoring preparation failed to produce an output file."))
"""
Explanation: Scoring-Ready File Preparation
End of explanation
"""
|
dynaryu/rmtk
|
rmtk/vulnerability/derivation_fragility/hybrid_methods/CSM/CSM.ipynb
|
agpl-3.0
|
import capacitySpectrumMethod
from rmtk.vulnerability.common import utils
%matplotlib inline
"""
Explanation: Capacity Spectrum Method (CSM)
The Capacity Spectrum Method (CSM) is a procedure capable of estimating the nonlinear response of structures, utilizing overdamped response spectra. These response spectra can either be obtained from a building code or derived based on ground motion records.
The figure below illustrates a fragility model developed using this method.
<img src="../../../../../figures/fragility_example.png" width="400" align="middle">
Note: To run the code in a cell:
Click on the cell to select it.
Press SHIFT+ENTER on your keyboard or press the play button (<button class='fa fa-play icon-play btn btn-xs btn-default'></button>) in the toolbar above.
End of explanation
"""
capacity_curves_file = "../../../../../../rmtk_data/capacity_curves_Sa-Sd.csv"
capacity_curves = utils.read_capacity_curves(capacity_curves_file)
utils.plot_capacity_curves(capacity_curves)
"""
Explanation: Load capacity curves
In order to use this methodology, it is necessary to provide one (or a group) of capacity curves, defined according to the format described in the RMTK manual.
Please provide the location of the file containing the capacity curves using the parameter capacity_curves_file.
End of explanation
"""
gmrs_folder = "../../../../../../rmtk_data/accelerograms"
minT, maxT = 0.1, 2.0
gmrs = utils.read_gmrs(gmrs_folder)
#utils.plot_response_spectra(gmrs, minT, maxT)
"""
Explanation: Load ground motion records
Please indicate the path to the folder containing the ground motion records to be used in the analysis through the parameter gmrs_folder.
Note: Each accelerogram needs to be in a separate CSV file as described in the RMTK manual.
The parameters minT and maxT are used to define the period bounds when plotting the spectra for the provided ground motion fields.
End of explanation
"""
damage_model_file = "../../../../../../rmtk_data/damage_model.csv"
damage_model = utils.read_damage_model(damage_model_file)
"""
Explanation: Load damage state thresholds
Please provide the path to your damage model file using the parameter damage_model_file in the cell below.
The damage types currently supported are: capacity curve dependent, spectral displacement and interstorey drift. If the damage model type is interstorey drift the user can provide the pushover curve in terms of Vb-dfloor to be able to convert interstorey drift limit states to roof displacements and spectral displacements, otherwise a linear relationship is assumed.
End of explanation
"""
damping_model = "Iwan_1980"
damping_ratio = 0.05
PDM, Sds = capacitySpectrumMethod.calculate_fragility(capacity_curves, gmrs, damage_model,
damping_model, damping_ratio)
"""
Explanation: Obtain the damage probability matrix
The following parameters need to be defined in the cell below in order to calculate the damage probability matrix:
1. damping_model: This parameter defines the type of damping model to be used in the analysis. The valid options are listed below; please refer to the RMTK manual for additional details about these models:
1. "FEMA_2005"
2. "Kowalsky_1994"
3. "Iwan_1980"
4. "Gulkan_Sozen_1974"
5. "Priesley_et_al2007_frames"
6. "Priesley_et_al2007_walls"
7. "Calvi_1999"
2. damping_ratio: This parameter defines the damping ratio for the structure
End of explanation
"""
IMT = "Sa"
period = 0.3
regression_method = "least squares"
fragility_model = utils.calculate_mean_fragility(gmrs, PDM, period, damping_ratio,
IMT, damage_model, regression_method)
"""
Explanation: Fit lognormal CDF fragility curves
The following parameters need to be defined in the cell below in order to fit lognormal CDF fragility curves to the damage probability matrix obtained above:
1. IMT: This parameter specifies the intensity measure type to be used. Currently supported options are "PGA", "Sd" and "Sa".
2. period: This parameter defines the time period of the fundamental mode of vibration of the structure.
3. regression_method: This parameter defines the regression method to be used for estimating the parameters of the fragility functions. The valid options are "least squares" and "max likelihood".
End of explanation
"""
minIML, maxIML = 0.01, 2.00
utils.plot_fragility_model(fragility_model, minIML, maxIML)
# utils.plot_fragility_stats(fragility_statistics,minIML,maxIML)
"""
Explanation: Plot fragility functions
The following parameters need to be defined in the cell below in order to plot the lognormal CDF fragility curves obtained above:
* minIML and maxIML: These parameters define the limits of the intensity measure level for plotting the functions
End of explanation
"""
taxonomy = "RC"
minIML, maxIML = 0.01, 2.00
output_type = "csv"
output_path = "../../../../../../rmtk_data/output/"
utils.save_mean_fragility(taxonomy, fragility_model, minIML, maxIML, output_type, output_path)
"""
Explanation: Save fragility functions
The derived parametric fragility functions can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the lognormal CDF fragility curves obtained above:
1. taxonomy: This parameter specifies a taxonomy string for the fragility functions.
2. minIML and maxIML: These parameters define the bounds of applicability of the functions.
3. output_type: This parameter specifies the file format to be used for saving the functions. Currently, the formats supported are "csv" and "nrml".
End of explanation
"""
cons_model_file = "../../../../../../rmtk_data/cons_model.csv"
imls = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50,
0.60, 0.70, 0.80, 0.90, 1.00, 1.20, 1.40, 1.60, 1.80, 2.00,
2.20, 2.40, 2.60, 2.80, 3.00, 3.20, 3.40, 3.60, 3.80, 4.00]
distribution_type = "lognormal"
cons_model = utils.read_consequence_model(cons_model_file)
vulnerability_model = utils.convert_fragility_vulnerability(fragility_model, cons_model,
imls, distribution_type)
"""
Explanation: Obtain vulnerability function
A vulnerability model can be derived by combining the set of fragility functions obtained above with a consequence model. In this process, the fractions of buildings in each damage state are multiplied by the associated damage ratio from the consequence model, in order to obtain a distribution of loss ratio for each intensity measure level.
The following parameters need to be defined in the cell below in order to calculate vulnerability functions using the above derived fragility functions:
1. cons_model_file: This parameter specifies the path of the consequence model file.
2. imls: This parameter specifies a list of intensity measure levels in increasing order at which the distribution of loss ratios are required to be calculated.
3. distribution_type: This parameter specifies the type of distribution to be used for calculating the vulnerability function. The distribution types currently supported are "lognormal", "beta", and "PMF".
End of explanation
"""
utils.plot_vulnerability_model(vulnerability_model)
"""
Explanation: Plot vulnerability function
End of explanation
"""
taxonomy = "RC"
output_type = "csv"
output_path = "../../../../../../rmtk_data/output/"
utils.save_vulnerability(taxonomy, vulnerability_model, output_type, output_path)
"""
Explanation: Save vulnerability function
The derived parametric or nonparametric vulnerability function can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the vulnerability function obtained above:
1. taxonomy: This parameter specifies a taxonomy string for the vulnerability function.
2. output_type: This parameter specifies the file format to be used for saving the function. Currently, the formats supported are "csv" and "nrml".
End of explanation
"""
|
mercye/foundations-homework
|
07/.ipynb_checkpoints/Homework_7_Emelike-checkpoint.ipynb
|
mit
|
import pandas as pd
"""
Explanation: Part One
Use the csv I've attached to answer the following questions:
1) Import pandas with the right name
End of explanation
"""
!pip install matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: 2) Set all graphics from matplotlib to display inline
End of explanation
"""
df = pd.read_csv("07-hw-animals copy.csv")
"""
Explanation: 3) Read the csv in (it should be UTF-8 already so you don't have to worry about encoding), save it with the proper boring name
End of explanation
"""
df.columns.values
"""
Explanation: 4) Display the names of the columns in the csv
End of explanation
"""
df.head(3)
"""
Explanation: 5) Display the first 3 animals.
End of explanation
"""
df.sort_values(by='length', ascending = False).head(3)
"""
Explanation: 6) Sort the animals to see the 3 longest animals.
End of explanation
"""
df['animal'].value_counts()
"""
Explanation: 7) What are the counts of the different values of the "animal" column? a.k.a. how many cats and how many dogs.
End of explanation
"""
df[df['animal'] == 'dog']
"""
Explanation: 8) Only select the dogs.
End of explanation
"""
df[df['length']>40]
"""
Explanation: 9) Display all of the animals that are greater than 40 cm.
End of explanation
"""
df['inches'] = df['length']*0.393701
"""
Explanation: 10) 'length' is the animal's length in cm. Create a new column called inches that is the length in inches.
End of explanation
"""
cats = df[df['animal'] =='cat']
dogs = df[df['animal'] == 'dog']
"""
Explanation: 11) Save the cats to a separate variable called "cats." Save the dogs to a separate variable called "dogs."
End of explanation
"""
cats[cats['inches']>12]
df[(df['animal']=='cat') & (df['inches']>12)]
"""
Explanation: 12) Display all of the animals that are cats and above 12 inches long. First do it using the "cats" variable, then do it using your normal dataframe.
End of explanation
"""
cats.describe()
"""
Explanation: 13) What's the mean length of a cat?
End of explanation
"""
dogs.describe()
"""
Explanation: the mean length of a cat is 14.698 inches
14) What's the mean length of a dog?
End of explanation
"""
df.groupby('animal').mean()
"""
Explanation: the mean length of a dog is 19.685
15) Use groupby to accomplish both of the above tasks at once.
End of explanation
"""
dogs.hist('length')
"""
Explanation: 16) Make a histogram of the length of dogs. I apologize that it is so boring.
End of explanation
"""
df.plot(kind='bar', x='name', y='length', legend=False)
"""
Explanation: 17) Change your graphing style to be something else (anything else!)
End of explanation
"""
df.plot(kind='barh', x='animal', y='length', legend=False)
"""
Explanation: 18) Make a horizontal bar graph of the length of the animals, with their name as the label (look at the billionaires notebook I put on Slack!)
End of explanation
"""
sortedcats = cats.sort_values(by='length', ascending = True)
sortedcats.plot(kind='barh', x='animal', y='length', legend=False)
"""
Explanation: 19) Make a sorted horizontal bar graph of the cats, with the larger cats on top.
End of explanation
"""
df = pd.read_excel('billionaires copy.xlsx')
df.columns.values
recent = df[df['year']==2014]
recent.head(5)
"""
Explanation: Part Two
End of explanation
"""
recent.sort_values(by='networthusbillion', ascending=False).head(10)
"""
Explanation: 1) What country are most billionaires from? For the top ones, how many billionaires per billion people?
2) Who are the top 10 richest billionaires?
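Question 1 is not answered by the cell above; a minimal sketch for it (assuming the spreadsheet has a citizenship column; check the columns listing printed earlier if it is named differently):
```python
# Billionaires per country in the 2014 slice
recent['citizenship'].value_counts().head(10)
```
Billionaires per billion people would additionally require each country's population, which is not in this file.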
End of explanation
"""
recent.groupby('gender').mean()
"""
Explanation: 3) What's the average wealth of a billionaire? Male? Female?
End of explanation
"""
recent.sort_values('networthusbillion').head(10)
"""
Explanation: 4) Who is the poorest billionaire? Who are the top 10 poorest billionaires?
End of explanation
"""
rel_counts = recent.groupby('relationshiptocompany').count()
rel_counts.sort_values('year', ascending=False).head(10)
#relationship to company describes the role a person plays in a company
#most common relationships are founder, relation, owner, chairman, and investor
"""
Explanation: 5) What is 'relationship to company'? And what are the most common relationships?
End of explanation
"""
source_counts = recent.groupby('sourceofwealth')
"""
Explanation: 6) Most common source of wealth? Male vs. female?
End of explanation
"""
|
darcamo/pyphysim
|
ipython_notebooks/METIS Simple Scenario.ipynb
|
gpl-2.0
|
%matplotlib inline
# xxxxxxxxxx Add the parent folder to the python path. xxxxxxxxxxxxxxxxxxxx
import sys
import os
sys.path.append('../')
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.html import widgets
from IPython.display import display_latex
# Import the simulation runner
from apps.metis_scenarios.simulate_metis_scenario import *
"""
Explanation: Simulation of the METIS scenario with rooms in one floor
This notebook simulates the scenario with one access point in each room of a given floor building.
Some Initialization Code
First we do some initializations and import the required modules.
End of explanation
"""
scenario_params = {
'side_length': 10, # 10 meters side length
'single_wall_loss_dB': 5,
'num_rooms_per_side': 12,
'ap_decimation': 1}
power_params = {
'Pt_dBm': 20, # 20 dBm transmit power
'noise_power_dBm': -300 # Very low noise power
}
"""
Explanation: Simulation Configuration
Now we set the simulation configuration.
End of explanation
"""
out = perform_simulation_SINR_heatmap(scenario_params, power_params)
(sinr_array_pl_nothing_dB,
sinr_array_pl_3gpp_dB,
sinr_array_pl_free_space_dB,
sinr_array_pl_metis_ps7_dB) = out
num_discrete_positions_per_room = 15
sinr_array_pl_nothing_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_nothing_dB,
scenario_params['num_rooms_per_side'],
num_discrete_positions_per_room)
sinr_array_pl_3gpp_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_3gpp_dB,
scenario_params['num_rooms_per_side'],
num_discrete_positions_per_room)
sinr_array_pl_free_space_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_free_space_dB,
scenario_params['num_rooms_per_side'],
num_discrete_positions_per_room)
sinr_array_pl_metis_ps7_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_metis_ps7_dB,
scenario_params['num_rooms_per_side'],
num_discrete_positions_per_room)
"""
Explanation: Perform the Simulation
calculate the SINRs
End of explanation
"""
print(("Min/Mean/Max SINR value (no PL):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_nothing_dB.min(),
sinr_array_pl_nothing_dB.mean(),
sinr_array_pl_nothing_dB.max()))
print(("Min/Mean/Max SINR value (3GPP):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_3gpp_dB.min(),
sinr_array_pl_3gpp_dB.mean(),
sinr_array_pl_3gpp_dB.max()))
print(("Min/Mean/Max SINR value (Free Space):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_free_space_dB.min(),
sinr_array_pl_free_space_dB.mean(),
sinr_array_pl_free_space_dB.max()))
print(("Min/Mean/Max SINR value (METIS PS7):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_metis_ps7_dB.min(),
sinr_array_pl_metis_ps7_dB.mean(),
sinr_array_pl_metis_ps7_dB.max()))
"""
Explanation: Print Min/Mean/Max SIR values (no noise)
End of explanation
"""
fig1, ax1 = plt.subplots(figsize=(10, 8))
print("Max SINR: {0}".format(sinr_array_pl_nothing_dB.max()))
print("Min SINR: {0}".format(sinr_array_pl_nothing_dB.min()))
print("Mean SINR: {0}".format(sinr_array_pl_nothing_dB.mean()))
im1 = ax1.imshow(sinr_array_pl_nothing_dB2, interpolation='nearest', vmax=-1.5, vmin=-5)
fig1.colorbar(im1)
plt.show()
"""
Explanation: Create the Plots for the different cases
First we will create the plots for a noise variance equal to zero.
Plot case without path loss (only wall loss)
End of explanation
"""
fig2, ax2 = plt.subplots(figsize=(10, 8))
print("Max SINR: {0}".format(sinr_array_pl_3gpp_dB.max()))
print("Min SINR: {0}".format(sinr_array_pl_3gpp_dB.min()))
print("Mean SINR: {0}".format(sinr_array_pl_3gpp_dB.mean()))
im2 = ax2.imshow(sinr_array_pl_3gpp_dB2, interpolation='nearest', vmax=30, vmin=-2.5)
fig2.colorbar(im2)
plt.show()
"""
Explanation: Plot case with 3GPP path loss
End of explanation
"""
fig3, ax3 = plt.subplots(figsize=(10, 8))
print("Max SINR: {0}".format(sinr_array_pl_free_space_dB.max()))
print("Min SINR: {0}".format(sinr_array_pl_free_space_dB.min()))
print("Mean SINR: {0}".format(sinr_array_pl_free_space_dB.mean()))
im3 = ax3.imshow(sinr_array_pl_free_space_dB2, interpolation='nearest', vmax=30, vmin=-2.5)
fig3.colorbar(im3)
plt.show()
"""
Explanation: Case with Free Space Path Loss
End of explanation
"""
fig4, ax4 = plt.subplots(figsize=(10, 8))
print("Max SINR: {0}".format(sinr_array_pl_metis_ps7_dB.max()))
print("Min SINR: {0}".format(sinr_array_pl_metis_ps7_dB.min()))
print("Mean SINR: {0}".format(sinr_array_pl_metis_ps7_dB.mean()))
im4 = ax4.imshow(sinr_array_pl_metis_ps7_dB2, interpolation='nearest', vmax=30, vmin=-2.5)
fig4.colorbar(im4)
plt.show()
"""
Explanation: Plot case with METIS PS7 path loss
End of explanation
"""
@interact(Pt_dBm=(0., 40., 5.), noise_power_dBm=(-160., 0.0, 5.), pl_model=['nothing', '3gpp', 'free_space', 'metis'], ap_decimation=['1', '2', '4', '9'])
def plot_SINRs(Pt_dBm=30., noise_power_dBm=-160, pl_model='3gpp', ap_decimation=1):
scenario_params = {
'side_length': 10, # 10 meters side length
'single_wall_loss_dB': 5,
'num_rooms_per_side': 12,
'ap_decimation': int(ap_decimation)}
power_params = {
'Pt_dBm': Pt_dBm, # transmit power selected in the widget
'noise_power_dBm': noise_power_dBm # noise power selected in the widget
}
out = perform_simulation_SINR_heatmap(scenario_params, power_params)
(sinr_array_pl_nothing_dB,
sinr_array_pl_3gpp_dB,
sinr_array_pl_free_space_dB,
sinr_array_pl_metis_ps7_dB) = out
# Re-discretize the freshly computed SINR maps so the plots below reflect the
# parameters chosen in the widget instead of the arrays computed earlier.
num_rooms = scenario_params['num_rooms_per_side']
sinr_array_pl_nothing_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_nothing_dB, num_rooms, num_discrete_positions_per_room)
sinr_array_pl_3gpp_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_3gpp_dB, num_rooms, num_discrete_positions_per_room)
sinr_array_pl_free_space_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_free_space_dB, num_rooms, num_discrete_positions_per_room)
sinr_array_pl_metis_ps7_dB2 = prepare_sinr_array_for_color_plot(
sinr_array_pl_metis_ps7_dB, num_rooms, num_discrete_positions_per_room)
fig, ax = plt.subplots(figsize=(10, 8))
if pl_model == 'nothing':
im = ax.imshow(sinr_array_pl_nothing_dB2, interpolation='nearest', vmax=-1.5, vmin=-5.)
fig.colorbar(im)
plt.show()
print(("Min/Mean/Max SINR value (no PL):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_nothing_dB.min(),
sinr_array_pl_nothing_dB.mean(),
sinr_array_pl_nothing_dB.max()))
elif pl_model == '3gpp':
im = ax.imshow(sinr_array_pl_3gpp_dB2, interpolation='nearest', vmax=30, vmin=-2.5)
fig.colorbar(im)
plt.show()
print(("Min/Mean/Max SINR value (3GPP):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_3gpp_dB.min(),
sinr_array_pl_3gpp_dB.mean(),
sinr_array_pl_3gpp_dB.max()))
elif pl_model == 'free_space':
im = ax.imshow(sinr_array_pl_free_space_dB2, interpolation='nearest', vmax=30, vmin=-2.5)
fig.colorbar(im)
plt.show()
print(("Min/Mean/Max SINR value (Free Space):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_free_space_dB.min(),
sinr_array_pl_free_space_dB.mean(),
sinr_array_pl_free_space_dB.max()))
elif pl_model == 'metis':
im = ax.imshow(sinr_array_pl_metis_ps7_dB2, interpolation='nearest', vmax=30, vmin=-2.5)
fig.colorbar(im)
plt.show()
print(("Min/Mean/Max SINR value (METIS PS7):"
"\n {0}\n {1}\n {2}").format(
sinr_array_pl_metis_ps7_dB.min(),
sinr_array_pl_metis_ps7_dB.mean(),
sinr_array_pl_metis_ps7_dB.max()))
else:
raise ValueError('Invalid path loss model: {0}'.format(pl_model))
"""
Explanation: Create the plots with interact
Here we repeat the plots, but now using IPython interact. This allows us to change input parameters and see the result in the plot.
End of explanation
"""
|
gabrielhpbc/CD
|
APS8_QUESTOES.ipynb
|
mit
|
from scipy import stats
Prob = 1-(stats.norm.cdf(5,loc=5.5,scale=1.07))
Prob
"""
Explanation: APS 8
Due: 11/28 at the end of office hours (17:15)
Question 1
Assume that $X$ is a continuous random variable describing the price of a digital multimeter in an arbitrary Brazilian store. Also assume that the mean price is $R\$ 45.00$ and the standard deviation is $R\$ 25.00$.
In each of the items below, make clear how the new price was constructed as a function of $X$.
Show all calculations clearly.
a) Suppose every Brazilian store that sells multimeters added 15 reais to the price of each of its multimeters. In that case, what are the new mean price and the corresponding standard deviation?
b) Suppose every Brazilian store that sells multimeters raised the price of each of its multimeters by $150\%$. In that case, what are the new mean price and the corresponding standard deviation?
c) Suppose you need to buy ten multimeters, all identical and from the same store. In that case, what are the expected total cost and the corresponding standard deviation?
Answers
a) In this case only the mean price changes, increasing by the same 15.00 reais, following E(X+d) = 45 + 15 = 60; with X the original price and d the increase, the new mean price is 60.00 reais. The standard deviation does not change.
b) Since the change here is multiplicative, the logic is: E(cX+d) = (45 x 1.5) + 45 = 112.50 reais, while the new standard deviation is the square root of Var(cX+d) = 2.5^2 x 25^2, that is, 2.5 x 25 = 62.50 reais.
c) Given that the price repeats (the ten multimeters are identical and from the same store), the expected total cost is the mean multiplied by the number of items bought, 10 x 45 = 450 reais, and likewise the standard deviation is 10 x 25 = 250 reais.
Question 2
Assume that, in the Ciência dos Dados course, the final grade is a function of the average exam grade $X$ and the average project grade $Y$. Also assume that both grades have the same weight in the final grade.
The students' average exam grade follows a normal distribution with mean 4 and standard deviation 1.5, i.e. $X \sim N(4 ; 2.25)$, and the average project grade follows a normal distribution with mean 7 and standard deviation 1, i.e. $Y \sim N(7 ; 1)$.
In practice, it is reasonable to believe there is a positive association between these grades. Here, assume the covariance between the two average grades is $0.675$.
In all of the items below, show all calculations clearly.
a) Compute the final mean grade and the corresponding standard deviation.
b) Assume the final grade in the course follows a distribution with the mean and variance obtained in the previous item. Compute the probability that a student passes the course.
Answers
a) Based on the properties of expectation and variance, the final mean grade is E(aX+bY) = aE(X) + bE(Y) = 5.5, since both grades have the same weight (50%), with the exam average at 4 and the project average at 7. For the standard deviation, compute the variance first and then take its square root: Var(aX+bY) = (0.5^2)x2.25 + (0.5^2)x1 + (2x0.5x0.5)x0.675 = 1.15, so the standard deviation is 1.07.
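A quick numerical check of these figures, and of item (b), using only the values given in the statement (an added sketch, not part of the original hand-in; it assumes the passing threshold of 5 used in the code above):
```python
from scipy import stats

mean_final = 0.5 * 4 + 0.5 * 7                                     # 5.5
var_final = 0.25 * 2.25 + 0.25 * 1.0 + 2 * 0.5 * 0.5 * 0.675       # 1.15
sd_final = var_final ** 0.5                                        # ~1.07
prob_pass = 1 - stats.norm.cdf(5, loc=mean_final, scale=sd_final)  # ~0.68
mean_final, sd_final, prob_pass
```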
End of explanation
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy import stats
%matplotlib inline
houses = pd.read_csv("houses.csv")
houses.head()
"""
Explanation: b) From the CDF calculation above, the probability that a student passes the course is 67.98%.
Dataset for the programming questions
We will work with the House Sales in King County problem, from Kaggle
https://www.kaggle.com/harlfoxem/housesalesprediction
End of explanation
"""
tamanho = houses.sqft_lot
stats.probplot(tamanho, dist='norm', plot=plt)
plt.show()
"""
Explanation: Q3
The dataset provided is a sample drawn from a larger dataset. Could the sample have come from a population whose mean sqft_lot is 8250?
Steps:
a) Verify that sqft_lot is not normal.
Hint: a normality test was done in APS 7. Repeat the same procedure.
b) If sqft_lot were normal, what kind of test could we have used?
c) Would we know $\mu$ to build the t statistic? Would we know $\sigma$?
Bootstrap:
d) Build a $95\%$ bootstrap percentile interval.
e) Report the lower and upper values.
f) Conclude the test.
Answers
End of explanation
"""
am = []
for i in houses.sqft_lot:
am.append(i)
al = 0.05
n = len(am)
s = np.std(am, ddof=1)
m_ = np.mean(am)
l = []
for i in range(10000):
# bootstrap replication: resample with replacement and record the sample mean
a = np.random.choice(am,size = n, replace = True).mean()
l.append(a)
# Lower and upper bounds of the bootstrap percentile interval
print("Lower bound: ")
print(pd.Series(l).quantile(al/2))
print("Upper bound: ")
print(pd.Series(l).quantile(1-(al/2)))
"""
Explanation: Given how the points deviate from the straight line, we can say that this is not a normal distribution.
If the distribution were normal, then a hypothesis test could have been carried out to analyse it.
The value of σ would not be known, although an approximate value could be found for it; on the other hand, the value of μ would be known and could readily be used to evaluate the normal distribution.
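Had sqft_lot looked normal, the test alluded to in item (b) could have been run directly; a minimal sketch (added, not part of the original answer):
```python
from scipy import stats

# one-sample t-test of H0: the population mean of sqft_lot equals 8250
t_stat, p_value = stats.ttest_1samp(houses['sqft_lot'], 8250)
t_stat, p_value
```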
End of explanation
"""
hp = houses.price
hl = houses["sqft_living"]
hl = sm.add_constant(hl, prepend=True)
res = sm.OLS(hp,hl).fit()
res.summary()
"""
Explanation: Q4
Run a regression that uses sqft_living as the explanatory variable and price as the outcome:
a) Following the notation we used in class, which one is $\hat{\beta_0}$ and which one is $\hat{\beta_1}$?
End of explanation
"""
B0 = res.params.const
B1 = res.params.sqft_living
print("B0 é igual a: ")
print(B0)
print("B1 é igual a: ")
print(B1)
"""
Explanation: From the table above we know the values of beta0 and beta1, and therefore:
End of explanation
"""
print("House.price = {0} + {1} hl, sendo hp o preço e hl o sqft_living, ou seja tamanho do interior habitável da casa".format(B0,B1))
"""
Explanation: b) Interpret the $R^2$, the $P > |t|$ values, and also Prob (F-statistic).
The $R^2$ value is basically a numerical measure of how well the model is able to explain the observed values, i.e. a kind of goodness of fit of the model. The $P>|t|$ value is simply the probability that there is no relationship or influence between the variables in question.
c) Write the equation that expresses price as a function of sqft_living.
End of explanation
"""
houses.plot.scatter("sqft_living","price")
hp = 49281.45802 + 247.080994*houses["sqft_living"]
plt.plot(houses['sqft_living'],hp,"green")
"""
Explanation: d) Make a scatter plot of price as a function of sqft_living, and plot the equation you found in the previous item on the same graph
End of explanation
"""
a2 = houses['price']
hloc = houses.loc[:, ['sqft_lot', 'sqft_basement']]
hloc = sm.add_constant(hloc, prepend=True)
resultados = sm.OLS(a2,hloc).fit()
resultados.summary()
"""
Explanation: Q5
Now run a multiple regression that tries to predict price (the explained variable) from the variables sqft_lot and sqft_basement (the explanatory ones).
Hint: put the two columns inside a single variable X; everything else stays the same as in the simple regression.
python
Y = houses['price']
X = houses.loc[:, ['sqft_lot', 'sqft_basement']]
You are asked to:
a) Repeat the interpretation of the $P>|t|$ values and Prob (F-statistic), now with two variables.
What do the values found imply for the regression?
These $P>|t|$ and Prob (F-statistic) values express the idea of a relationship between each of the two explanatory variables and the response.
b) Write the function found by the regression (price as a function of sqft_lot and sqft_basement)
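For item (b), the fitted coefficients can be read off the resultados object directly; a short sketch (added for clarity):
```python
b0, b1, b2 = resultados.params  # order: const, sqft_lot, sqft_basement
print("price = {0:.2f} + {1:.4f} * sqft_lot + {2:.4f} * sqft_basement".format(b0, b1, b2))
```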
End of explanation
"""
|
bjshaw/phys202-2015-work
|
assignments/assignment05/InteractEx02.ipynb
|
mit
|
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 2
Imports
End of explanation
"""
import math as math
def plot_sine1(a, b):
x = np.linspace(0,4*math.pi,300)
f = plt.figure(figsize=(15,5))
plt.plot(x, np.sin(a*x+b))
plt.title('Sine Graph')
plt.xlabel('x')
plt.ylabel('sin(ax+b)')
plt.tick_params(right=False, top=False, direction='out')
plt.xticks([0,math.pi,2*math.pi,3*math.pi,4*math.pi],['0','$\pi$','$2\pi$','$3\pi$','$4\pi$'])
plt.xlim(0,4*math.pi)
plot_sine1(5, 3.4)
"""
Explanation: Plotting with parameters
Write a plot_sin1(a, b) function that plots $sin(ax+b)$ over the interval $[0,4\pi]$.
Customize your visualization to make it effective and beautiful.
Customize the box, grid, spines and ticks to match the requirements of this data.
Use enough points along the x-axis to get a smooth plot.
For the x-axis tick locations use integer multiples of $\pi$.
For the x-axis tick labels use multiples of pi using LaTeX: $3\pi$.
End of explanation
"""
interact(plot_sine1, a=(0.0,5.0,0.1), b=(-5.0,5.0,0.1))
assert True # leave this for grading the plot_sine1 exercise
"""
Explanation: Then use interact to create a user interface for exploring your function:
a should be a floating point slider over the interval $[0.0,5.0]$ with steps of $0.1$.
b should be a floating point slider over the interval $[-5.0,5.0]$ with steps of $0.1$.
End of explanation
"""
def plot_sine2(a,b,style='b-'):
x = np.linspace(0,4*math.pi,300)
f = plt.figure(figsize=(15,5))
plt.plot(x, np.sin(a*x+b), style)
plt.title('Sine Graph')
plt.xlabel('x')
plt.ylabel('sin(ax+b)')
plt.tick_params(right=False, top=False, direction='out')
plt.xticks([0,math.pi,2*math.pi,3*math.pi,4*math.pi],['0','$\pi$','$2\pi$','$3\pi$','$4\pi$'])
plt.xlim(0,4*math.pi)
plot_sine2(4.0, -1.0, 'r--')
"""
Explanation: In matplotlib, the line style and color can be set with a third argument to plot. Examples of this argument:
dashed red: r--
blue circles: bo
dotted black: k.
Write a plot_sine2(a, b, style) function that has a third style argument that allows you to set the line style of the plot. The style should default to a blue line.
End of explanation
"""
interact(plot_sine2, a=(0.0,5.0,0.1), b=(-5.0,5.0,0.1), style={'dotted blue line': 'b--', 'black circles': 'ko',
'red triangles': 'r^'})
assert True # leave this for grading the plot_sine2 exercise
"""
Explanation: Use interact to create a UI for plot_sine2.
Use a slider for a and b as above.
Use a drop down menu for selecting the line style between a dotted blue line, black circles and red triangles.
End of explanation
"""
|
bigdata-i523/hid335
|
experiment/Python_SKL_SupportVectorClassifier.ipynb
|
gpl-3.0
|
import sklearn
import mglearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Introduction to Machine Learning
Andreas Mueller and Sarah Guido (2017) O'Reilly
Ch. 2 Supervised Learning
Support Vector Machines (SVM)
Understanding SVMs
During Training:
* SVM learns how important each of data points in train set is to represent the decision boundary between two classes
* Subset of data points on the border between classes (i.e., "support vectors") are used to define the decision boundary
Making a Prediction
To make prediction for new point, distance to each of the support vectors is measured
Classification decision is based on distances to the support vectors, and importance of support vectors learned in training.
Gaussian kernel
Measure of the distance between data points
|| x1 - x2 || denotes Euclidean distance
gamma parameter controls width of Gaussian kernel
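Written out, the RBF (Gaussian) kernel is
$$
k_{\text{rbf}}(x_1, x_2) = \exp\left(-\gamma \, \lVert x_1 - x_2 \rVert^2\right)
$$
so a larger gamma makes the kernel value fall off faster with distance.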
Import standard packages
End of explanation
"""
from sklearn.svm import SVC
"""
Explanation: Support Vector Classifier (SVC)
End of explanation
"""
X, y = mglearn.tools.make_handcrafted_dataset()
svm = SVC(kernel='rbf', C=10, gamma=0.1).fit(X, y)
mglearn.plots.plot_2d_separator(svm, X, eps=.5)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
# plot support vectors
sv = svm.support_vectors_
# class labels of support vectors are given by sign of dual coefficients
sv_labels = svm.dual_coef_.ravel() > 0
mglearn.discrete_scatter(sv[:, 0], sv[:, 1], sv_labels, s=15, markeredgewidth=3)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
"""
Explanation: Plot decision boundary and support vectors
SVC with RBF kernel produces smooth (nonlinear) boundary
Parameters: C and gamma
Support vectors are larger symbols in bold on boundary
End of explanation
"""
fig, axes = plt.subplots(3, 3, figsize=(15,10))
for ax, C in zip(axes, [-1, 0, 3]):
for a, gamma in zip(ax, range(-1,2)):
mglearn.plots.plot_svm(log_C=C, log_gamma=gamma, ax=a)
axes[0,0].legend(["Class 0", "Class 1", "Class 2",
"SV Class 0", "SV Class 1"],
ncol=4, loc=(0.9, 1.2))
"""
Explanation: Tuning SVM Parameters
Gamma
Controls width of Gaussian kernel
Determines scale of what is close for points near to each other
Regularization parameter: C
Limits the importance of each point (via dual_coef)
Visualization of SVM parameters
Plot decision boundaries and support vectors
Different settings of parameters C and gamma
End of explanation
"""
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
print(cancer.keys())
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, stratify=cancer.target, random_state=0)
svc = SVC()
svc.fit(X_train, y_train)
print("Accurary on Training set: {:.3f}".format(svc.score(X_train, y_train)))
print("Accuracy Test set: {:.3f}".format(svc.score(X_test, y_test)))
"""
Explanation: Effect of Gamma parameter (left to right)
Small gamma means large radius of Guassian kernel, many points considered as close
Low gamma: decision boundary will vary slowly, model of low complexity
High gamma: yeilds more complex model, focusing more on single points
Effect of Regularization parameter C (top to bottom)
Small C means very restricted model, limited influence of individual data points
Increasing value of C allows individual points to have greater influence on model
High value of C bends decision boundary to correctly classify individual data points
SupportVectorClassifier: Breast Cancer Data
Load data, split dataset into Train set and Test set
Apply RBF kernel, default parameters: C = 1, gamma = 1/n_features
Evaluate model accuracy on Test set
End of explanation
"""
plt.plot(X_train.min(axis=0), 'o', label="min")
plt.plot(X_train.max(axis=0), '^', label="max")
plt.legend(loc=4)
plt.xlabel("Feature Index")
plt.ylabel("Feature Magnitude (log)")
plt.yscale("log")
"""
Explanation: Model Overfitting
Model is overfit with default settings
SVC is very sensitive to parameter settings and scaling of data
Scaling of Data
Plot minimum and maximum values for each feature
Features in cancer data are measured on completely different scales
Devastating effect for the kernel of SVM
End of explanation
"""
# Compute minimum value per feature on Training set
min_on_training = X_train.min(axis=0)
# Compute range of each feature (max - min) on Training set
range_on_training = (X_train - min_on_training).max(axis=0)
# subtract min, divide by range; then min=0 and max=1 for each feature
X_train_scaled = (X_train - min_on_training) / range_on_training
print("Minimum for each feature\n{}".format(X_train_scaled.min(axis=0)))
print("Maximum for each feature\n {}".format(X_train_scaled.max(axis=0)))
# use SAME transformation on Test set, using min and range of training set
X_test_scaled = (X_test - min_on_training) / range_on_training
"""
Explanation: Preprocessing : Rescale the data
Rescale each feature so they are all on approximately the same scale
A common method for kernel SVM is to scale the data so all features lie between 0 and 1
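The same 0-to-1 rescaling can also be done with scikit-learn's MinMaxScaler, which is less error-prone than doing it by hand; a sketch equivalent to the manual version:
```python
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
# fit on the training data only, then apply the same transformation to both sets
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
```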
End of explanation
"""
svc = SVC()
svc.fit(X_train_scaled, y_train)
print("Accurary on Training set: {:.3f}".format(svc.score(X_train_scaled, y_train)))
print("Accuracy Test set: {:.3f}".format(svc.score(X_test_scaled, y_test)))
"""
Explanation: Run SVC on Rescaled data
End of explanation
"""
svc = SVC(C=1000)
svc.fit(X_train_scaled, y_train)
print("Accurary on Training set: {:.3f}".format(svc.score(X_train_scaled, y_train)))
print("Accuracy Test set: {:.3f}".format(svc.score(X_test_scaled, y_test)))
"""
Explanation: Adjust Parameters
Scaling data improved accuracy, but model is now underfit
Try increasing value of C or gamma to fit more complex model
Increasing value of C improves model accuracy to 97%
End of explanation
"""
|
zomansud/coursera
|
ml-foundations/week-6/Assignment - Image Classification and Image Retrieval.ipynb
|
mit
|
image_train = graphlab.SFrame('image_train_data/')
image_test = graphlab.SFrame('image_test_data/')
image_train.head()
"""
Explanation: Load the image dataset
End of explanation
"""
image_train['label'].sketch_summary()
"""
Explanation: Computing summary statistics of the data
Using the training data, compute the sketch summary of the ‘label’ column and interpret the results. What’s the least common category in the training data? Save this result to answer the quiz at the end.
End of explanation
"""
label_filter = lambda l : image_train[image_train['label'] == l]
image_train_auto = label_filter('automobile')
len(image_train_auto)
image_train_cat = label_filter('cat')
len(image_train_cat)
image_train_dog = label_filter('dog')
len(image_train_dog)
image_train_bird = label_filter('bird')
len(image_train_bird)
auto_model = graphlab.nearest_neighbors.create(image_train_auto, features=['deep_features'], label='id')
cat_model = graphlab.nearest_neighbors.create(image_train_cat, features=['deep_features'], label='id')
dog_model = graphlab.nearest_neighbors.create(image_train_dog, features=['deep_features'], label='id')
bird_model = graphlab.nearest_neighbors.create(image_train_bird, features=['deep_features'], label='id')
def get_images_from_ids(query_result):
return image_train.filter_by(query_result['reference_label'], 'id')
show_neighbours = lambda i : get_images_from_ids(knn_model.query(image_train[i:i+1]))['image'].show()
image_test[0:1]['image'].show()
graphlab.canvas.set_target('ipynb')
image_test[0:1]['image'].show()
"""
Explanation: Creating category-specific image retrieval models
End of explanation
"""
cat_model.query(image_test[0:1])
image_train_cat[image_train_cat['id'] == 16289]['image'].show()
"""
Explanation: What is the nearest ‘cat’ labeled image in the training data to the cat image above (the first image in the test data)? Save this result.
End of explanation
"""
dog_model.query(image_test[0:1])
image_train_dog[image_train_dog['id'] == 16976]['image'].show()
"""
Explanation: What is the nearest ‘dog’ labeled image in the training data to the cat image above (the first image in the test data)? Save this result.
End of explanation
"""
cat_model.query(image_test[0:1])['distance'].mean()
"""
Explanation: A simple example of nearest-neighbors classification
For the first image in the test data (image_test[0:1]), which we used above, compute the mean distance between this image and its 5 nearest neighbors that were labeled ‘cat’ in the training data (similarly to what you did in the previous question). Save this result.
End of explanation
"""
dog_model.query(image_test[0:1])['distance'].mean()
"""
Explanation: Similarly, for the first image in the test data (image_test[0:1]), which we used above, compute the mean distance between this image and its 5 nearest neighbors that were labeled ‘dog’ in the training data (similarly to what you did in the previous question). Save this result.
End of explanation
"""
label_filter_test = lambda l : image_test[image_test['label'] == l]
image_test_cat = label_filter_test('cat')
print len(image_test_cat)
image_test_dog = label_filter_test('dog')
print len(image_test_dog)
image_test_bird = label_filter_test('bird')
print len(image_test_bird)
image_test_automobile = label_filter_test('automobile')
print len(image_test_automobile)
print len(image_test)
"""
Explanation: [Challenging Question] Computing nearest neighbors accuracy using SFrame operations
End of explanation
"""
dog_dog_neighbors = dog_model.query(image_test_dog, k = 1)
dog_cat_neighbors = cat_model.query(image_test_dog, k = 1)
dog_automobile_neighbors = auto_model.query(image_test_dog, k = 1)
dog_bird_neighbors = bird_model.query(image_test_dog, k = 1)
dog_distances = graphlab.SFrame({
'dog-dog' : dog_dog_neighbors['distance'],
'dog-cat' : dog_cat_neighbors['distance'],
'dog-bird': dog_bird_neighbors['distance'],
'dog-automobile': dog_automobile_neighbors['distance']
})
dog_distances.head()
"""
Explanation: Finding nearest neighbors in the training set for each part of the test set
End of explanation
"""
dog_distances[0:1]['dog-dog']
def is_dog_correct(r):
return r['dog-dog'] < r['dog-cat'] and r['dog-dog'] < r['dog-bird'] and r['dog-dog'] < r['dog-automobile']
is_dog_correct(dog_distances[0:1])
"""
Explanation: Computing the number of correct predictions using 1-nearest neighbors for the dog class
End of explanation
"""
dog_distances.apply(is_dog_correct).sum() / float(len(image_test_dog))
dog_distances
"""
Explanation: Accuracy of predicting dog in the test data:
Using the work you did in this question, what is the accuracy of the 1-nearest neighbor classifier at classifying ‘dog’ images from the test set? Save this result to answer the quiz at the end.
End of explanation
"""
|
constellationcolon/simplexity
|
lpsm.ipynb
|
mit
|
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# define view
r_min = 0.0
r_max = 3.0
s_min = 0.0
s_max = 5.0
res = 50
r = numpy.linspace(r_min, r_max, res)
# plot axes
axes.axhline(0, color='#B3B3B3', linewidth=5)
axes.axvline(0, color='#B3B3B3', linewidth=5)
# plot constraints
c_1 = lambda x: 4 - 2*x
c_2 = lambda x: 1 - x
c_3 = lambda x: 0.25 * ( 6 - 3*x )
c_1_line = axes.plot( r, c_1(r), label='Fibre' ) # 2r + s \geq 4
c_2_line = axes.plot( r, c_2(r), label='Protein' ) # 3r + 3s \geq 3
c_3_line = axes.plot( r, c_3(r), label='Carbohydrate' ) # 3r + 4s \geq 6
# plot objective
s = numpy.linspace(s_min, s_max, res)
c = numpy.empty([r.size, s.size])
for i, r_i in enumerate(r):
c[:,i] = 5 * r_i + 7 * s  # cost 5r + 7s, matching the objective above
axes.contourf(r, s, c, res, cmap='Oranges', alpha=0.5)
r_cut = numpy.linspace(0.0, 2.0, 100)
axes.fill_between(r_cut, c_1(r_cut), color='w')
# plot cost minimising point
axes.plot(2.0, 0, 'o')
# label graph
axes.set_title('Visualising the Diet Problem')
axes.set_xlabel('Chicken Over Rice')
axes.set_ylabel('Sub')
axes.legend()
plt.show()
"""
Explanation: The (College Student) Diet Problem
Consider the canonical college student. After a hard afternoon's work of solving way too many partial differential equations, she emerges from her room to obtain sustenance for the day.
She has a choice between getting chicken over rice (\$5) from the halal cart on her street ($r$), or subs (\$7) from the deli ($s$). She's a poor college student, so she will obviously want to get her money's worth. This is obviously an optimisation problem: she wants to find the amount of chicken over rice and subs she has to buy in order to minimise the total cost she spends on food.
$$
\text{minimise} \quad 5r + 7s
$$
In optimisation, we like to call this expression the objective function.
Well, it's not as simple as that. A girl's got to get her fill of daily nutrients. Fibre, protein, and carbohydrates are all important, and however far away food pyramids are from the quotidien thoughts of college students, a girl can still dream of a pseudo-healthy diet with at least 4 servings of fibre, 3 servings of protein, and 6 servings of carbohydrates.
A chicken over rice has 2 servings of fibre, 3 servings of protein, and 3 servings of carbohydrates, while a sub has 1 serving of fibre, 3 servings of protein, and 4 servings of carbohydrates. To find the combination of meals that satisfies the daily nutritional requirements, we impose the following constraints:
\begin{align}
\text{Fibre: } &2r + s \geq 4 \
\text{Protein: } &3r + 3s \geq 3 \
\text{Carbohydrates: } &3r + 4s \geq 6
\end{align}
Visualising the Problem
End of explanation
"""
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
c_1 = lambda x: 4 - 2*x
c_2 = lambda x: 1 - x
c_3 = lambda x: - 0.25 * ( - 6 + 3*x )
c_1_line = axes.plot( r, c_1(r), label='Fibre' ) # 2r + s \geq 4
c_2_line = axes.plot( r, c_2(r), label='Protein' ) # 3r + 3s \geq 3
c_3_line = axes.plot( r, c_3(r), label='Carbohydrate' ) # 3r + 4s \geq 6
# plot objective
s = numpy.linspace(s_min, s_max, res)
c = numpy.empty([r.size, s.size])
for i, r_i in enumerate(r):
c[:,i] = - 5 * r_i - 7 * s  # standard-form objective -5r - 7s
axes.contourf(r, s, c, res, cmap='Oranges', alpha=0.5)
r_cut = numpy.linspace(0.0, 2.0, 100)
axes.fill_between(r_cut, c_1(r_cut), color='w')
# plot cost minimising point
axes.plot(2.0, 0, 'o')
# label graph
axes.set_title('Visualising the Diet Problem, Standard Form')
axes.set_xlabel('Chicken Over Rice')
axes.set_ylabel('Sub')
axes.legend(loc=1)
plt.show()
"""
Explanation: We can visualise our diet problem on a graph of "Number of Subs vs. Number of Chicken over Rice", where the lines each represent a constraint, and our cost function is represented in shades of orange: the deeper the orange, the more we will spend on meals.
The regions where we satisfy our constraints are the regions above the constraint lines, since we want more than or equal to the minimum number of servings. Obviously, we can't buy a negative number of subs or chicken over rice, so we also have the implicit constraints $r>0$ and $s>0$.
The intersection of all the regions that satisfy each of our constraints is what we call the feasible region, or the feasible set: the region of solutions that satisfy all constraints. In the graph, this is the region with the orange gradient fill.
So our problem of deciding how much of what food to buy has essentially been reduced to finding the point in the feasible set with the minimum cost (i.e. the lightest shade of orange). With one glance, we can tell that this point is $(2, 0)$, so we should buy 2 chicken over rice, and 0 subs. Interestingly, our feasible region is determined largely by the fibre constraint; read from this what you want.
Well, you think to yourself, that was easy; I can stop reading now!
That's true, if you only have 2 foods to choose between. But in general, life isn't as simple as this; if, say, you're a functioning adult and actually cook, you'll want to choose between the 1000's of grocery items available to you at the local supermarket. In that case, you'll have to draw out one axis for each food item (how you'll do that, I don't know), and then compare the colors across this unvisualisable space. This shit gets real, and fast.
Linear Programming
Well, luckily for us, a clever guy by the name of George Dantzig managed to solve exactly this type of problem for us while he was working for the U.S. Air Force in WWII, when computers were just starting to come out of the realm of science fiction. They faced a similar problem then, as many do now: they only had a set amount of men and resources, and wanted to maximise the amount of work they could do in winning the war.
In other areas, you could also imagine say, a furniture manufacturer wanting to find the most efficient way of using the manpower and planks, screws, tools, and whatever they use to build furniture these days, to produce the combination of furniture that will maximise their profits. Or, on Wall Street, a trader wanting to find the best combination of differently priced assets that maximises projected profits, or minimises risk (or something along those lines; I know nuts about finance).
We call these sorts of problems, wherein we want to maximise (or minimise!) some linear objective function subject to a set of linear constraints linear optimisation problems, and the methods we use to solve these problems linear programming.
Standard Form and Duality
Linear optimisation problems can always be expressed as
\begin{align}
\text{maximise} \quad & b_1 x_1 + b_2 x_2 + \ldots + b_m x_m \
\text{subject to} \quad & a_{11} x_{1} + a_{21} x_{2} + \ldots + a_{m1} x_{m} \leq c_1 \
& a_{12} x_{1} + a_{22} x_{2} + \ldots + a_{m2} x_{m} \leq c_2 \
& \vdots \
& a_{1n} x_{1} + a_{2n} x_{2} + \ldots + a_{mn} x_{m} \leq c_n
\end{align}
In less symbols, this is
\begin{align}
\text{maximise} \quad & b^T x \
\text{subject to} \quad & Ax \leq c
\end{align}
This is what is commonly known as the dual form of the problem. Well, so if there is a dual, then there must actually be 2 problems, right? So what was the first?
Turns out, we call the "first" problem the primal problem, and surprisingly (or not), the solution of the primal problem will give us an upper bound on the corresponding solution of the dual problem. It looks like this:
\begin{align}
\text{minimise} \quad & c_1 y_1 + c_2 y_2 + \ldots + c_m y_ n\
\text{subject to} \quad & a_{11} y_{1} + a_{12} y_{2} + \ldots + a_{1n} y_{n} = b_1 \
& a_{21} y_{1} + a_{22} y_{2} + \ldots + a_{m2} y_{n} = b_2 \
& \vdots \
& a_{m1} y_{1} + a_{m2} y_{2} + \ldots + a_{nm} y_{n} = b_m \
\text{and} \quad & { y_i \geq 0 }_{i=1}^m
\end{align}
aka
\begin{align}
\text{minimise} \quad & c^T y \
\text{subject to} \quad & A^T y = b \
\text{and} \quad & y \geq 0
\end{align}
We basically interchange the constraints' constants and the coefficients in our objective function, and turn the inequalities into equalities. The nice thing about the dual problem and its primal is that if the dual has an optimal solution $x^*$, then the primal also has an optimal solution $y^*$, related by $b^Tx^* = c^Ty^*$, i.e. the two problems have the same optimum value!
The dual problem for linear optimisation problems was first conjectured by von Neumann, who was then working on game theory. We can think of the fact that any linear programme has a dual problem as 2 players playing a zero-sum game; any gains on the part of one player must necessarily result in losses for the other player. When you maximise utility for one player, you are at the same time minimising utility for the other.
So what does our college student diet problem look like in the standard form (and its primal?)
Since maximising a function is just minimising the negative of the function, the problem becomes
\begin{align}
\text{maximise} \quad & - 5r - 7s \
\text{subject to} \quad & - 2r - s \leq - 4 \
& - 3r - 3s \leq - 3 \
& - 3r - 4s \leq - 6
\end{align}
End of explanation
"""
import pandas as pd
pd.set_option('display.notebook_repr_html', True)
def pivot(departing, entering, tab):
dpi = tab[tab['basic_variable']==departing].index[0] # index of the departing row
# update basic variable
tab['basic_variable'][dpi] = entering
# normalise departing_row
tab.ix[dpi,0:-1] = tab.ix[dpi,0:-1] / tab[entering][dpi]
departing_row = tab.ix[dpi,0:-1]
# do gauss-jordan on entering variable column
for row in tab.index[tab.index!=dpi]:
tab.ix[row, 0:-1] = tab.ix[row, 0:-1] - tab[entering][row] * departing_row
# Bland's rule
def calculate_ratio(entering, tab):
ratios = tab.ix[0:-1, 'value'] * 0 - 1
for index, is_valid in enumerate(tab.ix[0:-1, entering] > 0):
if is_valid==True:
ratios[index] = tab.ix[index, 'value']/tab.ix[index, entering]
return ratios
def find_entering(tab):
return tab.ix['z',0:-2].idxmin()
def find_departing(ratios, tab):
return tab.ix[ratios[ratios>=0].idxmin(),'basic_variable']
def update_stats(tab):
print "Basic variables: "
basic_variables = tab.ix[0:-1, 'basic_variable'].values
print basic_variables
print "Non-basic variables: "
non_basic_variables = numpy.setdiff1d(tab.columns[0:-2], basic_variables)
print non_basic_variables
print "Entering variable: "
entering_variable = find_entering(tab)
print entering_variable
print "Ratios: "
ratios = calculate_ratio(entering_variable, tab)
print ratios
print "Departing variable: "
departing_variable = find_departing(ratios, tab)
print departing_variable
return departing_variable, entering_variable
def is_optimum(tab):
return (tab.ix['z',0:-2] >= 0).all()
def run_simplex(tableau_dict, tableau_orig, max_iterations=10, force_iterations=0):
if force_iterations == 0:
for i in xrange(max_iterations):
tableau_dict[i] = tableau_orig.copy()
display(tableau_orig)
if is_optimum(tableau_orig):
break
departing_variable, entering_variable = update_stats(tableau_orig)
pivot(departing_variable, entering_variable, tableau_orig)
else:
for i in xrange(force_iterations):
tableau_dict[i] = tableau_orig.copy()
display(tableau_orig)
departing_variable, entering_variable = update_stats(tableau_orig)
pivot(departing_variable, entering_variable, tableau_orig)
c_1 = numpy.array([[ 3, 3, 2, 1, 0, 5, 's_1']])
c_2 = numpy.array([[ 4, 3, 1, 0, 1, 7, 's_2']])
z = numpy.array([[-6, -3, -4, 0, 0, 0, '']])
rows= numpy.concatenate((c_1, c_2, z), axis=0)
tableau = pd.DataFrame(rows, columns=['y_1','y_2','y_3','s_1','s_2','value', 'basic_variable'], index=['c_1','c_2','z'])
tableau.ix[:,0:-1] = tableau.ix[:,0:-1].astype('float')
tableaux = dict()
run_simplex(tableaux, tableau)
from ipywidgets import interact
def diet_problem(step):
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
c_1 = lambda x: 4 - 2*x
c_2 = lambda x: 1 - x
c_3 = lambda x: - 0.25 * ( - 6 + 3*x )
c_1_line = axes.plot( r, c_1(r), label='Fibre' ) # 2r + s \geq 4
c_2_line = axes.plot( r, c_2(r), label='Protein' ) # 3r + 3s \geq 3
c_3_line = axes.plot( r, c_3(r), label='Carbohydrate' ) # 3r + 4s \geq 6
# plot objective
for i, r_i in enumerate(r):
c[:,i] = - 5 * r_i - 7 * s
axes.contourf(r, s, c, res, cmap='Oranges', alpha=0.5)
axes.fill_between(r_cut, c_1(r_cut), color='w')
step_coords = numpy.array([[0.0, 0.0], [2.0, 0.0]])
# plot point
axes.plot(step_coords[step][0], step_coords[step][1], 'ro', markersize=10)
# label graph
axes.set_title('Simplex Method on the College Diet Problem, Iteration ' + str(step))
axes.set_xlabel('Chicken Over Rice')
axes.set_ylabel('Sub')
axes.legend(loc=1)
plt.show()
display(tableaux[step])
interact(diet_problem, step=(0,1));
"""
Explanation: And its primal is
\begin{align}
\text{minimise} \quad & - 4y_1 - 3y_2 - 6y_3 \
\text{subject to} \quad & 2y_1 + 3y_2 + 3y_3 \leq 5 \
& y_1 + 3y_2 + 4y_3 \leq 7 \
\text{and} \quad & { y_i \geq 0 }_{i=1}^3 \
\end{align}
Because $r$ and $s$ are themselves restricted to be non-negative, the primal constraints come out as inequalities $A^T y \geq b$ rather than equalities; with $b = (-5, -7)$ this is the $\leq$ form written above. We can see this as minimising the objective function over the region cut out by the two constraint planes (together with the non-negativity constraints). We can also interpret this as wanting to maximise the nutritional value of our meals, given that trying to increase the quantity of one nutrient will necessarily mean that we have to give up some amount of another nutrient.
The Simplex Method
Standard Form (for the Simplex Method)
\begin{align}
\text{maximise} \quad & c_1 x_1 + c_2 x_2 + \ldots + c_m x_m \
\text{subject to} \quad & a_{11} x_1 + a_{12} x_2 + \ldots + a_{1m} x_m \leq b_1 \
& a_{21} x_1 + a_{22} x_2 + \ldots + a_{2m} x_m \leq b_2 \
& \vdots \
& a_{n1} x_1 + a_{n2} x_2 + \ldots + a_{nm} x_m \leq b_n \
\text{and} \quad & { x_i \geq 0 }_{i=1}^m \text{ and } { b_j \geq 0 }_{j=1}^n
\end{align}
If you are currently trying to minimise the objective function, turn it into a maximisation problem by taking the negative of the expression
Turn all the inequality constraints into equality constraints by adding slack variables
If these transformations still don't allow your system of equations to fit the form, solve the dual form of the problem!
System of Constraint Equations
\begin{align}
\text{maximise} \quad & c_1 x_1 + c_2 x_2 + \ldots + c_m x_m = z \
\text{subject to} \quad & a_{11} x_1 + a_{12} x_2 + \ldots + a_{1m} x_m + s_1 = b_1 \
& a_{21} x_1 + a_{22} x_2 + \ldots + a_{2m} x_m + s_2 = b_2 \
& \vdots \
& a_{n1} x_1 + a_{n2} x_2 + \ldots + a_{nm} x_m + s_n = b_n \
\text{and} \quad & { x_i \geq 0 }_{i=1}^{m}, ~ { s_j \geq 0 }_{j=1}^{n}, ~ \text{ and } { b_j \geq 0 }_{j=1}^n
\end{align}
Taking another look at our diet problem, we can put this problem
\begin{align}
\text{maximise} \quad & - 5r - 7s \
\text{subject to} \quad & - 2r - s \leq - 4 \
& - 3r - 3s \leq - 3 \
& - 3r - 4s \leq - 6 \
\text{and} \quad & r, s \geq 0
\end{align}
into standard form for the simplex method by putting it into its dual form:
\begin{align}
\text{maximise} \quad & 6y_1 + 3y_2 + 4y_3 \
\text{subject to} \quad & 3y_1 + 3y_2 + 2y_3 \leq 5 \
& 4y_1 + 3y_2 + y_3 \leq 7 \
\text{and} \quad & { y_i \geq 0 }_{i=1}^3 \
\end{align}
Hence, the constraint equations are
\begin{align}
\text{maximise} \quad & 6y_1 + 3y_2 + 4y_3 = z \
\text{subject to} \quad & 3y_1 + 3y_2 + 2y_3 + s_1 = 5 \
& 4y_1 + 3y_2 + y_3 + s_2 = 7 \
\text{and} \quad & { y_i \geq 0 }_{i=1}^3 \text{ and } { s_i \geq 0 }_{i=1}^2 \
\end{align}
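Before stepping through the algorithm by hand, here is a quick cross-check (my addition, not part of the original notebook; it assumes SciPy is available): scipy.optimize.linprog can solve both the original diet problem and the standard-form problem above, and by duality the two optimal values should coincide at 10.
from scipy.optimize import linprog

# Diet problem: minimise 5r + 7s  s.t.  2r + s >= 4,  3r + 3s >= 3,  3r + 4s >= 6,  r, s >= 0
diet = linprog(c=[5, 7], A_ub=[[-2, -1], [-3, -3], [-3, -4]], b_ub=[-4, -3, -6])

# Standard form above: maximise 6y_1 + 3y_2 + 4y_3  s.t.  3y_1 + 3y_2 + 2y_3 <= 5,  4y_1 + 3y_2 + y_3 <= 7
dual = linprog(c=[-6, -3, -4], A_ub=[[3, 3, 2], [4, 3, 1]], b_ub=[5, 7])

diet.fun, -dual.fun   # both should come out as 10.0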
The Algorithm
End of explanation
"""
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# define view
x_1_min = 0.0
x_1_max = 3.0
x_2_min = 0.0
x_2_max = 5.0
res = 50
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
x_1 = numpy.linspace(x_1_min, x_1_max, res)
c_1 = lambda x: 4.0 - 2.0*x
c_2 = lambda x: (30.0 - 10.0*x)/14.0
c_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # 2x_1 + x_2 \leq 4
c_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # 10x_1 + 14x_2 \leq 30
# plot objective
x_2 = numpy.linspace(x_2_min, x_2_max, res)
c = numpy.empty([x_1.size, x_2.size])
for i, x_1_i in enumerate(x_1):
c[:,i] = 5 * x_1_i + 7 * x_2
axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)
# shade feasible region
c_1_bottom = numpy.linspace(0.0, 2.0, res)
c_2_bottom = numpy.linspace(0.0, 3.0, res)
axes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)
axes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)
# label graph
axes.set_title('How many solutions?')
axes.set_xlabel(r'x_1')
axes.set_ylabel(r'x_2')
axes.legend(loc=1)
plt.show()
"""
Explanation: Bland's Rule
This seemingly arbitrary rule will seem less arbitrary in just a while.
Multiple Optimal Solutions
So, given the graphical intuition we now have for how the simplex method works, is there ever a time when we would encounter more than one optimal solution for a given problem?
\begin{align}
\text{maximise} \quad & 5x_1 + 7x_2 \
\text{subject to} \quad & 2x_1 + x_2 \leq 4 \
& 10x_1 + 14x_2 \leq 30 \
\text{and} \quad & x_1, x_2 \geq 0
\end{align}
End of explanation
"""
c_1 = numpy.array([[ 2, 1, 1, 0, 4, 's_1']])
c_2 = numpy.array([[10, 14, 0, 1, 30, 's_2']])
z = numpy.array([[-5, -7, 0, 0, 0, '']])
rows= numpy.concatenate((c_1, c_2, z), axis=0)
tableau_multiple = pd.DataFrame(rows, columns=['x_1','x_2','s_1','s_2','value', 'basic_variable'], index=['c_1','c_2','z'])
tableau_multiple.ix[:,0:-1] = tableau_multiple.ix[:,0:-1].astype('float')
tableaux_multiple = dict()
run_simplex(tableaux_multiple, tableau_multiple, force_iterations=3)
step_coords = numpy.array([[0.0, 0.0], [0.0, 2.14286], [tableaux_multiple[2].ix['c_1','value'], tableaux_multiple[2].ix['c_2','value']]])
step_value = numpy.array([tableaux_multiple[0].ix['z','value'], tableaux_multiple[1].ix['z','value'], tableaux_multiple[2].ix['z','value']])
def multiple_problem(step):
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# define view
x_1_min = 0.0
x_1_max = 3.0
x_2_min = 0.0
x_2_max = 5.0
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
x_1 = numpy.linspace(x_1_min, x_1_max, res)
c_1 = lambda x: 4.0 - 2.0*x
c_2 = lambda x: (30.0 - 10.0*x)/14.0
c_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # 2x_1 + x_2 \leq 4
c_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # 10x_1 + 14x_2 \leq 30
# plot objective
x_2 = numpy.linspace(x_2_min, x_2_max, res)
c = numpy.empty([x_1.size, x_2.size])
for i, x_1_i in enumerate(x_1):
c[:,i] = 5 * x_1_i + 7 * x_2
# color map of objective function values
axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)
# shade feasible region
c_1_bottom = numpy.linspace(0.0, 2.0, res)
c_2_bottom = numpy.linspace(0.0, 3.0, res)
axes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)
axes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)
# plot point
axes.plot(step_coords[step][0], step_coords[step][1], 'ro', markersize=10)
axes.text(step_coords[step][0]+0.1, step_coords[step][1], step_value[step])
# label graph
axes.set_title('How many solutions?')
axes.set_xlabel('x_1')
axes.set_ylabel('x_2')
axes.legend(loc=1)
plt.show()
display(tableaux_multiple[step])
interact(multiple_problem, step=(0,2));
"""
Explanation: \begin{align}
\text{maximise} \quad & 5x_1 + 7x_2 \
\text{subject to} \quad & 2x_1 + x_2 + s_1 = 4 \
& 10x_1 + 14x_2 + s_2 = 30 \
\text{and} \quad & x_1, x_2, s_1, s_2 \geq 0
\end{align}
End of explanation
"""
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# define view
x_1_min = 0.0
x_1_max = 10.0
x_2_min = 0.0
x_2_max = 15.0
# res = 100
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
x_1 = numpy.linspace(x_1_min, x_1_max, res)
c_1 = lambda x: 5.0 + x
c_2 = lambda x: 7 + 0.5*x
c_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # -x_1 + x_2 \leq 5
c_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # -\frac{1}{2}x_1 + x_2 \leq 7
# plot objective
x_2 = numpy.linspace(x_2_min, x_2_max, res)
c = numpy.empty([x_1.size, x_2.size])
for i, x_1_i in enumerate(x_1):
c[:,i] = 5 * x_1_i + 7 * x_2
axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)
# shade feasible region
# c_1_bottom = numpy.linspace(0.0, 2.0, res)
# c_2_bottom = numpy.linspace(0.0, 3.0, res)
axes.fill_between(x_1, c_1(x_1), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)
axes.fill_between(x_1, c_2(x_1), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)
# label graph
axes.set_title('Unbounded Optima')
axes.set_xlabel(r'$x_1$')
axes.set_ylabel(r'$x_2$')
axes.legend(loc=2)
plt.show()
"""
Explanation: Unbounded Optima
\begin{align}
\text{maximise} \quad & 5x_1 + 7x_2 \
\text{subject to} \quad & -x_1 + x_2 \leq 5 \
& -\frac{1}{2}x_1 + x_2 \leq 7 \
\text{and} \quad & x_1, x_2 \geq 0
\end{align}
End of explanation
"""
c_1 = numpy.array([[ -1, 1, 1, 0, 5, 's_1']])
c_2 = numpy.array([[-0.5, 1, 0, 1, 7, 's_2']])
z = numpy.array([[ -5, -7, 0, 0, 0, '']])
rows= numpy.concatenate((c_1, c_2, z), axis=0)
tableau_unbounded = pd.DataFrame(rows, columns=['x_1','x_2','s_1','s_2','value', 'basic_variable'], index=['c_1','c_2','z'])
tableau_unbounded.ix[:,0:-1] = tableau_unbounded.ix[:,0:-1].astype('float')
tableaux_unbounded = dict()
run_simplex(tableaux_unbounded, tableau_unbounded)
"""
Explanation: \begin{align}
\text{maximise} \quad & 5x_1 + 7x_2 \
\text{subject to} \quad & -x_1 + x_2 + s_1 = 5 \
& -\frac{1}{2}x_1 + x_2 + s_2 = 7 \
\text{and} \quad & x_1, x_2, s_1, s_2 \geq 0
\end{align}
End of explanation
"""
display(tableau_unbounded)
"""
Explanation: We got an error!
ValueError: attempt to get argmin of an empty sequence
Usually, errors are bad things, but in this case, the error is trying to tell us something
In the code:
return tab.ix[ratios[ratios>=0].idxmin(),'basic_variable']
Which is telling us that no non-negative ratio was found! Why is this a problem for us? Let's take a look at our tableau, and our equations at this point in time:
End of explanation
"""
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# define view
x_1_min = 0.0
x_1_max = 3.0
x_2_min = 0.0
x_2_max = 5.0
# res = 100
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
x_1 = numpy.linspace(x_1_min, x_1_max, res)
c_1 = lambda x: 3.0 - x
c_2 = lambda x: -3.0 + x
c_3 = lambda x: 2.0 * numpy.ones(x.size)
c_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # x_1 + x_2 \leq 3
c_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # x_1 - x_2 \leq 3
c_3_line = axes.plot( x_1, c_3(x_1), label='Constraint 3' ) # x_2 \leq 2
# plot objective
x_2 = numpy.linspace(x_2_min, x_2_max, res)
c = numpy.empty([x_1.size, x_2.size])
for i, x_1_i in enumerate(x_1):
c[:,i] = 2.0 * x_1_i + x_2
axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)
# shade feasible region
c_1_bottom = numpy.linspace(0.0, 3.0, res)
c_2_bottom = numpy.linspace(0.0, 3.0, res)
c_3_bottom = numpy.linspace(0.0, 3.0, res)
axes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)
axes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)
axes.fill_between(c_3_bottom, c_3(c_3_bottom), color=plt.rcParams['axes.color_cycle'][2], alpha=0.5)
# label graph
axes.set_title('Degeneracy and Cycling')
axes.set_xlabel(r'x_1')
axes.set_ylabel(r'x_2')
axes.legend(loc=1)
plt.show()
"""
Explanation: \begin{gather}
z = 83 + 17 s_1 - 24 s_2 \
x_1 = 4 + 2 s_1 - 2 s_2 \
x_2 = 9 + s_1 - 2 s_2
\end{gather}
At this point, we want to pick $s_1$ as our entering variable because it has the most negative coefficient, and increasing the value of $s_1$ would most increase the value of $z$.
Usually, increasing the value of $s_1$ would also mean that we have to decrease the value of one of the basic variables to 0 (so that we stay within our feasible region).
Here, what we have is that increasing the value of $s_1$ would also increase the value of both our basic variables, which means that our objective function will be able to increase without bound.
So the simplex method is able to tell us when our problem is unbounded, by virtue of the fact that the negative coefficient in the tableau indicates that we have not attained the optimum, but we are also unable to find a positive ratio to choose our departing variable.
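One way to surface this more gracefully (a sketch of my own, not part of the original helpers; find_departing_safe is a made-up name) is to report unboundedness explicitly when no non-negative ratio exists. run_simplex would have to call this in place of find_departing.
def find_departing_safe(ratios, tab):
    # same choice as find_departing above, but report unboundedness explicitly
    valid = ratios[ratios >= 0]
    if len(valid) == 0:
        raise ValueError('No non-negative pivot ratio: the problem is unbounded.')
    return tab.ix[valid.idxmin(), 'basic_variable']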
Degeneracy and Cycling
Disclaimer: this example was stolen from here.
\begin{align}
\text{maximise} \quad & 2x_1 + 7x_2 \
\text{subject to} \quad & -x_1 + x_2 \leq 3 \
& x_1 - x_2 \leq 3 \
& x_2 \leq 2 \
\text{and} \quad & x_1, x_2 \geq 0
\end{align}
End of explanation
"""
c_1 = numpy.array([[ 3, 1, 1, 0, 0, 6, 's_1']])
c_2 = numpy.array([[ 1, -1, 0, 1, 0, 2, 's_2']])
c_3 = numpy.array([[ 0, 1, 0, 0, 1, 3, 's_3']])
z = numpy.array([[-2, -1, 0, 0, 0, 0, '']])
rows= numpy.concatenate((c_1, c_2, c_3, z), axis=0)
tableau_degenerate = pd.DataFrame(rows, columns=['x_1','x_2','s_1','s_2','s_3','value', 'basic_variable'], index=['c_1','c_2','c_3','z'])
tableau_degenerate.ix[:,0:-1] = tableau_degenerate.ix[:,0:-1].astype('float')
tableaux_degenerate = dict()
run_simplex(tableaux_degenerate, tableau_degenerate)
step_coords = numpy.transpose([numpy.zeros(len(tableaux_degenerate)), 2.0*numpy.ones(len(tableaux_degenerate))])
step_coords[0][1] = 0.0
def degeneracy_plot(step):
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
# define view
x_1_min = 0.0
x_1_max = 3.0
x_2_min = 0.0
x_2_max = 5.0
# res = 100
# plot axes
axes.axhline(0, color='k')
axes.axvline(0, color='k')
# plot constraints
x_1 = numpy.linspace(x_1_min, x_1_max, res)
c_1 = lambda x: 3.0 - x
c_2 = lambda x: -3.0 + x
c_3 = lambda x: 2.0 * numpy.ones(x.size)
c_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # x_1 + x_2 \leq 3
c_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # x_1 - x_2 \leq 3
c_3_line = axes.plot( x_1, c_3(x_1), label='Constraint 3' ) # x_2 \leq 2
# plot objective
x_2 = numpy.linspace(x_2_min, x_2_max, res)
c = numpy.empty([x_1.size, x_2.size])
for i, x_1_i in enumerate(x_1):
c[:,i] = 2.0 * x_1_i + x_2
axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)
# shade feasible region
c_1_bottom = numpy.linspace(0.0, 3.0, res)
c_2_bottom = numpy.linspace(0.0, 3.0, res)
c_3_bottom = numpy.linspace(0.0, 3.0, res)
axes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)
axes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)
axes.fill_between(c_3_bottom, c_3(c_3_bottom), color=plt.rcParams['axes.color_cycle'][2], alpha=0.5)
# plot point
axes.plot(step_coords[step][0], step_coords[step][1], 'ro', markersize=10)
# label graph
axes.set_title('Degeneracy and Cycling, Iteration ' + str(step))
axes.set_xlabel(r'x_1')
axes.set_ylabel(r'x_2')
axes.legend(loc=1)
plt.show()
display(tableaux_degenerate[step])
interact(degeneracy_plot, step=(0,len(tableaux_degenerate)-1))
"""
Explanation: \begin{align}
\text{maximise} \quad & 2x_1 + 7x_2 \
\text{subject to} \quad & -x_1 + x_2 + s_1 = 3 \
& x_1 - x_2 + s_2 = 3 \
& x_2 + s_3 = 2 \
\text{and} \quad & {x_i}_{i=1}^2, {s_j}_{j=1}^3 \geq 0
\end{align}
End of explanation
"""
tableaux_degenerate[1]
"""
Explanation: You think you're moving, but you get nowhere. — Stop and Stare, OneRepublic
As its name suggests, degeneracy is when you get a basic variable (that's supposed to have a non-zero value) with a value of 0, so that you are able to change the basis with a pivot without actually moving to a new vertex or improving the value of the objective function.
In general, predicting when degeneracy will occur is non-trivial; one source claims that it is NP-complete. You can read more about it here.
Bland's Rule
Among the non-basic variables with a negative coefficient in the z-row, choose the one with the smallest index as the entering variable (choosing the most negative coefficient instead, as the code above does, is the classic Dantzig rule)
Among the basic variables that tie for the smallest non-negative value/pivot ratio, choose the one with the smallest index as the departing variable
Using Bland's Rule, the Simplex Method will never cycle even if it encounters degeneracy (i.e. it halts on all inputs); a rough sketch of these index-based choices is given below
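As a sketch (my own, not from the original notebook; find_entering_bland and find_departing_bland are hypothetical names), the smallest-index choices could be written against the tableau layout used above, assuming the columns and rows are already listed in index order. To actually use them, update_stats would need to call these instead of find_entering and find_departing.
def find_entering_bland(tab):
    # first (smallest-index) column whose z-row coefficient is negative
    z_row = tab.ix['z', 0:-2]
    return z_row[z_row < 0].index[0]

def find_departing_bland(ratios, tab):
    # among rows tied at the smallest non-negative ratio, take the first one
    valid = ratios[ratios >= 0]
    tied = valid[valid == valid.min()]
    return tab.ix[tied.index[0], 'basic_variable']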
So what is cycling?
End of explanation
"""
pivot('s_2', 'x_2', tableaux_degenerate[1])
tableaux_degenerate[1]
"""
Explanation: Without Bland's Rule, one could potentially choose to pivot on $s_2$, which will give us
End of explanation
"""
pivot('x_2', 's_2', tableaux_degenerate[1])
tableaux_degenerate[1]
"""
Explanation: Choosing $x_2$ to pivot back to seems like a good idea, right? Nope.
End of explanation
"""
c_1 = numpy.array([[ 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 's_1']])
c_2 = numpy.array([[ 20.0, 1.0, 0.0, 0.0, 1.0, 0.0, 100.0, 's_2']])
c_3 = numpy.array([[ 200.0, 20.0, 1.0, 0.0, 0.0, 1.0, 10000.0, 's_3']])
z = numpy.array([[-100.0, -10.0, -1.0, 0.0, 0.0, 0.0, 0.0, '']])
rows= numpy.concatenate((c_1, c_2, c_3, z), axis=0)
tableau_klee_minty = pd.DataFrame(rows, columns=['x_1','x_2', 'x_3','s_1','s_2','s_3','value', 'basic_variable'], index=['c_1','c_2','c_3','z'])
tableau_klee_minty.ix[:,0:-1] = tableau_klee_minty.ix[:,0:-1].astype('float')
tableaux_klee_minty = dict()
run_simplex(tableaux_klee_minty, tableau_klee_minty)
"""
Explanation: Cycling, ladies and gentlemen, aka a slow spiral into insanity.
$\epsilon-$perturbations
Another earlier (and nowadays less popular) method for avoiding degeneracy is by introducing $\epsilon$-perturbations into the problem. Recall that the standard system goes like
\begin{align}
\text{maximise} \quad & c^T x \
\text{subject to} \quad & Ax = b \
\text{and} \quad & x \geq 0
\end{align}
With $\epsilon$-perturbations, we will instead solve
\begin{align}
\text{maximise} \quad & c^T x \
\text{subject to} \quad & Ax = b + \epsilon \
\text{and} \quad & x \geq 0
\end{align}
which will give us a close enough answer to the original problem, and help us avoid the problem with the 0's. This kind of happens automatically as a bonus if you're running the simplex algorithm on a computer; as the program runs, errors from truncation, etc. build up, and you eventually get out of the cycle because your computer is doing floating point arithmetic.
Which is just about the one good thing about floating point arithmetic, I guess.
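As a rough sketch of what a deliberate perturbation might look like (the $\epsilon$ values here are made up, and this snippet is not part of the original notebook), one could copy the initial degenerate tableau, nudge its right-hand sides, and re-run the simplex helpers:
tableau_perturbed = tableaux_degenerate[0].copy()
eps = numpy.array([1e-3, 1e-6, 1e-9])   # tiny, strictly decreasing perturbations (made up)
tableau_perturbed.ix[0:-1, 'value'] = tableau_perturbed.ix[0:-1, 'value'] + eps
tableaux_perturbed = dict()
run_simplex(tableaux_perturbed, tableau_perturbed)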
Time Complexity of the Simplex Method
The Klee-Minty Cube
\begin{align}
\text{maximise} \quad & 100x_1 + 10x_2 + x_3 \
\text{subject to} \quad & x_1 \leq 1 \
& 20x_1 + x_2 \leq 100 \
& 200x_1 + 20x_2 + x_3 \leq 10000\
\text{and} \quad & x_1, x_2, x_3 \geq 0
\end{align}
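The notorious property of this family (a standard result, stated here from memory rather than taken from the original notebook) is that, with the most-negative-coefficient pivoting rule, the simplex method visits every one of the $2^n$ vertices of the cube, i.e. it needs on the order of $2^n - 1$ pivots. As a sketch, the $n$-dimensional instance could be generated in the same tableau layout used in this section (klee_minty_tableau is a made-up helper):
import numpy
import pandas as pd

def klee_minty_tableau(n):
    # Assumed generalisation of the 3-D instance in this section:
    #   maximise sum_j 10**(n-j) x_j
    #   s.t.     2 * sum_{j<i} 10**(i-j) x_j + x_i <= 100**(i-1),  x >= 0
    A = numpy.zeros((n, n))
    b = numpy.zeros(n)
    c = numpy.zeros(n)
    for i in range(n):
        b[i] = 100.0 ** i
        c[i] = 10.0 ** (n - 1 - i)
        A[i, i] = 1.0
        for j in range(i):
            A[i, j] = 2.0 * 10.0 ** (i - j)
    body = numpy.hstack([A, numpy.eye(n), b.reshape(-1, 1)])
    z_row = numpy.hstack([-c, numpy.zeros(n), [0.0]])
    cols = ['x_%d' % (j + 1) for j in range(n)] + ['s_%d' % (j + 1) for j in range(n)] + ['value']
    tab = pd.DataFrame(numpy.vstack([body, z_row]), columns=cols,
                       index=['c_%d' % (j + 1) for j in range(n)] + ['z'])
    tab['basic_variable'] = ['s_%d' % (j + 1) for j in range(n)] + ['']
    return tab

# klee_minty_tableau(3) should reproduce the hand-built tableau in this section
# (up to dtypes), and larger n should in principle be runnable with run_simplex.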
End of explanation
"""
|
jserenson/Python_Bootcamp
|
Statements Assessment Test - Solutions.ipynb
|
gpl-3.0
|
st = 'Print only the words that start with s in this sentence'
for word in st.split():
if word[0] == 's':
print word
"""
Explanation: Statements Assessment Solutions
Use for, split(), and if to create a Statement that will print out words that start with 's':
End of explanation
"""
range(0,11,2)
"""
Explanation: Use range() to print all the even numbers from 0 to 10.
End of explanation
"""
[x for x in range(1,50) if x%3 == 0]
"""
Explanation: Use List comprehension to create a list of all numbers between 1 and 50 that are divisible by 3.
End of explanation
"""
st = 'Print every word in this sentence that has an even number of letters'
for word in st.split():
if len(word)%2 == 0:
print word+" <-- has an even length!"
"""
Explanation: Go through the string below and if the length of a word is even print "even!"
End of explanation
"""
for num in xrange(1,101):
if num % 5 == 0 and num % 3 == 0:
print "FizzBuzz"
elif num % 3 == 0:
print "Fizz"
elif num % 5 == 0:
print "Buzz"
else:
print num
"""
Explanation: Write a program that prints the integers from 1 to 100. But for multiples of three print "Fizz" instead of the number, and for the multiples of five print "Buzz". For numbers which are multiples of both three and five print "FizzBuzz".
End of explanation
"""
st = 'Create a list of the first letters of every word in this string'
[word[0] for word in st.split()]
"""
Explanation: Use List Comprehension to create a list of the first letters of every word in the string below:
End of explanation
"""
|
cathalmccabe/PYNQ
|
boards/Pynq-Z1/base/notebooks/arduino/arduino_joystick.ipynb
|
bsd-3-clause
|
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
"""
Explanation: Arduino Joystick Shield Example
This example shows how to use the Sparkfun Joystick
on the board. The Joystick shield contains an analog joystick which is
connected to A0 and A1 analog channels of the Arduino connector. It also
contains four push buttons connected at D3-D6 pins of the Arduino connector.
For this notebook, an Arduino joystick shield is required.
End of explanation
"""
DIRECTION_VALUE_MAP = {
0: 'up',
1: 'up_right',
2: 'right',
3: 'down_right',
4: 'down',
5: 'down_left',
6: 'left',
7: 'up_left',
8: 'center'
}
BUTTON_INDEX_MAP = {
'D3': 0,
'D4': 1,
'D5': 2,
'D6': 3
}
"""
Explanation: 1. Use Microblaze to control the joystick
Make sure the joystick shield is plugged in. For the Microblaze to transfer
direction or button values back, we need to define a few additional constants.
End of explanation
"""
%%microblaze base.ARDUINO
#include "xparameters.h"
#include "circular_buffer.h"
#include "gpio.h"
#include "xsysmon.h"
#include <pyprintf.h>
#define X_THRESHOLD_LOW 25000
#define X_THRESHOLD_HIGH 39000
#define Y_THRESHOLD_LOW 25000
#define Y_THRESHOLD_HIGH 39000
typedef enum directions {
up = 0,
right_up,
right,
right_down,
down,
left_down,
left,
left_up,
centered
}direction_e;
static gpio gpio_buttons[4];
static XSysMon SysMonInst;
XSysMon_Config *SysMonConfigPtr;
XSysMon *SysMonInstPtr = &SysMonInst;
int init_joystick(){
unsigned int i, status;
SysMonConfigPtr = XSysMon_LookupConfig(XPAR_SYSMON_0_DEVICE_ID);
if(SysMonConfigPtr == NULL)
return -1;
status = XSysMon_CfgInitialize(
SysMonInstPtr, SysMonConfigPtr, SysMonConfigPtr->BaseAddress);
if(XST_SUCCESS != status)
return -1;
for (i=0; i<4; i++){
gpio_buttons[i] = gpio_open(i+3);
gpio_set_direction(gpio_buttons[i], GPIO_IN);
}
return 0;
}
unsigned int get_direction_value(){
direction_e direction;
unsigned int x_position, y_position;
while ((XSysMon_GetStatus(SysMonInstPtr) &
XSM_SR_EOS_MASK) != XSM_SR_EOS_MASK);
x_position = XSysMon_GetAdcData(SysMonInstPtr, XSM_CH_AUX_MIN+1);
y_position = XSysMon_GetAdcData(SysMonInstPtr, XSM_CH_AUX_MIN+9);
if (x_position > X_THRESHOLD_HIGH) {
if (y_position > Y_THRESHOLD_HIGH) {
direction = right_up;
} else if (y_position < Y_THRESHOLD_LOW) {
direction = right_down;
} else {
direction = right;
}
} else if (x_position < X_THRESHOLD_LOW) {
if (y_position > Y_THRESHOLD_HIGH) {
direction = left_up;
} else if (y_position < Y_THRESHOLD_LOW) {
direction = left_down;
} else {
direction = left;
}
} else {
if (y_position > Y_THRESHOLD_HIGH) {
direction = up;
} else if (y_position < Y_THRESHOLD_LOW) {
direction = down;
} else {
direction = centered;
}
}
return direction;
}
unsigned int get_button_value(unsigned int btn_i){
unsigned int value;
value = gpio_read(gpio_buttons[btn_i]);
return value;
}
"""
Explanation: The joystick can measure horizontal direction x
and vertical direction y.
The thresholds for raw values are:
Horizontal:
| Threshold | Direction |
| ------------------ |:------------:|
| x < 25000 | left |
| 25000 < x < 39000 | center |
| x > 39000 | right |
Vertical:
| Threshold | Direction |
| ------------------ |:------------:|
| y < 25000 | down |
| 25000 < y < 39000 | center |
| y > 39000 | up |
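To make the decision logic concrete, here is a small pure-Python sketch (my own illustration; the actual classification is done by the Microblaze C program in this section) that applies the same thresholds and produces the same direction names as DIRECTION_VALUE_MAP:
X_LOW, X_HIGH = 25000, 39000
Y_LOW, Y_HIGH = 25000, 39000

def classify(x, y):
    # map a raw (x, y) reading to one of the direction strings above
    if x > X_HIGH:
        horizontal = 'right'
    elif x < X_LOW:
        horizontal = 'left'
    else:
        horizontal = ''
    if y > Y_HIGH:
        vertical = 'up'
    elif y < Y_LOW:
        vertical = 'down'
    else:
        vertical = ''
    return '_'.join(filter(None, [vertical, horizontal])) or 'center'

classify(40000, 20000)   # 'down_right'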
End of explanation
"""
init_joystick()
"""
Explanation: 2. Define Python wrapper for Microblaze functions
We will also need to initialize the joystick before we can read any value.
The following function returns 0 if the initialization is successful.
End of explanation
"""
def read_direction():
direction_value = get_direction_value()
return DIRECTION_VALUE_MAP[direction_value]
def read_button(button):
return get_button_value(BUTTON_INDEX_MAP[button])
"""
Explanation: The following Python wrappers will call the Microblaze functions internally.
End of explanation
"""
read_direction()
"""
Explanation: 3. Find direction
We can measure the direction by calling read_direction().
For the next cell, leave the joystick in its natural position.
End of explanation
"""
read_direction()
"""
Explanation: Let's pull the joystick towards the bottom right corner.
End of explanation
"""
for button in BUTTON_INDEX_MAP:
if read_button(button):
print('Button {} is not pressed.'.format(button))
else:
print('Button {} is pressed.'.format(button))
"""
Explanation: 4. Read button values
Based on the schematic
of the shield, we can see the read value will go low if the corresponding
button has been pressed.
Run the next cell while pushing both button D4 and D6.
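Optionally (a small sketch of mine, not in the original notebook), the two wrappers can be combined into a simple polling loop; the interval and iteration count below are arbitrary:
import time

for _ in range(20):
    pressed = [name for name in BUTTON_INDEX_MAP if read_button(name) == 0]
    print(read_direction(), pressed)
    time.sleep(0.5)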
End of explanation
"""
|
AllenDowney/ThinkBayes2
|
soln/chap15.ipynb
|
mit
|
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py')
from utils import set_pyplot_params
set_pyplot_params()
"""
Explanation: Mark and Recapture
Think Bayes, Second Edition
Copyright 2020 Allen B. Downey
License: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)
End of explanation
"""
import numpy as np
from scipy.stats import hypergeom
N = 100
K = 23
n = 19
ks = np.arange(12)
ps = hypergeom(N, K, n).pmf(ks)
"""
Explanation: This chapter introduces "mark and recapture" experiments, in which we sample individuals from a population, mark them somehow, and then take a second sample from the same population. Seeing how many individuals in the second sample are marked, we can estimate the size of the population.
Experiments like this were originally used in ecology, but turn out to be useful in many other fields. Examples in this chapter include software engineering and epidemiology.
Also, in this chapter we'll work with models that have three parameters, so we'll extend the joint distributions we've been using to three dimensions.
But first, grizzly bears.
The Grizzly Bear Problem
In 1996 and 1997 researchers deployed bear traps in locations in British Columbia and Alberta, Canada, in an effort to estimate the population of grizzly bears. They describe the experiment in this article.
The "trap" consists of a lure and several strands of barbed wire intended to capture samples of hair from bears that visit the lure. Using the hair samples, the researchers use DNA analysis to identify individual bears.
During the first session, the researchers deployed traps at 76 sites. Returning 10 days later, they obtained 1043 hair samples and identified 23 different bears. During a second 10-day session they obtained 1191 samples from 19 different bears, where 4 of the 19 were from bears they had identified in the first batch.
To estimate the population of bears from this data, we need a model for the probability that each bear will be observed during each session. As a starting place, we'll make the simplest assumption, that every bear in the population has the same (unknown) probability of being sampled during each session.
With these assumptions we can compute the probability of the data for a range of possible populations.
As an example, let's suppose that the actual population of bears is 100.
After the first session, 23 of the 100 bears have been identified.
During the second session, if we choose 19 bears at random, what is the probability that 4 of them were previously identified?
I'll define
$N$: actual population size, 100.
$K$: number of bears identified in the first session, 23.
$n$: number of bears observed in the second session, 19 in the example.
$k$: number of bears in the second session that were previously identified, 4.
For given values of $N$, $K$, and $n$, the probability of finding $k$ previously-identified bears is given by the hypergeometric distribution:
$$\binom{K}{k} \binom{N-K}{n-k}/ \binom{N}{n}$$
where the binomial coefficient, $\binom{K}{k}$, is the number of subsets of size $k$ we can choose from a population of size $K$.
To understand why, consider:
The denominator, $\binom{N}{n}$, is the number of subsets of $n$ we could choose from a population of $N$ bears.
The numerator is the number of subsets that contain $k$ bears from the previously identified $K$ and $n-k$ from the previously unseen $N-K$.
SciPy provides hypergeom, which we can use to compute this probability for a range of values of $k$.
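As a quick sanity check (added here, not part of the original text), the closed-form expression can be evaluated directly with binomial coefficients and compared against hypergeom.pmf for a single value of $k$:
from scipy.special import comb
from scipy.stats import hypergeom

# direct evaluation of the formula for N=100, K=23, n=19, k=4
direct = comb(23, 4) * comb(100 - 23, 19 - 4) / comb(100, 19)
direct, hypergeom(100, 23, 19).pmf(4)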
End of explanation
"""
import matplotlib.pyplot as plt
from utils import decorate
plt.bar(ks, ps)
decorate(xlabel='Number of bears observed twice',
ylabel='PMF',
title='Hypergeometric distribution of k (known population 100)')
"""
Explanation: The result is the distribution of $k$ with given parameters $N$, $K$, and $n$.
Here's what it looks like.
End of explanation
"""
import numpy as np
from utils import make_uniform
qs = np.arange(50, 501)
prior_N = make_uniform(qs, name='N')
prior_N.shape
"""
Explanation: The most likely value of $k$ is 4, which is the value actually observed in the experiment.
That suggests that $N=100$ is a reasonable estimate of the population, given this data.
We've computed the distribution of $k$ given $N$, $K$, and $n$.
Now let's go the other way: given $K$, $n$, and $k$, how can we estimate the total population, $N$?
The Update
As a starting place, let's suppose that, prior to this study, an expert estimates that the local bear population is between 50 and 500, and equally likely to be any value in that range.
I'll use make_uniform to make a uniform distribution of integers in this range.
End of explanation
"""
Ns = prior_N.qs
K = 23
n = 19
k = 4
likelihood = hypergeom(Ns, K, n).pmf(k)
"""
Explanation: So that's our prior.
To compute the likelihood of the data, we can use hypergeom with constants K and n, and a range of values of N.
End of explanation
"""
posterior_N = prior_N * likelihood
posterior_N.normalize()
"""
Explanation: We can compute the posterior in the usual way.
End of explanation
"""
posterior_N.plot(color='C4')
decorate(xlabel='Population of bears (N)',
ylabel='PDF',
title='Posterior distribution of N')
"""
Explanation: And here's what it looks like.
End of explanation
"""
posterior_N.max_prob()
"""
Explanation: The most likely value is 109.
End of explanation
"""
posterior_N.mean()
"""
Explanation: But the distribution is skewed to the right, so the posterior mean is substantially higher.
End of explanation
"""
posterior_N.credible_interval(0.9)
"""
Explanation: And the credible interval is quite wide.
End of explanation
"""
K = 23
n = 19
k = 4
"""
Explanation: This solution is relatively simple, but it turns out we can do a little better if we model the unknown probability of observing a bear explicitly.
Two-Parameter Model
Next we'll try a model with two parameters: the number of bears, N, and the probability of observing a bear, p.
We'll assume that the probability is the same in both rounds, which is probably reasonable in this case because it is the same kind of trap in the same place.
We'll also assume that the probabilities are independent; that is, the probability a bear is observed in the second round does not depend on whether it was observed in the first round. This assumption might be less reasonable, but for now it is a necessary simplification.
Here are the counts again:
End of explanation
"""
k10 = 23 - 4
k01 = 19 - 4
k11 = 4
"""
Explanation: For this model, I'll express the data in a notation that will make it easier to generalize to more than two rounds:
k10 is the number of bears observed in the first round but not the second,
k01 is the number of bears observed in the second round but not the first, and
k11 is the number of bears observed in both rounds.
Here are their values.
End of explanation
"""
N = 100
observed = k01 + k10 + k11
k00 = N - observed
k00
"""
Explanation: Suppose we know the actual values of N and p. We can use them to compute the likelihood of this data.
For example, suppose we know that N=100 and p=0.2.
We can use N to compute k00, which is the number of unobserved bears.
End of explanation
"""
x = [k00, k01, k10, k11]
x
"""
Explanation: For the update, it will be convenient to store the data as a list that represents the number of bears in each category.
End of explanation
"""
p = 0.2
q = 1-p
y = [q*q, q*p, p*q, p*p]
y
"""
Explanation: Now, if we know p=0.2, we can compute the probability a bear falls in each category. For example, the probability of being observed in both rounds is p*p, and the probability of being unobserved in both rounds is q*q (where q=1-p).
End of explanation
"""
from scipy.stats import multinomial
likelihood = multinomial.pmf(x, N, y)
likelihood
"""
Explanation: Now the probability of the data is given by the multinomial distribution:
$$\frac{N!}{\prod x_i!} \prod y_i^{x_i}$$
where $N$ is actual population, $x$ is a sequence with the counts in each category, and $y$ is a sequence of probabilities for each category.
SciPy provides multinomial, which provides pmf, which computes this probability.
Here is the probability of the data for these values of N and p.
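As an optional check (mine, not part of the original text), the formula can also be evaluated directly with factorials and compared against multinomial.pmf:
from math import factorial
from scipy.stats import multinomial
import numpy as np

def multinomial_pmf_direct(x, N, y):
    # N! / prod(x_i!) * prod(y_i ** x_i), evaluated directly
    coef = factorial(N)
    for xi in x:
        coef //= factorial(xi)
    return float(coef) * np.prod(np.power(y, x))

multinomial_pmf_direct(x, N, y), multinomial.pmf(x, N, y)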
End of explanation
"""
qs = np.linspace(0, 0.99, num=100)
prior_p = make_uniform(qs, name='p')
"""
Explanation: That's the likelihood if we know N and p, but of course we don't. So we'll choose prior distributions for N and p, and use the likelihoods to update it.
The Prior
We'll use prior_N again for the prior distribution of N, and a uniform prior for the probability of observing a bear, p:
End of explanation
"""
from utils import make_joint
joint_prior = make_joint(prior_p, prior_N)
joint_prior.shape
"""
Explanation: We can make a joint distribution in the usual way.
End of explanation
"""
from empiricaldist import Pmf
joint_pmf = Pmf(joint_prior.stack())
joint_pmf.head(3)
type(joint_pmf)
type(joint_pmf.index)
joint_pmf.shape
"""
Explanation: The result is a Pandas DataFrame with values of N down the rows and values of p across the columns.
However, for this problem it will be convenient to represent the prior distribution as a 1-D Series rather than a 2-D DataFrame.
We can convert from one format to the other using stack.
End of explanation
"""
likelihood = joint_pmf.copy()
"""
Explanation: The result is a Pmf whose index is a MultiIndex.
A MultiIndex can have more than one column; in this example, the first column contains values of N and the second column contains values of p.
The Pmf has one row (and one prior probability) for each possible pair of parameters N and p.
So the total number of rows is the product of the lengths of prior_N and prior_p.
Now we have to compute the likelihood of the data for each pair of parameters.
The Update
To allocate space for the likelihoods, it is convenient to make a copy of joint_pmf:
End of explanation
"""
observed = k01 + k10 + k11
for N, p in joint_pmf.index:
k00 = N - observed
x = [k00, k01, k10, k11]
q = 1-p
y = [q*q, q*p, p*q, p*p]
likelihood[N, p] = multinomial.pmf(x, N, y)
"""
Explanation: As we loop through the pairs of parameters, we compute the likelihood of the data as in the previous section, and then store the result as an element of likelihood.
End of explanation
"""
posterior_pmf = joint_pmf * likelihood
posterior_pmf.normalize()
"""
Explanation: Now we can compute the posterior in the usual way.
End of explanation
"""
joint_posterior = posterior_pmf.unstack()
"""
Explanation: We'll use plot_contour again to visualize the joint posterior distribution.
But remember that the posterior distribution we just computed is represented as a Pmf, which is a Series, and plot_contour expects a DataFrame.
Since we used stack to convert from a DataFrame to a Series, we can use unstack to go the other way.
End of explanation
"""
from utils import plot_contour
plot_contour(joint_posterior)
decorate(title='Joint posterior distribution of N and p')
"""
Explanation: And here's what the result looks like.
End of explanation
"""
from utils import marginal
posterior2_p = marginal(joint_posterior, 0)
posterior2_N = marginal(joint_posterior, 1)
"""
Explanation: The most likely values of N are near 100, as in the previous model. The most likely values of p are near 0.2.
The shape of this contour indicates that these parameters are correlated. If p is near the low end of the range, the most likely values of N are higher; if p is near the high end of the range, N is lower.
Now that we have a posterior DataFrame, we can extract the marginal distributions in the usual way.
End of explanation
"""
posterior2_p.plot(color='C1')
decorate(xlabel='Probability of observing a bear',
ylabel='PDF',
title='Posterior marginal distribution of p')
"""
Explanation: Here's the posterior distribution for p:
End of explanation
"""
posterior_N.plot(label='one-parameter model', color='C4')
posterior2_N.plot(label='two-parameter model', color='C1')
decorate(xlabel='Population of bears (N)',
ylabel='PDF',
title='Posterior marginal distribution of N')
"""
Explanation: The most likely values are near 0.2.
Here's the posterior distribution for N based on the two-parameter model, along with the posterior we got using the one-parameter (hypergeometric) model.
End of explanation
"""
print(posterior_N.mean(),
posterior_N.credible_interval(0.9))
print(posterior2_N.mean(),
posterior2_N.credible_interval(0.9))
"""
Explanation: With the two-parameter model, the mean is a little lower and the 90% credible interval is a little narrower.
End of explanation
"""
N1 = 138
"""
Explanation: The two-parameter model yields a narrower posterior distribution for N, compared to the one-parameter model, because it takes advantage of an additional source of information: the consistency of the two observations.
To see how this helps, consider a scenario where N is relatively low, like 138 (the posterior mean of the two-parameter model).
End of explanation
"""
mean = (23 + 19) / 2
p = mean/N1
p
"""
Explanation: Given that we saw 23 bears during the first trial and 19 during the second, we can estimate the corresponding value of p.
End of explanation
"""
from scipy.stats import binom
binom(N1, p).std()
"""
Explanation: With these parameters, how much variability do you expect in the number of bears from one trial to the next? We can quantify that by computing the standard deviation of the binomial distribution with these parameters.
End of explanation
"""
N2 = 173
p = mean/N2
p
"""
Explanation: Now let's consider a second scenario where N is 173, the posterior mean of the one-parameter model. The corresponding value of p is lower.
End of explanation
"""
binom(N2, p).std()
"""
Explanation: In this scenario, the variation we expect to see from one trial to the next is higher.
End of explanation
"""
import pandas as pd
from seaborn import JointGrid
def joint_plot(joint, **options):
"""Show joint and marginal distributions.
joint: DataFrame that represents a joint distribution
options: passed to JointGrid
"""
# get the names of the parameters
x = joint.columns.name
x = 'x' if x is None else x
y = joint.index.name
y = 'y' if y is None else y
# make a JointGrid with minimal data
data = pd.DataFrame({x:[0], y:[0]})
g = JointGrid(x=x, y=y, data=data, **options)
# replace the contour plot
g.ax_joint.contour(joint.columns,
joint.index,
joint,
cmap='viridis')
# replace the marginals
marginal_x = marginal(joint, 0)
g.ax_marg_x.plot(marginal_x.qs, marginal_x.ps)
marginal_y = marginal(joint, 1)
g.ax_marg_y.plot(marginal_y.ps, marginal_y.qs)
joint_plot(joint_posterior)
"""
Explanation: So if the number of bears we observe is the same in both trials, that would be evidence for lower values of N, where we expect more consistency.
If the number of bears is substantially different between the two trials, that would be evidence for higher values of N.
In the actual data, the difference between the two trials is low, which is why the posterior mean of the two-parameter model is lower.
The two-parameter model takes advantage of additional information, which is why the credible interval is narrower.
Joint and Marginal Distributions
Marginal distributions are called "marginal" because in a common visualization they appear in the margins of the plot.
Seaborn provides a class called JointGrid that creates this visualization.
The following function uses it to show the joint and marginal distributions in a single plot.
End of explanation
"""
k10 = 20 - 3
k01 = 15 - 3
k11 = 3
"""
Explanation: A JointGrid is a concise way to represent the joint and marginal distributions visually.
The Lincoln Index Problem
In an excellent blog post, John D. Cook wrote about the Lincoln index, which is a way to estimate the
number of errors in a document (or program) by comparing results from
two independent testers.
Here's his presentation of the problem:
"Suppose you have a tester who finds 20 bugs in your program. You
want to estimate how many bugs are really in the program. You know
there are at least 20 bugs, and if you have supreme confidence in your
tester, you may suppose there are around 20 bugs. But maybe your
tester isn't very good. Maybe there are hundreds of bugs. How can you
have any idea how many bugs there are? There's no way to know with one
tester. But if you have two testers, you can get a good idea, even if
you don't know how skilled the testers are."
Suppose the first tester finds 20 bugs, the second finds 15, and they
find 3 in common; how can we estimate the number of bugs?
This problem is similar to the Grizzly Bear problem, so I'll represent the data in the same way.
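Before setting up the Bayesian model, it is worth noting the classical point estimate (the Lincoln index itself, added here for comparison): with $K$ bugs found by the first tester, $n$ by the second, and $k$ in common, the estimate is $Kn/k$.
20 * 15 / 3   # = 100, in the same ballpark as the Bayesian estimates below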
End of explanation
"""
p0, p1 = 0.2, 0.15
"""
Explanation: But in this case it is probably not reasonable to assume that the testers have the same probability of finding a bug.
So I'll define two parameters, p0 for the probability that the first tester finds a bug, and p1 for the probability that the second tester finds a bug.
I will continue to assume that the probabilities are independent, which is like assuming that all bugs are equally easy to find. That might not be a good assumption, but let's stick with it for now.
As an example, suppose we know that the probabilities are 0.2 and 0.15.
End of explanation
"""
def compute_probs(p0, p1):
"""Computes the probability for each of 4 categories."""
q0 = 1-p0
q1 = 1-p1
return [q0*q1, q0*p1, p0*q1, p0*p1]
y = compute_probs(p0, p1)
y
"""
Explanation: We can compute the array of probabilities, y, like this:
End of explanation
"""
qs = np.arange(32, 350, step=5)
prior_N = make_uniform(qs, name='N')
prior_N.head(3)
"""
Explanation: With these probabilities, there is a
68% chance that neither tester finds the bug and a
3% chance that both do.
Pretending that these probabilities are known, we can compute the posterior distribution for N.
Here's a prior distribution that's uniform from 32 to 350 bugs.
End of explanation
"""
data = np.array([0, k01, k10, k11])
"""
Explanation: I'll put the data in an array, with 0 as a place-keeper for the unknown value k00.
End of explanation
"""
likelihood = prior_N.copy()
observed = data.sum()
x = data.copy()
for N in prior_N.qs:
x[0] = N - observed
likelihood[N] = multinomial.pmf(x, N, y)
"""
Explanation: And here are the likelihoods for each value of N, with ps as a constant.
End of explanation
"""
posterior_N = prior_N * likelihood
posterior_N.normalize()
"""
Explanation: We can compute the posterior in the usual way.
End of explanation
"""
posterior_N.plot(color='C4')
decorate(xlabel='Number of bugs (N)',
ylabel='PMF',
title='Posterior marginal distribution of n with known p1, p2')
print(posterior_N.mean(),
posterior_N.credible_interval(0.9))
"""
Explanation: And here's what it looks like.
End of explanation
"""
qs = np.linspace(0, 1, num=51)
prior_p0 = make_uniform(qs, name='p0')
prior_p1 = make_uniform(qs, name='p1')
"""
Explanation: With the assumption that p0 and p1 are known to be 0.2 and 0.15, the posterior mean is 102 with 90% credible interval (77, 127).
But this result is based on the assumption that we know the probabilities, and we don't.
Three-Parameter Model
What we need is a model with three parameters: N, p0, and p1.
We'll use prior_N again for the prior distribution of N, and here are the priors for p0 and p1:
End of explanation
"""
joint2 = make_joint(prior_p0, prior_N)
joint2.shape
"""
Explanation: Now we have to assemble them into a joint prior with three dimensions.
I'll start by putting the first two into a DataFrame.
End of explanation
"""
joint2_pmf = Pmf(joint2.stack())
joint2_pmf.head(3)
"""
Explanation: Now I'll stack them, as in the previous example, and put the result in a Pmf.
End of explanation
"""
joint3 = make_joint(prior_p1, joint2_pmf)
joint3.shape
"""
Explanation: We can use make_joint again to add in the third parameter.
End of explanation
"""
joint3.head(3)
"""
Explanation: The result is a DataFrame with values of N and p0 in a MultiIndex that goes down the rows and values of p1 in an index that goes across the columns.
End of explanation
"""
joint3_pmf = Pmf(joint3.stack())
joint3_pmf.head(3)
"""
Explanation: Now I'll apply stack again:
End of explanation
"""
joint3_pmf.shape
"""
Explanation: The result is a Pmf with a three-column MultiIndex containing all possible triplets of parameters.
The number of rows is the product of the number of values in all three priors, which is almost 170,000.
End of explanation
"""
likelihood = joint3_pmf.copy()
observed = data.sum()
x = data.copy()
for N, p0, p1 in joint3_pmf.index:
x[0] = N - observed
y = compute_probs(p0, p1)
likelihood[N, p0, p1] = multinomial.pmf(x, N, y)
"""
Explanation: That's still small enough to be practical, but it will take longer to compute the likelihoods than in the previous examples.
Here's the loop that computes the likelihoods; it's similar to the one in the previous section:
End of explanation
"""
posterior_pmf = joint3_pmf * likelihood
posterior_pmf.normalize()
"""
Explanation: We can compute the posterior in the usual way.
End of explanation
"""
posterior_N = posterior_pmf.marginal(0)
"""
Explanation: Now, to extract the marginal distributions, we could unstack the joint posterior as we did in the previous section.
But Pmf provides a version of marginal that works with a Pmf rather than a DataFrame.
Here's how we use it to get the posterior distribution for N.
End of explanation
"""
posterior_N.plot(color='C4')
decorate(xlabel='Number of bugs (N)',
ylabel='PDF',
title='Posterior marginal distributions of N')
posterior_N.mean()
"""
Explanation: And here's what it looks like.
End of explanation
"""
posterior_p1 = posterior_pmf.marginal(1)
posterior_p2 = posterior_pmf.marginal(2)
posterior_p1.plot(label='p1')
posterior_p2.plot(label='p2')
decorate(xlabel='Probability of finding a bug',
ylabel='PDF',
title='Posterior marginal distributions of p1 and p2')
posterior_p1.mean(), posterior_p1.credible_interval(0.9)
posterior_p2.mean(), posterior_p2.credible_interval(0.9)
"""
Explanation: The posterior mean is 105 bugs, which suggests that there are still many bugs the testers have not found.
Here are the posteriors for p0 and p1.
End of explanation
"""
data2 = np.array([0, 73, 86, 49])
"""
Explanation: Comparing the posterior distributions, the tester who found more bugs probably has a higher probability of finding bugs. The posterior means are about 23% and 18%. But the distributions overlap, so we should not be too sure.
This is the first example we've seen with three parameters.
As the number of parameters increases, the number of combinations increases quickly.
The method we've been using so far, enumerating all possible combinations, becomes impractical if the number of parameters is more than 3 or 4.
However there are other methods that can handle models with many more parameters, as we'll see in <<_MCMC>>.
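A rough illustration of why (added here): the grid we just enumerated already has almost 170,000 cells, and each extra parameter multiplies that count by the number of values in its prior.
grid3 = len(prior_N) * len(prior_p0) * len(prior_p1)
grid3, grid3 * 50   # current grid vs. adding a 4th parameter with 50 values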
Summary
The problems in this chapter are examples of mark and recapture experiments, which are used in ecology to estimate animal populations. They also have applications in engineering, as in the Lincoln index problem. And in the exercises you'll see that they are used in epidemiology, too.
This chapter introduces two new probability distributions:
The hypergeometric distribution is a variation of the binomial distribution in which samples are drawn from the population without replacement.
The multinomial distribution is a generalization of the binomial distribution where there are more than two possible outcomes.
Also in this chapter, we saw the first example of a model with three parameters. We'll see more in subsequent chapters.
Exercises
Exercise: In an excellent paper, Anne Chao explains how mark and recapture experiments are used in epidemiology to estimate the prevalence of a disease in a human population based on multiple incomplete lists of cases.
One of the examples in that paper is a study "to estimate the number of people who were infected by hepatitis in an outbreak that occurred in and around a college in northern Taiwan from April to July 1995."
Three lists of cases were available:
135 cases identified using a serum test.
122 cases reported by local hospitals.
126 cases reported on questionnaires collected by epidemiologists.
In this exercise, we'll use only the first two lists; in the next exercise we'll bring in the third list.
Make a joint prior and update it using this data, then compute the posterior mean of N and a 90% credible interval.
The following array contains 0 as a place-holder for the unknown value of k00, followed by known values of k01, k10, and k11.
End of explanation
"""
qs = np.arange(200, 500, step=5)
prior_N = make_uniform(qs, name='N')
prior_N.head(3)
qs = np.linspace(0, 0.98, num=50)
prior_p = make_uniform(qs, name='p')
prior_p.head(3)
# Solution
joint_prior = make_joint(prior_p, prior_N)
joint_prior.head(3)
# Solution
prior_pmf = Pmf(joint_prior.stack())
prior_pmf.head(3)
# Solution
observed = data2.sum()
x = data2.copy()
likelihood = prior_pmf.copy()
for N, p in prior_pmf.index:
x[0] = N - observed
q = 1-p
y = [q*q, q*p, p*q, p*p]
likelihood.loc[N, p] = multinomial.pmf(x, N, y)
# Solution
posterior_pmf = prior_pmf * likelihood
posterior_pmf.normalize()
# Solution
joint_posterior = posterior_pmf.unstack()
# Solution
plot_contour(joint_posterior)
decorate(title='Joint posterior distribution of N and p')
# Solution
marginal_N = marginal(joint_posterior, 1)
marginal_N.plot(color='C4')
decorate(xlabel='Number of cases (N)',
ylabel='PDF',
title='Posterior marginal distribution of N')
# Solution
marginal_N.mean(), marginal_N.credible_interval(0.9)
"""
Explanation: These data indicate that there are 73 cases on the second list that are not on the first, 86 cases on the first list that are not on the second, and 49 cases on both lists.
To keep things simple, we'll assume that each case has the same probability of appearing on each list. So we'll use a two-parameter model where N is the total number of cases and p is the probability that any case appears on any list.
Here are priors you can start with (but feel free to modify them).
End of explanation
"""
data3 = np.array([0, 63, 55, 18, 69, 17, 21, 28])
"""
Explanation: Exercise: Now let's do the version of the problem with all three lists. Here's the data from Chou's paper:
Hepatitis A virus list
| P | Q | E | Data |
|---|---|---|------|
| 1 | 1 | 1 | k111 = 28 |
| 1 | 1 | 0 | k110 = 21 |
| 1 | 0 | 1 | k101 = 17 |
| 1 | 0 | 0 | k100 = 69 |
| 0 | 1 | 1 | k011 = 18 |
| 0 | 1 | 0 | k010 = 55 |
| 0 | 0 | 1 | k001 = 63 |
| 0 | 0 | 0 | k000 = ?? |
Write a loop that computes the likelihood of the data for each pair of parameters, then update the prior and compute the posterior mean of N. How does it compare to the results using only the first two lists?
Here's the data in a NumPy array (in reverse order).
End of explanation
"""
q = 1-p
ps = [q*q, q*p, p*q, p*p]
"""
Explanation: Again, the first value is a place-keeper for the unknown k000. The second value is k001, which means there are 63 cases that appear on the third list but not the first two. And the last value is k111, which means there are 28 cases that appear on all three lists.
In the two-list version of the problem we computed ps by enumerating the combinations of p and q.
End of explanation
"""
def cartesian_product(*args, **options):
"""Cartesian product of sequences.
args: any number of sequences
options: passes to `MultiIndex.from_product`
returns: DataFrame with one column per sequence
"""
index = pd.MultiIndex.from_product(args, **options)
return pd.DataFrame(index=index).reset_index()
"""
Explanation: We could do the same thing for the three-list version, computing the probability for each of the eight categories. But we can generalize it by recognizing that we are computing the cartesian product of p and q, repeated once for each list.
And we can use the following function (based on this StackOverflow answer) to compute Cartesian products:
End of explanation
"""
p = 0.2
t = (1-p, p)
df = cartesian_product(t, t, t)
df
"""
Explanation: Here's an example with p=0.2:
End of explanation
"""
y = df.prod(axis=1)
y
"""
Explanation: To compute the probability for each category, we take the product across the columns:
End of explanation
"""
# Solution
observed = data3.sum()
x = data3.copy()
likelihood = prior_pmf.copy()
for N, p in prior_pmf.index:
x[0] = N - observed
t = (1-p, p)
df = cartesian_product(t, t, t)
y = df.prod(axis=1)
likelihood.loc[N, p] = multinomial.pmf(x, N, y)
# Solution
posterior_pmf = prior_pmf * likelihood
posterior_pmf.normalize()
# Solution
joint_posterior = posterior_pmf.unstack()
# Solution
plot_contour(joint_posterior)
decorate(title='Joint posterior distribution of N and p')
# Solution
marginal3_N = marginal(joint_posterior, 1)
# Solution
marginal_N.plot(label='After two lists', color='C4')
marginal3_N.plot(label='After three lists', color='C1')
decorate(xlabel='Number of cases (N)',
ylabel='PDF',
title='Posterior marginal distribution of N')
# Solution
marginal_N.mean(), marginal_N.credible_interval(0.9)
# Solution
marginal3_N.mean(), marginal3_N.credible_interval(0.9)
"""
Explanation: Now you finish it off from there.
End of explanation
"""
|
saalfeldlab/template-building
|
python/analysis/H5TranformFormatTables.ipynb
|
bsd-2-clause
|
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
bridge_list = ['JRC2018F_FAFB', 'JRC2018F_FCWB', 'JRC2018F_JFRC2010', 'JRC2018F_JFRC2013', 'JRC2018F_TEFOR']
sizes_data_f="/groups/saalfeld/home/bogovicj/dev/template/template-building-pub/scripts/h5Analysis/h5Sizes.csv"
sizes_h5 = pd.read_csv( sizes_data_f )
sizes_h5
"""
Explanation: Tables and results describing h5 transform files
End of explanation
"""
ants_transform_sizes = [
('JRC2018F_FAFB_Warp.nii',2833742131),
('JRC2018F_FAFB_InverseWarp.nii',2832463797),
('JRC2018F_FCWB_Warp.nii',7893413462),
('JRC2018F_FCWB_InverseWarp.nii',7897010532),
('JRC2018F_JFRC2010_Warp.nii',8133116764),
('JRC2018F_JFRC2010_InverseWarp.nii',8143715252),
('JRC2018F_JFRC2013_Warp.nii',8002888343),
('JRC2018F_JFRC2013_InverseWarp.nii',8003178281),
('JRC2018F_TEFOR_Warp.nii',8127594570),
('JRC2018F_TEFOR_InverseWarp.nii',8136532225),
]
ants_df_raw = pd.DataFrame( ants_transform_sizes )
ants_df_raw.columns = [ 'file', 'size(bytes)']
def bridge_index( x, bridge_list ):
for i, b in enumerate( bridge_list ):
if x.startswith( b ):
return i
ants_df_raw['bridge_idx'] = ants_df_raw.apply( lambda x: (bridge_index(x['file'], bridge_list )), axis=1)
# Build a dataframe containing the combined size of the compressed inverse and forward fields
ants_df_data = []
for i in range( len(bridge_list)):
ants_df_data += [ (bridge_list[i], ants_df_raw[ants_df_raw.bridge_idx == i ]['size(bytes)'].sum()) ]
ants_df = pd.DataFrame( ants_df_data )
ants_df.columns = [ 'file', 'size(bytes)']
# Combine h5 size and ants dfield size tables
billion=1e9
sizes_h5['file'] = sizes_h5.apply( lambda x: x['file'].replace('.h5',''), axis=1 )
df = sizes_h5.set_index('file').join(ants_df.set_index('file'), rsuffix='_ants')
# Compute relative size
df['ratio'] = df.apply( lambda x: x['size(bytes)']/x['size(bytes)_ants'], axis=1)
df['h5 Size (GB)'] = df.apply( lambda x: x['size(bytes)']/billion, axis=1)
df['Size (GB)'] = df.apply( lambda x: x['size(bytes)_ants']/billion, axis=1)
df
df.index.names = ['Transform']
df.columns= ['size(bytes)', 'size(bytes)_ants', 'Size Ratio', 'h5 Size (Gb)', 'Size (Gb)']
df_toWrite = df[[ 'Size (Gb)', 'h5 Size (Gb)','Size Ratio']]
h5size_table_f = 'h5SizeTable_%s.tex'%(datetime.date.today().strftime('%Y%m%d'))
# Should I write
if( False ):
print( 'writing : ', h5size_table_f )
with open( h5size_table_f, 'w') as f:
f.write( df_toWrite.to_latex())
"""
Explanation: Here I'll tabulate sizes of various transformations out of ants
These came from the transforms here:
/groups/saalfeld/public/jrc2018/transformations
Specifically, these subfolders / files:
* JRC2018F_FAFB/*Warp.nii*
* jrc2018F-FCWB/*Warp.nii*
* jrc2018F-jfrc2010/*Warp.nii*
* jrc2018F-jfrc2013/*Warp.nii*
* JRC2018F_TEFOR/*Warp.nii*
End of explanation
"""
factors_by_level = { 0:1, 1:2, 2:4 }
err_data_f="/groups/saalfeld/home/bogovicj/dev/template/template-building-pub/examples/errTableLevels.csv"
err_df = pd.read_csv( err_data_f )
# make column for downsampling factor
err_df['downsample factor'] = err_df.apply( lambda x: factors_by_level[ x['level']], axis=1 )
h5err_table_f = 'h5QuantErrorTable_%s.tex'%(datetime.date.today().strftime('%Y%m%d'))
err_toWrite = err_df[['xfm', 'downsample factor', 'avg','max']]
err_toWrite.set_index(['xfm','downsample factor'], inplace=True)
pd.options.display.float_format = '{:,.3f}'.format
# Should I write
if( False ):
print( 'writing : ', h5err_table_f )
with open( h5err_table_f, 'w') as f:
f.write( err_toWrite.to_latex())
"""
Explanation: Quantization / downsampling errors
End of explanation
"""
h5IndivTransformF = "/groups/saalfeld/home/bogovicj/dev/template/template-building-pub/scripts/h5Analysis/h5timesIndv.csv"
antsIndivTransformF = "/groups/saalfeld/home/bogovicj/dev/template/template-building-pub/scripts/h5Analysis/antstimesIndv.csv"
h5SkelTimes = pd.read_csv( h5IndivTransformF )
antsSkelTimes = pd.read_csv( antsIndivTransformF )
print( 'ants mean skel time (ms): ', antsSkelTimes['time'].mean() )
print( 'ants std skel time (ms): ', antsSkelTimes['time'].std() )
print( 'h5 mean skel time (ms): ', h5SkelTimes['time'].mean() )
print( 'h5 std skel time (ms): ', h5SkelTimes['time'].std() )
print( 'num skels: ', len(antsSkelTimes), len(h5SkelTimes) )
print( ' ')
print( 'ants mean skel time (s): ', (antsSkelTimes['time']/1000).mean() )
print( 'ants std skel time (s): ', (antsSkelTimes['time']/1000).std() )
print( 'h5 mean skel time (s): ', (h5SkelTimes['time']/1000).mean() )
print( 'h5 std skel time (s): ', (h5SkelTimes['time']/1000).std() )
print( ' ' )
print( 'relative speedup: ', (antsSkelTimes['time'].mean()/h5SkelTimes['time'].mean()))
"""
Explanation: Skeleton transform times
End of explanation
"""
h5ImgTransformF = "/groups/saalfeld/home/bogovicj/dev/template/template-building-pub/scripts/h5Analysis/h5ImageTransformTimes.csv"
antsImgTransformF = "/groups/saalfeld/home/bogovicj/dev/template/template-building-pub/scripts/h5Analysis/antsImageTransformTimes.csv"
h5ImgTimes = pd.read_csv( h5ImgTransformF )
antsImgTimes = pd.read_csv( antsImgTransformF )
h5ImgTimes['time(s)'] = h5ImgTimes.apply( lambda x: x['time']/1000., axis=1)
antsImgTimes['time(s)'] = antsImgTimes.apply( lambda x: x['time']/1000., axis=1)
print( 'ants mean img time (s): ', antsImgTimes['time(s)'].mean() )
print( 'ants std img time (s): ', antsImgTimes['time(s)'].std() )
print( ' ' )
print( 'h5 mean img time (s): ', h5ImgTimes['time(s)'].mean() )
print( 'h5 std img time (s): ', h5ImgTimes['time(s)'].std() )
"""
Explanation: Image transform times
End of explanation
"""
h5DatasetSizesF = "/groups/saalfeld/public/jrc2018/transformations/quantized_multiscale/sizesByDataset.csv"
h5datasetSizes_table_f = 'h5DatasetSizesTable_raw_%s.tex'%(datetime.date.today().strftime('%Y%m%d'))
pd.options.display.float_format = '{:,.4f}'.format
h5DatasetSizes = pd.read_csv( h5DatasetSizesF )
h5DatasetSizes['sizeGb'] = h5DatasetSizes['size'] / 1e9
h5DatasetSizes['sizeMb'] = h5DatasetSizes['size'] / 1e6
if( False ):
print( 'writing : ', h5datasetSizes_table_f )
with open( h5datasetSizes_table_f, 'w') as f:
f.write( h5DatasetSizes.to_latex())
# h5DatasetSizes
df_h5SizesErrs = h5DatasetSizes
df_h5SizesErrs
# df_h5SizesErrs['sizeRatios'] = df_h5SizesErrs.loc[ h5DatasetSizes['transform'] == 'JRC2018F_FAFB.' ]
# TODO ugly but ok
def sizeRatios( x ):
if x['transform'] == 'JRC2018F_FAFB' and x['direction'] == 'F':
#print('0')
return x['size'] / ants_transform_sizes[0][1]
elif x['transform'] == 'JRC2018F_FAFB' and x['direction'] == 'I':
#print('1')
return x['size'] / ants_transform_sizes[1][1]
elif x['transform'] == 'JRC2018F_FCWB' and x['direction'] == 'F':
#print('2')
return x['size'] / ants_transform_sizes[2][1]
elif x['transform'] == 'JRC2018F_FCWB' and x['direction'] == 'I':
#print('3')
return x['size'] / ants_transform_sizes[3][1]
elif x['transform'] == 'JRC2018F_JFRC2010' and x['direction'] == 'F':
#print('4')
return x['size'] / ants_transform_sizes[4][1]
elif x['transform'] == 'JRC2018F_JFRC2010' and x['direction'] == 'I':
#print('5')
return x['size'] / ants_transform_sizes[5][1]
elif x['transform'] == 'JRC2018F_JFRC2013' and x['direction'] == 'F':
#print('6')
return x['size'] / ants_transform_sizes[6][1]
elif x['transform'] == 'JRC2018F_JFRC2013' and x['direction'] == 'I':
#print('7')
return x['size'] / ants_transform_sizes[7][1]
elif x['transform'] == 'JRC2018F_TEFOR' and x['direction'] == 'F':
#print('8')
return x['size'] / ants_transform_sizes[8][1]
elif x['transform'] == 'JRC2018F_TEFOR' and x['direction'] == 'I':
#print('9')
return x['size'] / ants_transform_sizes[9][1]
else:
return -1
df_h5SizesErrs['sizeRatio'] = df_h5SizesErrs.apply( lambda x: sizeRatios(x), axis=1)
df_h5SizesErrs['sizeRatio_oneover'] = df_h5SizesErrs.apply( lambda x: 1.0/x['sizeRatio'], axis=1)
df_h5SizesErrs['xfm'] = df_h5SizesErrs.apply(
lambda x: '{}({})'.format(x['transform'], x['direction']) , axis=1)
# df_h5SizesErrs
# i = h5DatasetSizes['transform'] == 'JRC2018F_FAFB'
# df_h5SizesErrs.loc[i]['sizeRatios'] = df_h5SizesErrs.loc[ i ]['size'] / ants_transform_sizes[0][1]
# df_h5SizesErrs
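# A more compact alternative to sizeRatios above (a sketch, written to a separate column so
# the values used in the tables are untouched). It assumes the entries in ants_transform_sizes
# alternate Warp ('F') / InverseWarp ('I') in the same order as bridge_list, which is how the
# list is written above; unmatched rows get NaN instead of -1.
ants_size_lookup = {
    (bridge_list[i // 2], 'F' if i % 2 == 0 else 'I'): size
    for i, (_, size) in enumerate(ants_transform_sizes)
}
df_h5SizesErrs['sizeRatio_alt'] = df_h5SizesErrs.apply(
    lambda x: x['size'] / ants_size_lookup.get((x['transform'], x['direction']), float('nan')),
    axis=1)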
h5datasetSizes_table_f = 'h5DatasetErrorsSizesTable_raw_%s.tex'%(datetime.date.today().strftime('%Y%m%d'))
size_err_df = err_df.set_index(['xfm','downsample factor']).join( df_h5SizesErrs.set_index(['xfm','downsample factor']))
size_err_df_writeme = size_err_df[['avg','max', 'sizeMb', 'sizeRatio_oneover']]
if( False ):
print( 'writing : ', h5datasetSizes_table_f )
with open( h5datasetSizes_table_f, 'w') as f:
f.write( size_err_df_writeme.to_latex())
size_err_df_writeme
"""
Explanation: h5 dataset sizes
End of explanation
"""
|
fja05680/pinkfish
|
examples/C00.sp500-components-timeseries/sp500-components-timeseries.ipynb
|
mit
|
from datetime import datetime
import pandas as pd
import pinkfish as pf
# -*- encoding: utf-8 -*-
%matplotlib inline
"""
Explanation: S&P 500 Components Time Series
Get time series of all S&P 500 components
End of explanation
"""
filename = 'sp500.csv'
symbols = pd.read_csv(filename)
symbols = sorted(list(symbols['Symbol']))
print(symbols)
"""
Explanation: Current S&P500 symbols.
See my SP500 project that generates the sp500.csv file.
End of explanation
"""
now = datetime.now()
dt_string = now.strftime('%m-%d-%Y') # mm-dd-YYYY
dir_name = 'sp500-components-{}'.format(dt_string)
"""
Explanation: Create cache directory for current sp500 symbol timeseries
End of explanation
"""
pf.update_cache_symbols(symbols=symbols, dir_name=dir_name,from_year=2018)
"""
Explanation: Update time series for the symbols below.
Time series will be fetched for any symbols not already cached.
End of explanation
"""
|
dietmarw/EK5312_ElectricalMachines
|
Chapman/Ch5-Problem_5-01.ipynb
|
unlicense
|
%pylab notebook
%precision %.4g
"""
Explanation: Exercises Electric Machinery Fundamentals
Chapter 5
Problem 5-1
Note: You should first click on "Cell → Run All" in order that the plots get generated.
End of explanation
"""
Vt = 480 # [V]
PF = 0.8
fse = 60 # [Hz]
p = 8
Pout = 400 * 746 # [W] using the official "electrical horsepower" conversion
Xs = 0.6 # [Ohm]
"""
Explanation: Description
A 480-V, 60 Hz, 400-hp 0.8-PF-leading eight-pole $\Delta$-connected synchronous motor has a synchronous
reactance of $0.6\,\Omega$ and negligible armature resistance. Ignore its friction, windage, and core losses for the
purposes of this problem. Assume that $|\vec{E}_A|$ is directly proportional to the field current $I_F$ (in other
words, assume that the motor operates in the linear part of the magnetization curve), and that $|\vec{E}_A| = 480\,V$ when $I_F = 4\,A$.
End of explanation
"""
n_m = 120 * fse / p
print('''
n_m = {:.0f} r/min
==============='''.format(n_m))
"""
Explanation: (a)
What is the speed of this motor?
(b)
If this motor is initially supplying 400 hp at 0.8 PF lagging.
What are the magnitudes and angles of $\vec{E}_A$ and $\vec{I}_A$ ?
(c)
How much torque is this motor producing?
What is the torque angle $\delta$ ?
How near is this value to the maximum possible induced torque of the motor for this field current setting?
(d)
If $|\vec{E}_A|$ is increased by 30 percent.
What is the new magnitude of the armature current?
What is the motor’s new power factor?
(e)
Calculate and plot the motor’s V-curve for this load condition.
SOLUTION
(a)
The speed of this motor is given by:
$$n_m = \frac{120f_{se}}{P}$$
End of explanation
"""
Pin = Pout
il = Pin / (sqrt(3) * Vt * PF)
il # [A]
"""
Explanation: (b)
If losses are being ignored, the output power is equal to the input power. This situation is shown in the phasor diagram below:
<img src="figs/Problem_5-01.jpg" width="60%">
The line current flow under these circumstances is:
$$I_L = \frac{P}{\sqrt{3}V_T PF}$$
End of explanation
"""
ia = il / sqrt(3)
ia # [A]
"""
Explanation: Because the motor is $\Delta$-connected, the corresponding phase current is:
End of explanation
"""
Ia_angle = -arccos(PF)
Ia_angle /pi *180 # [degrees]
Ia = ia * (cos(Ia_angle) + sin(Ia_angle)*1j)
print('Ia = {:.0f} A ∠{:.2f}°'.format(abs(Ia), Ia_angle / pi *180))
"""
Explanation: The angle of the current is:
End of explanation
"""
EA = Vt - Xs * 1j * Ia
EA_angle = arctan(EA.imag/EA.real)
print('''
EA = {:.0f} V ∠{:.1f}°
=================='''.format(abs(EA), EA_angle/pi*180))
"""
Explanation: The internal generated voltage $\vec{E}_A$ is:
$$\vec{E}_A = \vec{V}_\phi - jX_S\vec{I}_A$$
End of explanation
"""
w_m = n_m * (1.0 / 60.0) * (2.0*pi/1.0)
tau_ind = Pout / w_m
print('''
tau_ind = {:.0f} Nm
================='''.format(tau_ind))
"""
Explanation: (c)
The induced torque is:
$$\tau_\text{ind} = \frac{P_\text{out}}{\omega_m}$$
End of explanation
"""
tau_ind_max = (3*Vt*abs(EA)) / (w_m * Xs)
print('''
tau_ind_max = {:.0f} Nm
======================'''.format(tau_ind_max))
"""
Explanation: The maximum possible induced torque for the motor at this field setting is the maximum possible power divided by $\omega_m$:
$$\tau_\text{ind,max} = \frac{3V_\phi E_A}{\omega_mX_S}$$
End of explanation
"""
tau_ind/tau_ind_max
"""
Explanation: The current operating torque is about
End of explanation
"""
Ea_2 = 1.30 * abs(EA)
Ea_2 # [V]
"""
Explanation: times the maximum possible torque.
(d)
If the magnitude of the internal generated voltage $E_A$ is increased by 30%, the new torque angle
can be found from the fact that $E_A\sin{\delta} \propto P =$ constant.
End of explanation
"""
delta_1 = EA_angle
delta_2 = arcsin(abs(EA) / Ea_2 * sin(delta_1))
delta_2/pi *180 # [degrees]
"""
Explanation: $$\delta_2 = \arcsin\left(\frac{E_{A1}}{E_{A2}}\sin{\delta_1}\right)$$
End of explanation
"""
EA2 = Ea_2 * (cos(delta_2)+sin(delta_2)*1j)
Ia_2 = (Vt - EA2) / (Xs*1j)
Ia_2_angle = arctan(Ia_2.imag/Ia_2.real)
PF2 = cos(Ia_2_angle)
print('''
Ia_2 = {:.0f} A ∠{:.2f}°
====================
PF2 = {:.3f}
============'''.format(abs(Ia_2), Ia_2_angle/pi*180, PF2))
"""
Explanation: The new armature current is:
$$\vec{I}_{A2} = \frac{\vec{V}_\phi - \vec{E}_{A2}}{jX_S}$$
End of explanation
"""
Ea_plot = linspace(0.90, 1.70, 81) * abs(EA)
"""
Explanation: (e)
A Python program to calculate and plot the motor’s V-curve is shown below:
Initialize values:
End of explanation
"""
delta_plot = arcsin(abs(EA) / Ea_plot * sin(delta_1))
"""
Explanation: Calculate $\delta_2$
End of explanation
"""
EA_plot = Ea_plot * (cos(delta_plot)+sin(delta_plot)*1j)
"""
Explanation: Calculate the phasor $E_A$
End of explanation
"""
Ia_plot = (Vt - EA_plot) / (Xs*1j)
"""
Explanation: Calculate $I_A$
End of explanation
"""
title(r'Armature current versus $E_A$')
xlabel(r'$E_A$ [kV]')
ylabel(r'$I_A$ [A]')
plot(abs(EA_plot)/1000,abs(Ia_plot), linewidth = 2)
grid()
"""
Explanation: Plot the v-curve
End of explanation
"""
|
yunqu/PYNQ
|
boards/Pynq-Z1/base/notebooks/arduino/arduino_grove_ledbar.ipynb
|
bsd-3-clause
|
# Make sure the base overlay is loaded
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
"""
Explanation: Grove LED Bar Example
This example shows how to use the Grove LED Bar on the board. The LED bar has 10 LEDs: 8 green LEDs, 1 orange LED, and 1 red LED. The brightness of each LED can be set independently.
For this notebook, a PYNQ Arduino shield is also required. The LED bar is attached to the G4 connection on the shield. The Grove LED bar also works with PMODA and PMODB on the board.
End of explanation
"""
from pynq.lib.arduino import Grove_LEDbar
from pynq.lib.arduino import ARDUINO_GROVE_G4
# Instantiate Grove LED Bar on Arduino shield G4
ledbar = Grove_LEDbar(base.ARDUINO,ARDUINO_GROVE_G4)
ledbar.reset()
"""
Explanation: 1. Instantiate and reset LED Bar
End of explanation
"""
from time import sleep
# Light up different bars in a loop
for i in range(2):
ledbar.write_binary(0b1010100000)
sleep(0.5)
ledbar.write_binary(0b0000100100)
sleep(0.5)
ledbar.write_binary(0b1010101110)
sleep(0.5)
ledbar.write_binary(0b1111111110)
sleep(0.5)
"""
Explanation: 2. Turn individual LEDs on or off
Write a 10-bit binary pattern, with each bit representing the corresponding LED. 1 = on, 0 = off
End of explanation
"""
# Brightness 0-255
HIGH = 0xFF
MED = 0xAA
LOW = 0x01
OFF = 0X00
brightness = [OFF, OFF, OFF, LOW, LOW, MED, MED, HIGH, HIGH, HIGH]
ledbar.write_brightness(0b1111111111,brightness)
"""
Explanation: 3. Set LEDs individually with different brightness levels
The brightness of each LED can be set individually by writing a list of 10x 8-bit values to the LED bar. 0 is off, 0xff is full brightness.
End of explanation
"""
for i in range (1,11):
ledbar.write_level(i,3,0)
sleep(0.3)
for i in range (1,10):
ledbar.write_level(i,3,1)
sleep(0.3)
"""
Explanation: 4. Set the "level" or the number of LEDs which are set
A number or level of LEDs can be turned on, starting from either end of the LED bar. For example, this feature could be used to indicate the level of something being measured.
write_level(level, bright_level, green_to_red)
level is the number of LEDs that are on.
bright_level [0-10] is the level of brightness
green_to_red = 1 means the LEDs start being lit from the "green" end of the LED bar
green_to_red = 0 means the LEDs start being lit from the "red" end of the LED bar.
For example, ledbar.write_level(5,4,1) will light 5 LEDs, to brightness 4 (out of 10) and will start from the Green LED (the LED furthest away from Grove connector on the LED bar module.)
End of explanation
"""
btns = [base.buttons[index] for index in range(4)]
i = 1
ledbar.reset()
done = False
while not done:
if (btns[0].read()==1):
sleep(0.2)
ledbar.write_level(i,2,1)
i = min(i+1,9)
elif (btns[1].read()==1):
sleep(0.2)
i = max(i-1,0)
ledbar.write_level(i,2,1)
elif (btns[3].read()==1):
ledbar.reset()
done = True
"""
Explanation: 5. Controlling the LED Bar from the board buttons
This cell demonstrates controlling the "level" of the LEDs from onboard buttons.
Button 0 to increase level
Button 1 to decrease level
Button 3 to exit
End of explanation
"""
|
liquidscorpio/python-data-analysis
|
1-Working-with-relational-data-using-pandas.ipynb
|
gpl-2.0
|
import pandas as pd
# Some basic data
users = [
{ 'name': 'John', 'age': 29, 'id': 1 },
{ 'name': 'Doe', 'age': 19, 'id': 2 },
{ 'name': 'Alex', 'age': 32, 'id': 3 },
{ 'name': 'Rahul', 'age': 27, 'id': 4 },
{ 'name': 'Ellen', 'age': 23, 'id': 5},
{ 'name': 'Shristy', 'age': 30, 'id': 6}
]
users
# Using the above data as Foreign Key (FK)
likes = [
{ 'user_id': 1, 'likes': 'Mango' },
{ 'user_id': 1, 'likes': 'Pepsi' },
{ 'user_id': 2, 'likes': 'Burger' },
{ 'user_id': 2, 'likes': 'Mango' },
{ 'user_id': 3, 'likes': 'Cola' },
{ 'user_id': 4, 'likes': 'Orange' },
{ 'user_id': 3, 'likes': 'Cola' },
{ 'user_id': 2, 'likes': 'Pepsi' },
{ 'user_id': 3, 'likes': 'Carrot' },
{ 'user_id': 4, 'likes': 'Mango' },
{ 'user_id': 6, 'likes': 'Pepsi' },
]
likes
# Create Pandas DataFrame object and set
# appropriate index
df_users = pd.DataFrame(users)
df_users.set_index('id')
df_users
df_likes = pd.DataFrame(likes)
df_likes.set_index('user_id')
df_likes
# Using the FK relation to create a join
users_likes_join = df_users.merge(df_likes, left_on='id', right_on='user_id')
users_likes_join.set_index('user_id')
users_likes_join
# Changing left and right hand side of the relationship
likes_users_join = df_likes.merge(df_users, left_on='user_id', right_on='id')
likes_users_join.set_index('user_id')
likes_users_join
"""
Explanation: Working with relational data using Pandas
Testing the waters with sample relational data
Based on well-defined theory and the availability of highly mature, scalable and accessible relational database systems like Postgres, MariaDB and other commercial alternatives, relational data is pervasive in modern software development. Though, of late, the dominance of SQL systems has been challenged by the flexibility of some NoSQL datastores, relational data and datastores continue to be an important source of raw datasets for many data analysis projects.
In this part we start off with a simple relational dataset which will be augmented with more complexity as we proceed through the section. This dataset is then analysed using Pandas - a very nifty Python package for working with various kinds of data, especially tabular and relational data.
Why not use one of the many popular datasets
Being able to mentally replicate and cross-check the result of an algorithm is pretty important in gaining confidence in data analysis. This is not always possible with, say, the Petals dataset or the Reuters dataset for that matter. We therefore construct a small dataset of a nature which could very easily be found in many modern codebases, and each time we arrive at a result, we can manually and independently compute it and compare with that of our approach in code.
End of explanation
"""
# Food wise count of likes
food_wise = users_likes_join.groupby('likes')['likes'].count()
food_wise
# Lets sort our data. Default order is ascending
asc_sort = food_wise.sort_values()
asc_sort
# An example for descending
dsc_sort = food_wise.sort_values(ascending=False)
dsc_sort
"""
Explanation: Basic Aggregation Operations
End of explanation
"""
# Using in_place sort for memory efficiency
# Notice there is no left hand side value
food_wise.sort_values(ascending=False, inplace=True)
# food_wise itself has changed
food_wise
"""
Explanation: QUICK NOTE ABOUT sort_values
By default sort_values allocates new memory each time it is called. While working with larger production data we can be limited by the available memory on our machines vis-a-vis the dataset size (and really we do not wish to hit the SWAP partition even on SSDs). In such a situation, we can set the keyword argument inplace=True, this will modify the current DataFrame it self instead of allocating new memory.
Beware though: mutation, while memory efficient, can be a risky affair, leading to complex code paths and hard-to-reason-about code.
End of explanation
"""
%matplotlib inline
import matplotlib
# ggplot is theme of matplotlib which adds
# some visual asthetics to our charts. It is
# inspired from the eponymous charting package
# of the R programming language
matplotlib.style.use('ggplot')
# Every DataFrame object exposes a plot object
# which can be used to generate different plots
# A pie chart, figsize allows us to define size of the
# plot as a tuple of (width, height) in inches
food_wise.plot.pie(figsize=(7, 7))
# A bar chart
food_wise.plot.bar(figsize=(7, 7))
# Horizontal bar chart
food_wise.plot.barh(figsize=(7, 7))
# Lets plot the most active users - those who hit like
# very often using the above techniques
# Get the users by number of likes they have
user_agg = users_likes_join.groupby('name')['likes'].count()
# Here we go: Our most active users in a different color
user_agg.plot.barh(figsize=(6, 6), color='#10d3f6')
"""
Explanation: Working with visualisations and charts
We use the python package - matplotlib - to generate visualisations and charts for our analysis. The command %matplotlib inline is a handy option which embeds the charts directly into our ipython/jupyter notebook.
While we can directly configure and call matplotlib functions to generate charts, Pandas, via the DataFrame object, exposes some very convenient methods to quickly generate plots.
End of explanation
"""
# Users who never interact with our data
df_users[~df_users.id.isin(df_likes['user_id'])]
"""
Explanation: matplotlib does provide many more options to generate complex charts, and we will explore more of them as we proceed.
We assemble data and massage it with the sole purpose of seeking insights and getting our questions answered - exactly where pandas shines.
Asking questions of our data
Pandas supports boolean indexing using the square bracket notation - []. Boolean indexing enables us to pass a predicate which can be used, among other things, for filtering. Pandas also provides the negation operator ~ to filter based on the opposite of our predicate.
End of explanation
"""
# Oldest user who has exactly 2 likes
agg_values = (
users_likes_join
.groupby(['user_id', 'name', 'age'])
.agg({ 'likes': 'count' })
.sort_index(level=['age'], sort_remaining=False, ascending=False)
)
agg_values[agg_values['likes'] == 2].head(1)
"""
Explanation: Since a pandas DataFrame is a column-based abstraction (as against row-based), we need to reset_index after an aggregation operation in order to retrieve a flat DataFrame which is convenient to query.
End of explanation
"""
# Oldest user who has at least 2 likes
agg_values[agg_values['likes'] >= 2].head(1)
# Lets augment our data a little more
users = users + [
{ 'id': 7, 'name': 'Yeti', 'age': 40 },
{ 'id': 8, 'name': 'Commander', 'age': 31 },
{ 'id': 9, 'name': 'Jonnah', 'age': 26 },
{ 'id': 10, 'name': 'Hex', 'age': 28 },
{ 'id': 11, 'name': 'Sam', 'age': 33 },
{ 'id': 12, 'name': 'Madan', 'age': 53 },
{ 'id': 13, 'name': 'Harry', 'age': 38 },
{ 'id': 14, 'name': 'Tom', 'age': 29 },
{ 'id': 15, 'name': 'Daniel', 'age': 23 },
{ 'id': 16, 'name': 'Virat', 'age': 24 },
{ 'id': 17, 'name': 'Nathan', 'age': 16 },
{ 'id': 18, 'name': 'Stepheny', 'age': 26 },
{ 'id': 19, 'name': 'Lola', 'age': 31 },
{ 'id': 20, 'name': 'Amy', 'age': 25 },
]
users, len(users)
likes = likes + [
{ 'user_id': 17, 'likes': 'Mango' },
{ 'user_id': 14, 'likes': 'Orange'},
{ 'user_id': 18, 'likes': 'Burger'},
{ 'user_id': 19, 'likes': 'Blueberry'},
{ 'user_id': 7, 'likes': 'Cola'},
{ 'user_id': 11, 'likes': 'Burger'},
{ 'user_id': 13, 'likes': 'Mango'},
{ 'user_id': 1, 'likes': 'Coconut'},
{ 'user_id': 6, 'likes': 'Pepsi'},
{ 'user_id': 8, 'likes': 'Cola'},
{ 'user_id': 17, 'likes': 'Mango'},
{ 'user_id': 19, 'likes': 'Coconut'},
{ 'user_id': 15, 'likes': 'Blueberry'},
{ 'user_id': 20, 'likes': 'Soda'},
{ 'user_id': 3, 'likes': 'Cola'},
{ 'user_id': 4, 'likes': 'Pepsi'},
{ 'user_id': 14, 'likes': 'Coconut'},
{ 'user_id': 11, 'likes': 'Mango'},
{ 'user_id': 12, 'likes': 'Soda'},
{ 'user_id': 16, 'likes': 'Orange'},
{ 'user_id': 2, 'likes': 'Pepsi'},
{ 'user_id': 19, 'likes': 'Cola'},
{ 'user_id': 15, 'likes': 'Carrot'},
{ 'user_id': 18, 'likes': 'Carrot'},
{ 'user_id': 14, 'likes': 'Soda'},
{ 'user_id': 13, 'likes': 'Cola'},
{ 'user_id': 9, 'likes': 'Pepsi'},
{ 'user_id': 10, 'likes': 'Blueberry'},
{ 'user_id': 7, 'likes': 'Soda'},
{ 'user_id': 12, 'likes': 'Burger'},
{ 'user_id': 6, 'likes': 'Cola'},
{ 'user_id': 4, 'likes': 'Burger'},
{ 'user_id': 14, 'likes': 'Orange'},
{ 'user_id': 18, 'likes': 'Blueberry'},
{ 'user_id': 20, 'likes': 'Cola'},
{ 'user_id': 9, 'likes': 'Soda'},
{ 'user_id': 14, 'likes': 'Pepsi'},
{ 'user_id': 6, 'likes': 'Mango'},
{ 'user_id': 3, 'likes': 'Coconut'},
]
likes, len(likes)
"""
Explanation: In the above we used sort_index instead of sort_values because the groupby operation creates a MultiIndex
on the columns user_id, name and age, and since age is part of that index, sort_values cannot operate on it.
The head(n) function on a DataFrame returns the first n records from the frame and the equivalent function tail(n) returns the last n records from the frame.
End of explanation
"""
# DataFrames from native python dictionaries
df_users = pd.DataFrame(users)
df_likes = pd.DataFrame(likes)
"""
Explanation: Eating your own dog food
The above data has been copy-pasted and hand-edited. A problem with this approach is the possibility of the data containing more than one like for the same product by the same user. While we can manually check the data, the approach will be tedious and intractable as the size of the data increases. Instead we employ pandas itself to identify duplicate likes by the same person and fix the data accordingly.
End of explanation
"""
_duplicate_likes = (
df_likes
.groupby(['user_id', 'likes'])
.agg({ 'likes': 'count' })
)
duplicate_likes = _duplicate_likes[_duplicate_likes['likes'] > 1]
duplicate_likes
"""
Explanation: Let's figure out where the duplicates are
End of explanation
"""
# Now remove the duplicates
df_unq_likes = df_likes.drop_duplicates()
# The difference should be 6 since 6 records should be eliminated
len(df_unq_likes), len(df_likes)
"""
Explanation: So there are in all 6 duplicate records. User#2 and Pepsi is recorded twice so that is 1 extra, 2 extra for User#3 and Cola and 1 extra for rest of the three pairs, which equals, 1 + 2 + 1 + 1 + 1 = 6.
End of explanation
"""
# Join the datasets
users_likes_join = df_users.merge(df_unq_likes, left_on='id', right_on='user_id')
users_likes_join.set_index('id')
# We aggregate the likes column and rename it to `Records`
unq_user_likes_group = (
users_likes_join
.groupby(['id', 'name', 'likes'])
.agg({'likes': 'count'})
.rename(columns={ 'likes': 'num_likes' })
)
# Should return empty if duplicates are removed
unq_user_likes_group[unq_user_likes_group['num_likes'] > 1]
"""
Explanation: We replay our previous aggregation to verify no more duplicates indeed exist.
End of explanation
"""
# What percent of audience likes each fruit?
likes_count = (
users_likes_join
.groupby('likes')
.agg({ 'user_id': 'count' })
)
likes_count['percent'] = likes_count['user_id'] * 100 / len(df_users)
likes_count.sort_values('percent', ascending=False)
"""
Explanation: Let's continue asking more questions of our data and go over some more convenience methods exposed by Pandas for aggregation.
End of explanation
"""
# What do people who like Coconut also like?
coconut_likers = users_likes_join[users_likes_join['likes'] == 'Coconut'].user_id
likes_among_coconut_likers = users_likes_join[(users_likes_join['user_id'].isin(coconut_likers)) & (users_likes_join['likes'] != 'Coconut')]
likes_among_coconut_likers.groupby('likes').agg({ 'user_id': pd.Series.nunique }).sort_values('user_id', ascending=False)
"""
Explanation: In the above code snippet we created a computed column percent in the likes_count DataFrame. Column operations in pandas are vectorized and execute significantly faster than row operations; it is always a good idea to express computations as column operations rather than row operations.
End of explanation
"""
# What is the age group distribution of likes?
users_likes_join.groupby('likes').age.plot(kind='hist', legend=True, figsize=(10, 6))
"""
Explanation: In our fictitious database, Cola and Pepsi seem to be popular among the users who like Coconut.
End of explanation
"""
users_likes_join.groupby('likes').age.plot(kind='kde', legend=True, figsize=(10, 6))
"""
Explanation: Most of our audience seems to fall in the 25 - 40 years age group. But this visualisation has one flaw - if records are stacked on top of each other, only one of them will be visible. Let's try an alternative plot.
End of explanation
"""
# Age distribution only of people who like Soda
users_likes_join[users_likes_join['likes'] == 'Soda'].groupby('likes').age.plot(kind='hist', legend=True, figsize=(10, 6))
"""
Explanation: Anything surprising? Coconut - gray color - was not represented in the histogram. But from this visualisation, we can notice that coconut is popular among the 25 - 35 years age group only.
On the other hand, if we want to plot a specific "likable" object, we can simply filter our dataframe before the groupby operation.
End of explanation
"""
|
gouthambs/karuth-source
|
content/extra/notebooks/pandas_vs_numpy.ipynb
|
artistic-2.0
|
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("seaborn-pastel")
%matplotlib inline
import seaborn.apionly as sns
import numpy as np
from timeit import timeit
import sys
iris = sns.load_dataset('iris')
data = pd.concat([iris]*100000)
data_rec = data.to_records()
print (len(data), len(data_rec))
"""
Explanation: Numpy and Pandas Performance Comparison
Goutham Balaraman
Pandas and Numpy are two packages that are core to a lot of data analysis. In this post I will compare the performance of numpy and pandas.
tl;dr:
- numpy consumes less memory compared to pandas
- numpy generally performs better than pandas for 50K rows or less
- pandas generally performs better than numpy for 500K rows or more
- for 50K to 500K rows, it is a toss up between pandas and numpy depending on the kind of operation
End of explanation
"""
MB = 1024*1024
print("Pandas %d MB " % (sys.getsizeof(data)/MB))
print("Numpy %d MB " % (sys.getsizeof(data_rec)/MB))
"""
Explanation: Here I have loaded the iris dataset and replicated it so as to have 15MM rows of data. The space requirement for 15MM rows of data in a pandas dataframe is more than twice that of a numpy recarray.
End of explanation
"""
data.head()
# <!-- collapse=True -->
def perf(inp, statement, grid=None):
length = len(inp)
gap = int(length/5)
#grid = np.array([int(x) for x in np.logspace(np.log10(gap), np.log10(length+1) , 5)])
if grid is None:
grid = np.array([10000, 100000, 1000000, 5000000, 10000000])
num = 100
time = []
data = {'pd': pd, 'np': np}
for i in grid:
if isinstance(inp, pd.DataFrame):
sel = inp.iloc[:i]
data['data'] = sel
else:
sel = inp[:i]
data['data_rec'] = sel
t = timeit(stmt=statement, globals=data, number=num)
time.append(t/num)
return grid, np.array(time)
def bench(pd_inp, pd_stmt, np_inp, np_stmt, title="", grid=None):
g,v1 = perf(pd_inp, pd_stmt, grid)
g,v2 = perf(np_inp, np_stmt, grid)
fig, ax = plt.subplots()
ax.loglog()
ax.plot(g, v1, label="pandas",marker="o", lw=2)
ax.plot(g, v2, label="numpy", marker="v", lw=2)
ax.set_xticks(g)
plt.legend(loc=2)
plt.xlabel("Number of Records")
plt.ylabel("Time (s)")
plt.grid(True)
plt.xlim(min(g)/2,max(g)*2)
plt.title(title)
"""
Explanation: A snippet of the data shown below.
End of explanation
"""
bench(data, "data.loc[:, 'sepal_length'].mean()",
data_rec, "np.mean(data_rec.sepal_length)",
title="Mean on Unfiltered Column")
"""
Explanation: In this post, performance metrics for a few different categories are compared between numpy and pandas:
- operations on a column of data, such as mean or applying a vectorised function
- operations on a filtered column of data
- vector operations on a column or filtered column
Operations on a Column
Here are some performance metrics for operations on one column of data. The operations involved here include fetching a view and a reduction operation such as mean, a vectorised log or a string-based unique operation. All these are O(n) calculations. The mean calculation is orders of magnitude faster in numpy compared to pandas for array sizes of 100K or less. For sizes larger than 100K pandas maintains a lead over numpy.
End of explanation
"""
bench(data, "np.log(data.loc[:, 'sepal_length'])",
data_rec, "np.log(data_rec.sepal_length)",
title="Vectorised log on Unfiltered Column")
"""
Explanation: Below, the vectorized log operation is faster in numpy for sizes less than 100K but pandas costs about the same for sizes larger than 100K.
End of explanation
"""
bench(data, "data.loc[:,'species'].unique()",
data_rec, "np.unique(data_rec.species)",
grid=np.array([100, 1000, 10000, 100000, 1000000]),
title="Unique on Unfiltered String Column")
"""
Explanation: The one differentiating aspect about the test below is that the column species is of string type. The operation demonstrated is a unique calculation. We observe that the unique calculation is roughly an order of magnitude faster in pandas for sizes larger than 1K rows.
End of explanation
"""
bench(data, "data.loc[(data.sepal_width>3) & \
(data.petal_length<1.5), 'sepal_length'].mean()",
data_rec, "np.mean(data_rec[(data_rec.sepal_width>3) & \
(data_rec.petal_length<1.5)].sepal_length)",
grid=np.array([1000, 10000, 100000, 1000000]),
title="Mean on Filtered Column")
"""
Explanation: Operations on a Filtered Column
Below we perform the same tests as above, except that the column is not a full view, but is instead a filtered view. The filters are simple filters with an arithmetic bool comparison for the first two and a string comparison for the third below.
Below, the mean is calculated for a filtered column sepal_length. Here the performance of pandas is better for row sizes larger than 10K. In the mean on the unfiltered column shown above, pandas performed better only for 1MM rows or more. Just having selection operations has shifted the performance chart in favor of pandas for an even smaller number of records.
End of explanation
"""
bench(data, "np.log(data.loc[(data.sepal_width>3) & \
(data.petal_length<1.5), 'sepal_length'])",
data_rec, "np.log(data_rec[(data_rec.sepal_width>3) & \
(data_rec.petal_length<1.5)].sepal_length)",
grid=np.array([1000, 10000, 100000, 1000000]),
title="Vectorised log on Filtered Column")
"""
Explanation: For the vectorised log operation on an unfiltered column shown above, numpy performed better than pandas for fewer than 100K records, while the performance was comparable for the two for sizes larger than 100K. But the moment you introduce a filter on a column, pandas starts to show an edge over numpy for more than 10K records.
End of explanation
"""
bench(data, "data[data.species=='setosa'].sepal_length.mean()",
data_rec, "np.mean(data_rec[data_rec.species=='setosa'].sepal_length)",
grid=np.array([1000, 10000, 100000, 1000000]),
title="Mean on (String) Filtered Column")
"""
Explanation: Here is another example of a mean reduction on a column but with a string filter. We see a similar behavior where numpy performs significantly better at small sizes and pandas takes a gentle lead for a larger number of records.
End of explanation
"""
bench(data, "data.petal_length * data.sepal_length + \
data.petal_width * data.sepal_width",
data_rec, "data_rec.petal_length*data_rec.sepal_length + \
data_rec.petal_width * data_rec.sepal_width",
title="Vectorised Math on Unfiltered Columns")
"""
Explanation: Vectorized Operation on a Column
In this last section, we do vectorised arithmetic using multiple columns. This involves creating views and doing vectorised math on them. Even when there is no filter, pandas has a slight edge over numpy for a large number of records. For smaller than 100K records, numpy performs significantly better.
End of explanation
"""
bench(data, "data.loc[data.sepal_width * data.petal_length > \
data.sepal_length, 'sepal_length'].mean()",
data_rec, "np.mean(data_rec[data_rec.sepal_width * data_rec.petal_length \
> data_rec.sepal_length].sepal_length)",
title="Vectorised Math in Filtering Columns",
grid=np.array([100, 1000, 10000, 100000, 1000000]))
"""
Explanation: In the following figure, the filter involves a vectorised arithmetic operation, and the mean reduction is computed on the filtered column. The presence of a filter makes pandas significantly faster for sizes larger than 100K, while numpy maintains a lead for fewer than 10K records.
End of explanation
"""
|
EmuKit/emukit
|
notebooks/Emukit-tutorial-Bayesian-optimization-introduction.ipynb
|
apache-2.0
|
### General imports
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
### --- Figure config
LEGEND_SIZE = 15
"""
Explanation: An Introduction to Bayesian Optimization with Emukit
Overview
End of explanation
"""
from emukit.test_functions import forrester_function
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.core import ContinuousParameter, ParameterSpace
target_function, space = forrester_function()
"""
Explanation: Navigation
What is Bayesian optimization?
The ingredients of Bayesian optimization
Emukit's Bayesian optimization interface
References
1. What is Bayesian optimization?
Given a function $f: \mathbb{X} \rightarrow \mathbb{R}$ which is defined in some constrained input space $\mathbb{X}$, Bayesian optimization (BO) [Shahriari et al, 2016] tries to find the global minimum $x_{\star} \in \mathbb{X}$ of the function $f$ by solving the global optimization problem :
$$ x_{\star} = \operatorname*{arg\:min}_{x \in \mathbb{X}} f(x). $$
Typically these objective functions $f$ are noisy, i.e $y(x) = f(x) + \epsilon$ with $\epsilon \sim N(0, \sigma_{noise})$ and expensive to evaluate. Additionally we assume that no gradient information is available and hence we treat $f$ as a black-box.
Popular examples for such black-box optimization problems are:
optimizing the hyperparameters of a machine learning algorithm such as, for instance, a neural network, where each function evaluation requires training and validating the neural network
optimizing the parameters of a controller for a robot
etc.
There are two crucial bits in Bayesian optimization:
A prior probability measure $p(f)$ which captures our prior beliefs on $f$, called the model. Every time we observe new data $D$ the prior is updated to a 'posterior' $p(f|D)$ using the available data.
An acquisition function $a: \mathbb{X} \rightarrow \mathbb{R}$ which for each point in the input space quantifies the utility of evaluating this point. The central idea of the acquisition function is to trade off the exploration in regions of the input space where the model is still uncertain and the exploitation of the model's confidence about the good regions of the input space.
Given these ingredients, BO essentially iterates the following three steps until it reaches a predefined stopping criterion:
1. fit the model $p(f|D_{n})$ on the currently available data $D_{n}$.
2. find the most interesting point to evaluate by $x_{n+1} \in \operatorname*{arg\:max}_{x \in \mathbb{X}} a(x)$
3. evaluate the objective function at $x_{n+1}$, obtain $y_{n+1}$ and add the new observation to the data $D_{n+1} \leftarrow D_{n} \cup \{x_{n+1}, y_{n+1}\}$
2. The ingredients of Bayesian optimization
<h4 id='bo_intro_objective'>The Objective Function and the Input Space</h4>
As an example let's assume we want to optimize the one-dimensional forrester function:
$$
(6x - 2)^2\sin(12x - 4)
$$
which is defined over the interval $x \in [0, 1]$.
Conveniently, this function is already implemented in Emukit. Note that in order to pass it to other Emukit modules we wrap the function with the UserFunctionWrapper interface.
End of explanation
"""
x_plot = np.linspace(space.parameters[0].min, space.parameters[0].max, 200)[:, None]
y_plot = target_function(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(x_plot, y_plot, "k", label="Objective Function")
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: The space object defines the input space $X = [0, 1]$, which in this case is purely continuous and only one dimensional. In a later section we will see how we can also apply Bayesian optimization in other domains that contain discrete or categorical parameters.
Of course in reality, evaluating $f$ on a grid wouldn't be possible, but since the forrester function is a synthetic function we can evaluate it here for visualization purposes.
End of explanation
"""
X_init = np.array([[0.2],[0.6], [0.9]])
Y_init = target_function(X_init)
plt.figure(figsize=(12, 8))
plt.plot(X_init, Y_init, "ro", markersize=10, label="Observations")
plt.plot(x_plot, y_plot, "k", label="Objective Function")
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: <h4 id='bo_intro_init_design'> The Intial Design </h4>
Usually, before we start the actual BO loop we need to gather a few observations such that we can fit the model. This is called the initial design and common strategies are either a predefined grid or sampling points uniformly at random.
End of explanation
"""
import GPy
from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper
gpy_model = GPy.models.GPRegression(X_init, Y_init, GPy.kern.RBF(1, lengthscale=0.08, variance=20), noise_var=1e-10)
emukit_model = GPyModelWrapper(gpy_model)
mu_plot, var_plot = emukit_model.predict(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(X_init, Y_init, "ro", markersize=10, label="Observations")
plt.plot(x_plot, y_plot, "k", label="Objective Function")
plt.plot(x_plot, mu_plot, "C0", label="Model")
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - np.sqrt(var_plot)[:, 0], color="C0", alpha=0.6)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.4)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.2)
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: <h4 id='bo_intro_model'> The Model </h4>
Now we can start with the BO loop by first fitting a model on the collected data.
The arguably most popular model for BO is a Gaussian process (GP) which defines a probability distribution across classes of functions, typically smooth, such that each linear finite-dimensional restriction is multivariate Gaussian (Rasmussen and Williams, 2006). GPs are fully parametrized by a mean $\mu(x)$ and a covariance function $k(x,x')$. Without loss of generality $\mu(x)$ is assumed to be zero. The covariance function $k(x,x')$ characterizes the smoothness and other properties of $f$. It is known that the kernel of the process has to be continuous, symmetric and positive definite. A widely used kernel is the squared exponential or RBF kernel: $$ k(x,x') = \theta_0 \cdot \exp{ \left(-\frac{\|x-x'\|^2}{\theta_1}\right)} $$ where $\theta_0$ and $\theta_1$ are hyperparameters.
To denote that $f$ is a sample from a GP with mean $\mu$ and covariance $k$ we write
$$f(x) \sim \mathcal{GP}(\mu(x),k(x,x')).$$
For regression tasks, the most important feature of GPs is that process priors are conjugate to the likelihood from finitely many observations $y = (y_1,\dots,y_n)^T$ and $X = \{x_1,\dots,x_n\}$, $x_i\in \mathcal{X}$ of the form $y_i = f(x_i) + \epsilon_i$ where $\epsilon_i \sim \mathcal{N} (0,\sigma_{noise})$ and we estimate $\sigma_{noise}$ by an additional hyperparameter $\theta_2$.
We obtain the Gaussian posterior $f(x^{*})|X, y, \theta \sim \mathcal{N}(\mu(x^{*}),\sigma^2(x^{*}))$, where $\mu(x^{*})$ and $\sigma^2(x^{*})$ have a closed form. See (Rasmussen and Williams, 2006) for more details.
Note that Gaussian processes are also characterized by hyperparameters $\theta = \{\theta_0, \ldots, \theta_k\}$ such as, for instance, the kernel lengthscales. For simplicity we keep these hyperparameters fixed here. However, we usually either optimize or sample these hyperparameters using the marginal log-likelihood of the GP. Of course we could also use any other model that returns a mean $\mu(x)$ and variance $\sigma^2(x)$ on arbitrary input points $x$, such as Bayesian neural networks or random forests.
End of explanation
"""
from emukit.bayesian_optimization.acquisitions import ExpectedImprovement, NegativeLowerConfidenceBound, ProbabilityOfImprovement
ei_acquisition = ExpectedImprovement(emukit_model)
nlcb_acquisition = NegativeLowerConfidenceBound(emukit_model)
pi_acquisition = ProbabilityOfImprovement(emukit_model)
ei_plot = ei_acquisition.evaluate(x_plot)
nlcb_plot = nlcb_acquisition.evaluate(x_plot)
pi_plot = pi_acquisition.evaluate(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(x_plot, (ei_plot - np.min(ei_plot)) / (np.max(ei_plot) - np.min(ei_plot)), "green", label="EI")
plt.plot(x_plot, (nlcb_plot - np.min(nlcb_plot)) / (np.max(nlcb_plot) - np.min(nlcb_plot)), "purple", label="NLCB")
plt.plot(x_plot, (pi_plot - np.min(pi_plot)) / (np.max(pi_plot) - np.min(pi_plot)), "darkorange", label="PI")
plt.legend(loc=1, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: <h4 id='bo_intro_acquisition'> The Acqusition Function </h4>
In the second step of our BO loop we use our model to compute the acquisition function. Various different acquisition functions exist such as :
Probability of Improvement (PI): Given the currently best observed value $y_{\star} \in \operatorname*{arg\:min} {y_0, \ldots, y_n}$, PI simply maximizes
$$
a_{PI}(x) = \Phi(\gamma(x))
$$
where $\gamma(x) = \frac{y_{\star} - \mu(x)}{\sigma(x)}$ and $\Phi$ is the CDF of a standard normal distribution [Jones et al., 1998].
Negative Lower Confidence Bound (NLCB): This acquisition function is based on the famous upper confidence bound bandit strategy [Srinivas et al., 2009]. It maximizes the function:
$$
a_{LCB} = - (\mu(x) - \beta \sigma(x))
$$
where $\beta$ is a user-defined hyperparameter that controls exploitation / exploration.
Expected Improvement (EI): Probably the most often used acquisition function is expected improvement [Jones et al., 1998], which computes:
$$
E_{p(f|D)}[\max(y_{\star} - f(x), 0)].
$$
where $y_{\star} \in \operatorname*{arg\:min} {y_0, \ldots, y_n}$. Assuming $p(f|D)$ to be a Gaussian, we can compute EI in closed form by:
$$
\sigma(x)\left(\gamma(x)\Phi(\gamma(x)) + \phi(\gamma(x))\right)
$$
here $\gamma(x) = \frac{y_{\star} - \mu(x)}{\sigma(x)}$ and $\Phi$ is the CDF and $\phi$ is the PDF of a standard normal distribution.
All of these acquisition functions rely only on the model and hence are cheap to evaluate. Furthermore we can easily compute the gradients and use a simple gradient optimization method to find $x_{n+1} \in \operatorname*{arg\:max}_{x \in \mathbb{X}} a(x)$.
End of explanation
"""
from emukit.core.optimization import GradientAcquisitionOptimizer
optimizer = GradientAcquisitionOptimizer(space)
x_new, _ = optimizer.optimize(ei_acquisition)
plt.figure(figsize=(12, 8))
plt.plot(x_plot, (ei_plot - np.min(ei_plot)) / (np.max(ei_plot) - np.min(ei_plot)), "green", label="EI")
plt.axvline(x_new, color="red", label="x_next", linestyle="--")
plt.legend(loc=1, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: <h4 id='bo_intro_eval'> Evaluating the objective function </h4>
To find the next point to evaluate we optimize the acquisition function using a standard gradient descent optimizer.
End of explanation
"""
y_new = target_function(x_new)
X = np.append(X_init, x_new, axis=0)
Y = np.append(Y_init, y_new, axis=0)
"""
Explanation: Afterwards we evaluate the true objective function and append it to our initial observations.
End of explanation
"""
emukit_model.set_data(X, Y)
mu_plot, var_plot = emukit_model.predict(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(emukit_model.X, emukit_model.Y, "ro", markersize=10, label="Observations")
plt.plot(x_plot, y_plot, "k", label="Objective Function")
plt.plot(x_plot, mu_plot, "C0", label="Model")
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - np.sqrt(var_plot)[:, 0], color="C0", alpha=0.6)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.4)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.2)
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: After updating the model, you can see that the uncertainty about the true objective function in this region decreases and our model becomes more certain.
End of explanation
"""
from emukit.examples.gp_bayesian_optimization.single_objective_bayesian_optimization import GPBayesianOptimization
bo = GPBayesianOptimization(variables_list=[ContinuousParameter('x1', 0, 1)],
X=X_init, Y=Y_init)
bo.run_optimization(target_function, 10)
mu_plot, var_plot = bo.model.predict(x_plot)
plt.figure(figsize=(12, 8))
plt.plot(bo.loop_state.X, bo.loop_state.Y, "ro", markersize=10, label="Observations")
plt.plot(x_plot, y_plot, "k", label="Objective Function")
plt.plot(x_plot, mu_plot, "C0", label="Model")
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - np.sqrt(var_plot)[:, 0], color="C0", alpha=0.6)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.4)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.2)
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
"""
Explanation: 3. Emukit's Bayesian optimization interface
Of course in practice we don't want to implement all of these steps ourselves. Emukit provides a convenient and flexible interface to apply Bayesian optimization. Below we can see how to run Bayesian optimization on the exact same function for 10 iterations.
End of explanation
"""
|
mercybenzaquen/foundations-homework
|
foundations_hw/05/Homework_5_Spotify_graded.ipynb
|
mit
|
import requests
response = requests.get('https://api.spotify.com/v1/search?q=Lil&type=artist&market=US&limit=50')
Lil = response.json()
print(Lil.keys())
print(type(Lil['artists']))
print(Lil['artists'].keys())
Lil_info = Lil['artists']['items']
print(type(Lil_info))
print(Lil_info[1])
"""
Explanation: graded = 8/8
End of explanation
"""
for every_lil in Lil_info:
print(every_lil['name'], every_lil['popularity'],"Genre:",(", ".join(every_lil['genres'])))
if every_lil['genres'] == []:
print("No genres listed")
"""
Explanation: 1# Do a search and print a list of 50 that are playable in the USA (or the country of your choice), along with their popularity score. What genres are most represented in the search results? Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3". If there are no genres, print "No genres listed".
End of explanation
"""
genres = []
for item in Lil_info:
genres = item['genres'] + genres
print(genres)
unique_genres = set(genres)
for every_genre in unique_genres:
print(every_genre , genres.count(every_genre))
#genre_name = set(every_genre)
#genre_number = set(genres.count(every_genre))
#print(genre_name)
#this is the other option I did it with soma's help
from collections import Counter
# genres is a long list of genres with a lot repeats
genre_counter = Counter(genres)
genre_counter
counts = Counter(all_genres)
counts.most_common(1)
genre_counter.most_common(3)
#class review. Other option:
#aggregation problem
all_genres = []
for artist in Lil_info:
#conditional
print('All genres we have', all_genres)
print("Current artist has", artist['genres'])
all_genres = all_genres + artist['genres']
print('!!!!All the genres we have are:')
print(all_genres)
all_genres.count("hip pop")
unique_genres = set(all_genres)
for genre in unique_genres:
genre_count = all_genres.count(genre)
print(genre, "shows up", genre_count, "times")
#we have to convert it into a set
"""
Explanation: 2) What genres are most represented in the search results?
End of explanation
"""
most_popular = 0
artist_name = Lil
for artist_popularity in Lil_info:
if artist_popularity['popularity'] == 86:
most_popular = most_popular
artist_name = artist_name
elif artist_popularity['popularity'] > most_popular:
most_popular = artist_popularity['popularity']
artist_name = artist_popularity['name']
print("Our second most popular artist is", artist_name , "with a popularity score of", most_popular)
most_followers = 0
artist_name = Lil
for artist_followers in Lil_info:
    if artist_followers['followers']['total'] > most_followers:
most_followers = artist_followers['followers']['total']
artist_name = artist_followers['name']
else:
most_followers = most_followers
artist_name = artist_name
print("Our artist with the most followers is not the most popular, it is", artist_name , "with ", most_followers, "followers")
#class review. Soma used ---> artist['name'] != 'Lil Wayne':
second_most_popular_name = ""
second_most_popular_score = 0
for artist in Lil_info:
#this is the conditional
if artist['popularity'] > second_most_popular_score and artist['name'] != 'Lil Wayne':
#these are the changes
second_most_popular_name = artist['name']
second_most_popular_score = artist['popularity']
print(second_most_popular_name,second_most_popular_score)
#class review. he answered the question of what to do if two people have the same popularity scores
target_score = 72
#initial condition
second_best_artist = []
for artist in Lil_info:
print("Looking at", artist['name'], "who has a popularity of", artist['popularity'])
#conditional
if artist['popularity'] == 72:
#change, add new artist to the list where Lil Yatchy is already in. We do that with .append(new thing)
print("!!!!!!!the artist popularity is 72")
second_best_artist.append(artist['name'])
print("Our second best artists are:")
for artist in second_best_artist:
print(artist)
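# A compact alternative for the same question (a sketch, assuming Lil_info as above): let
# max() do the scanning, skipping Lil Wayne by name.
runner_up = max((artist for artist in Lil_info if artist['name'] != 'Lil Wayne'),
                key=lambda artist: artist['popularity'])
print(runner_up['name'], runner_up['popularity'])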
"""
Explanation: 3 Use a for loop to determine who BESIDES Lil Wayne has the highest popularity rating. Is it the same artist who has the largest number of followers?
End of explanation
"""
for artist in Lil_info:
#print("Looking at", artist['name'])
if artist['name'] == "Lil' Kim":
print("Found Lil' Kim")
print(artist['popularity'])
else:
pass #to keep it from breaking
#print("NOT Lil' Kim)
#aggregation problem
more_popular_lil_kim = []
#the loop
for artist in Lil_info:
#the conditional if artist is more popular than lil kim
if artist['popularity'] > 62:
print(artist['name'],"is more popular than Lil' Kim with a score of", artist['popularity'])
more_popular_lil_kim.append(artist['name'])
print(more_popular_lil_kim)
for artist_name in more_popular_lil_kim:
print(artist_name)
more_popular_string = ", ".join(more_popular_lil_kim)
print("artists more popular than lil kim are:", more_popular_string)
"""
Explanation: 4) Print a list of Lil's that are more popular than Lil' Kim.
End of explanation
"""
for two_lil in Lil_info:
print(two_lil['name'], "ID:", two_lil['id'])
import requests
response = requests.get('https://api.spotify.com/v1/artists/6JUnsP7jmvYmdhbg7lTMQj/top-tracks?country=US')
lil_fate = response.json()
print(lil_fate)
print(type(lil_fate))
print(lil_fate.keys())
print(type(lil_fate['tracks']))
lil_fate_top_tracks = lil_fate['tracks']
print(lil_fate_top_tracks)
for top_tracks in lil_fate_top_tracks:
print("Lil Fate top track is/are", top_tracks['name'])
import requests
response = requests.get('https://api.spotify.com/v1/artists/6L3x3if9RVimruryD9LoFb/top-tracks?country=US')
king_lil = response.json()
print(type(king_lil))
print(king_lil.keys())
king_lil_top_tracks = king_lil['tracks']
print(king_lil_top_tracks)
print("King Lil top track is/are:")
for top_tracks in king_lil_top_tracks:
print(top_tracks['name'])
for top_tracks in lil_fate_top_tracks:
print("Lil Fate top track is/are:")
print("-",top_tracks['name'])
print("King Lil top track is/are:")
for top_tracks in king_lil_top_tracks:
print("-",top_tracks['name'])
"""
Explanation: 5) Pick two of your favorite Lils to fight it out, and use their IDs to print out their top tracks.
Tip: You're going to be making two separate requests, be sure you DO NOT save them into the same variable.
End of explanation
"""
import requests
response = requests.get('https://api.spotify.com/v1/artists/6JUnsP7jmvYmdhbg7lTMQj/top-tracks?country=US')
lil_fate = response.json()
explicit_count = 0
non_explicit_count = 0
popularity_explicit = 0
popularity_non_explicit= 0
minutes_explicit = 0
minutes_non_explicit = 0
for track in lil_fate_top_tracks:
if track['explicit'] == True:
explicit_count = explicit_count + 1
popularity_explicit = popularity_explicit + track['popularity']
minutes_explicit = minutes_explicit + track['duration_ms']
print("There is", explicit_count, "explicit track with a popularity of", popularity_explicit)
elif track['explicit'] == False:
non_explicit_count = non_explicit_count + 1
popularity_non_explicit = popularity_non_explicit + track['popularity']
minutes_non_explicit = minutes_non_explicit + track['duration_ms']
print("There is", non_explicit_count ,"non-explicit track with a popularity of", popularity_non_explicit)
print("The average popularity of Lil Fate explicits songs is", popularity_explicit/explicit_count)
#The line below is left commented out: it would divide by zero because there are no non-explicit tracks
#print("The average popularity of Lil Fate non-explicits songs is", popularity_non_explicit/non_explicit_count)
print("Lil Fate has", (minutes_explicit / 1000) /60 , "minutes of explicit music") #duration_ms is in milliseconds: /1000 gives seconds, /60 gives minutes
print("Lil Fate has", (minutes_non_explicit / 1000) / 60, "minutes of non-explicit music")
import requests
response = requests.get('https://api.spotify.com/v1/artists/6L3x3if9RVimruryD9LoFb/top-tracks?country=US')
king_lil = response.json()
explicit_count = 0
non_explicit_count = 0
popularity_explicit = 0
popularity_non_explicit= 0
minutes_explicit = 0
minutes_non_explicit = 0
for track in king_lil_top_tracks:
if track['explicit'] == True:
explicit_count = explicit_count + 1
popularity_explicit = popularity_explicit + track['popularity']
minutes_explicit = minutes_explicit + track['duration_ms']
print("There is", explicit_count, "explicit track with a popularity of", popularity_explicit)
elif track['explicit'] == False:
non_explicit_count = non_explicit_count + 1
popularity_non_explicit = popularity_non_explicit + track['popularity']
minutes_non_explicit = minutes_non_explicit + track['duration_ms']
print("There is", non_explicit_count ,"non-explicit track with a popularity of", popularity_non_explicit)
print("The average popularity of King lil explicits songs is", popularity_explicit/explicit_count)
print("The average popularity of King lil non-explicits songs is", popularity_non_explicit/non_explicit_count)
print("Lil King has", (minutes_explicit / 1000) / 60, "minutes of explicit music") #this number does not make sense but not sure what is wrong
print("Lil King has", (minutes_non_explicit /1000) / 60, "minutes of non-explicit music")#sounds weird. Not sure why I get this result
"""
Explanation: 6) Will the world explode if a musicians swears? Get an average popularity for their explicit songs vs. their non-explicit songs. How many minutes of explicit songs do they have? Non-explicit?
End of explanation
"""
response = requests.get('https://api.spotify.com/v1/search?q=Lil&type=artist&market=US')
all_lil = response.json()
print(all_lil.keys())
print(all_lil['artists'].keys())
print(all_lil['artists']['total'])
response = requests.get('https://api.spotify.com/v1/search?q=Biggie&type=artist&market=US')
all_biggies = response.json()
print(all_biggies['artists']['total'])
"""
Explanation: 7) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s?
End of explanation
"""
response = requests.get('https://api.spotify.com/v1/search?q=Lil&type=artist&market=US')
all_lil = response.json()
total_lils= all_lil['artists']['total']
#print(total_lils)
print("It would take",(total_lils/20) * 5, "to download all the Lils")
response = requests.get('https://api.spotify.com/v1/search?q=Biggie&type=artist&market=US')
all_biggies = response.json()
total_biggies= all_biggies['artists']['total']
#print(total_biggies)
print("It would take",(total_biggies/20) * 5, "to download all the Lils")
"""
Explanation: 7.2) If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies?
End of explanation
"""
response = requests.get('https://api.spotify.com/v1/search?q=Biggie&type=artist&market=US&limit=50')
biggies = response.json()
print(biggies['artists'].keys())
print(biggies['artists']['items'][0])
import math
biggies_info = biggies['artists']['items']
biggie_total= 0
biggie_popularity= 0
for biggie in biggies_info:
biggie_total= biggie_total + 1
biggie_popularity = biggie_popularity + biggie['popularity']
print("The top 50 Biggies have an average score of popularity of", math.ceil(biggie_popularity / biggie_total))
import requests
response = requests.get('https://api.spotify.com/v1/search?q=Lil&type=artist&market=US&limit=50')
Lil = response.json()
import math
Lil_info = Lil['artists']['items']
lil_total= 0
lil_popularity= 0
for lil in Lil_info:
lil_total= lil_total + 1
lil_popularity = lil_popularity + lil['popularity']
print("The top 50 Lils have an average score of popularity of", math.ceil(lil_popularity / lil_total))
"""
Explanation: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average?
End of explanation
"""
#This is the link to my graphic of the various popularities.
#https://infogr.am/fd7c85b9-f59b-498d-829d-a6ead4fb6862
"""
Explanation: Lils Graphic
End of explanation
"""
|
jeroarenas/MLBigData
|
5_RecommenderSystems/Recommender systems - Part 2-Students.ipynb
|
mit
|
# Import some libraries
import numpy as np
import math
from test_helper import Test
# Define data file
ratingsFilename = 'u.data'
# Read data with spark
rawRatings = sc.textFile(ratingsFilename)
# Check file format
print rawRatings.take(10)
"""
Explanation: Recommender Systems in Spark
Recommender Systems are a set of methods able to predict the 'rating' or 'preference' that a user would give to an item. Among the different approaches to designing this kind of system, in this lab session we are going to work with Collaborative Filtering (CF) approaches. However, unlike the previous lab session, we are going to work with distributed implementations based on Spark.
Throughout the notebook we are going to use the dataset from MovieLens. MovieLens data sets were collected by the GroupLens Research Project at the University of Minnesota. The original version of this problem contains 10 million ratings applied to 10681 movies by 71567 users. However, for this lab, we will use a reduced version consisting of 100,000 ratings (with values from 1 to 5) from 943 users on 1682 movies, where each user has rated at least 20 movies.
As you progress in this notebook, you will have to complete some exercises. Each exercise includes an explanation of what is expected, followed by code cells where one or several lines will contain <FILL IN>. The cell that needs to be modified will have # TODO: Replace <FILL IN> with appropriate code on its first line. Once the <FILL IN> sections are updated, the code can be run; below this cell, you will find the test cell (beginning with the line # TEST CELL) and you can run it to verify the correctness of your solution.
Read data and preprocessing
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
def format_ratings(line):
""" Parse a line in the ratings dataset
Args:
line (str): a line in the ratings dataset in the form of UserID \t MovieID \t Rating \t Timestamp \n
Returns:
tuple: (UserID, MovieID, Rating)
"""
# Divide each line with the character '\t'
items = # FILL
# Get UserID and convert it to int
user_id = # FILL
# Get ItemID and convert it to int
item_id = # FILL
    # Get Rating and convert it to float
rating_id = # FILL
# Return UserID, ItemID and Rating.
return # FILL
###########################################################
# TEST CELL
###########################################################
check_line = u'196\t242\t3\t881250949'
check_tuple = format_ratings(check_line)
Test.assertEquals(check_tuple, (196, 242, 3), 'incorrect result: data are incorrectly formatted')
"""
Explanation: Formatting the data
As you have checked, each line is formatted as:
UserID \t MovieID \t Rating \t Timestamp \n. So, let's convert each line to a list with the fields [UserID, MovieID, Rating] (we drop the timestamp because we do not need it for this exercise).
In order to work in a distributed way, let's start by implementing a function (format_ratings) that converts each line into the desired format. Then, we can call this function from our RDD with a map method to apply it over each line.
Tip: Check the Python function split() to convert each line into a list of items split by a given character.
1. Create function format_ratings( )
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Convert each line of rawRatings
ratingsRDD = # FILL IN
# Show the output
print ratingsRDD.take(10)
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(ratingsRDD.first(), (196, 242, 3), 'incorrect result: data are incorrectly formatted')
"""
Explanation: 2. Format your data
Convert the RDD rawRatings into a new RDD where each line has been transformed with the function format_ratings().
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
trainingRDD, testRDD = ratingsRDD.randomSplit(#FILL IN, seed=0L)
print 'Training: %s, test: %s\n' % (trainingRDD.count(), testRDD.count())
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(trainingRDD.count(), 75008, 'incorrect result: number of training ratings is incorrect')
Test.assertEquals(testRDD.count(), 24992, 'incorrect result: number of test ratings is incorrect')
Test.assertEquals(trainingRDD.first(), (186, 302, 3.0), 'incorrect result: the values of the training RDD are incorrect')
Test.assertEquals(testRDD.first(), (196, 242, 3.0), 'incorrect result: the values of the testing RDD are incorrect')
"""
Explanation: Creating training and test rating matrices
Now, to be able to train and evaluate the different methods, let's divide the rating matrix into two different matrix:
* one of them with the 75% of the ratings for training the different recommenders;
* other one, with the remaining 25%, for testing purposes.
Hint: you can apply the randomSplit() method of the RDD to divide it at random.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
def getAverages(IDandRatingsTuple):
""" Calculate average rating
Args:
IDandRatingsTuple: a single tuple of (ID_user, (Rating1, Rating2, Rating3, ...))
Returns:
tuple: a tuple of (ID_user, averageRating)
"""
id_user = # FILL IN
mean_value = # FILL IN
return (id_user, mean_value)
###########################################################
# TEST CELL
###########################################################
check_ratings = (0, iter([2, 5, 3, 1, 2]))
check_output = getAverages(check_ratings)
Test.assertEquals(check_output, (0, 2.6), 'incorrect result: check_output is incorrect')
"""
Explanation: Baseline recommender
In this section we are going to build a mean based baseline; that is, the recommender will predict new ratings as the average value of the ratings given by this user to previous rated items.
To design this approach, let's start building a function that, given a user_id and all its associated ratings, is able to compute the average value of all the ratings.
1. Build function getAverages( )
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# From ratingsRDD with tuples of (UserID, MovieID, Rating) create an RDD with tuples of
# the (UserID, Rating), i.e, remove the MovieID field.
RDD_users_ratings = trainingRDD.# FILL IN
# From the RDD of (UserID, Rating) create an RDD with tuples of
# (UserID, iterable of Ratings for that UserID), where iterable of Ratings for that UserID has
# all the rated items of UserID. Review groupByKey() method of RDD elements.
RDD_users_allratings = RDD_users_ratings.# FILL IN
# Using getAverages(), compute the average rating of each user.
RDD_users_mean = RDD_users_allratings.# FILL IN
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(RDD_users_ratings.first(), (186, 3.0), 'incorrect result: RDD_users_ratings is incorrect')
Test.assertEquals(list(RDD_users_allratings.first()[1])[:5], [4.0, 5.0, 4.0, 3.0, 3.0], 'incorrect result: RDD_users_allratings is incorrect')
Test.assertEquals(np.round(RDD_users_mean.first()[1],2), 3.69, 'incorrect result: RDD_users_mean is incorrect')
"""
Explanation: 2. Compute the average rating of each user
For the next step, let's use the getAverages( ) function to compute the average rating of all the users in a distributed way.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Create a new RDD, RDD_test_ids, consisting of (UserID, MovieID) pairs
# that you extract from testRDD. That is, remove the field Rating from testRDD.
RDD_test_ids = testRDD.# FILL IN
# Using the user_id as key, join RDD_test_ids with RDD_users_mean.
# Review the method leftOuterJoin() of RDD elements.
RDD_test_ids_mean = RDD_test_ids.# FILL IN
# Note that the resulting RDD provided by the leftOuterJoin() method has the format
# (Iduser, (IdItem, PredRating)). Remap it to create an RDD with tuples (Iduser, IdItem, PredRating)
RDD_pred_mean = RDD_test_ids_mean.# FILL IN
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(RDD_test_ids.first(), (196, 242), 'incorrect result: RDD_test_ids is incorrect')
Test.assertEquals(RDD_test_ids_mean.first(), (512, (23, 4.294117647058823)), 'incorrect result: RDD_test_ids_mean is incorrect')
Test.assertEquals(RDD_pred_mean.first(), (512, 23, 4.294117647058823), 'incorrect result: RDD_pred_mean is incorrect')
"""
Explanation: 3. Make new predictions
Now, let's make predictions for our test data. So, for each pair (user, item) of testRDD, we will have to compute the predicted rating, which will be given by the average rating of the corresponding user.
End of explanation
"""
def get_RMSE(predictedRDD, actualRDD):
""" Compute the root mean squared error between two RDD with the predicted and actual ratings
Args:
predictedRDD: predicted ratings for each movie and each user where each entry is in the form
(UserID, MovieID, Rating)
actualRDD: actual ratings where each entry is in the form (UserID, MovieID, Rating)
Returns:
        RMSE (float): computed RMSE value
"""
# Transform predictedRDD into the tuples of the form ((UserID, MovieID), Rating)
predictedReformattedRDD = predictedRDD.map(lambda x: ((x[0],x[1]),x[2]))
# Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)
actualReformattedRDD = actualRDD.map(lambda x: ((x[0],x[1]),x[2]))
# Compute the squared error for each matching entry (i.e., the same (User ID, Movie ID) in each
    # RDD) in the reformatted RDDs using RDD transformations - do not use collect()
squaredErrorsRDD = (predictedReformattedRDD.join(actualReformattedRDD).map(lambda x: pow(x[1][0]-x[1][1],2)))
# Compute the total squared error - do not use collect()
totalError = squaredErrorsRDD.reduce(lambda a,b: a+b)
# Count the number of entries for which you computed the total squared error
numRatings = squaredErrorsRDD.count()
    # Using the total squared error and the number of entries, compute the RMSE
return math.sqrt(float(totalError)/numRatings )
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Create a function to compute the MAE error
def get_MAE(predictedRDD, actualRDD):
""" Compute the mean absolute error between predicted and actual
Args:
predictedRDD: predicted ratings for each movie and each user where each entry is in the form
(UserID, MovieID, Rating)
actualRDD: actual ratings where each entry is in the form (UserID, MovieID, Rating)
Returns:
MAE (float): computed MAE value
"""
# Transform predictedRDD into the tuples of the form ((UserID, MovieID), Rating)
predictedReformattedRDD = # FILL IN
# Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)
actualReformattedRDD = # FILL IN
    # Compute the absolute error for each matching entry (i.e., the same (User ID, Movie ID) in each
    # RDD) in the reformatted RDDs using RDD transformations - do not use collect()
AbsoluteErrorsRDD = # FILL IN
# Compute the total absolute error - do not use collect()
totalError = # FILL IN
# Count the number of entries for which you computed the total absolute error
numRatings = # FILL IN
    # Using the total absolute error and the number of entries, compute the MAE
return # FILL IN
###########################################################
# TEST CELL
###########################################################
check_Predicted = sc.parallelize([(0, 0, 5), (0, 1, 3)])
check_Actual = sc.parallelize([(0, 0, 3), (0, 1, 2)])
Test.assertEquals(get_MAE(check_Predicted, check_Actual), 1.5, 'incorrect result: function get_MAE() is incorrect')
"""
Explanation: 4. Performance evaluation
Finally, let's evaluate the goodness of the computed predictions over the test data. To evaluate it, we are going to use two measurements:
* The Mean Absolute Error:
$$MAE = \frac{1}{N} \sum_{i=1}^N |p_i -r_i|$$
* The Root Mean Square Error:
$$ RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (p_i -r_i)^2}$$
The next cell contains a function that given two RDDs, the first with the predicted ratings and the second with the real rating values, is able to compute the RMSE value. Use it as example to create a new function able to calculate the MAE value.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Compute the MAE error for each mean based baseline
MAE_mean = # FILL IN
# Compute the RMSE error for each mean based baseline
RMSE_mean = # FILL IN
print 'Mean model ... MAE: %2.2f , RMSE: %2.2f ' % (MAE_mean, RMSE_mean)
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(np.round(MAE_mean,2), 0.83, 'incorrect result: MAE value of mean recommeder is incorrect')
Test.assertEquals(np.round(RMSE_mean,2), 1.04, 'incorrect result: RMSE value of mean recommeder is incorrect')
"""
Explanation: Now, let's evaluate the performance of the mean based baseline.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
from pyspark.mllib.recommendation import ALS
# Define parameters
n_latent_factors = 5
numIterations = 15
# Train the model (set seed=0L)
sc.setCheckpointDir('checkpoint/')
model = # FILL IN , seed=0L)
"""
Explanation: Alternating Least Squares algorithm
Now, let's work with the ALS algorithm. As you know, this method tries to approximate the ratings matrix by factorizing it as the product of two matrices:
$$ R = X * Y $$
where $X$ describes properties of each user, and $Y$ describes properties of each item. These two matrices are known as latent factors, since they are a low-dimension representation of users and items.
If we examine the utilities of MLlib, we can find an implementation of the ALS algorithm. So, in this section, we will learn to use this MLlib module.
Training a ALS model
This library includes the method ALS.train( ) which directly allows us training an ALS model. Use this function to train a recommender system.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Create a new RDD, RDD_test_ids, consisting of (UserID, MovieID) pairs
# that you extract from testRDD. That is, remove the field Rating from testRDD.
RDD_test_ids = # FILL IN
# Estimate their ratings with model.predictAll( )
predictions = # FILL IN
# Print the first 10 predictions
predictions.take(10)
###########################################################
# TEST CELL
###########################################################
check_predictions = predictions.filter(lambda x: (x[0]==621) & (x[1]==68)).first()
Test.assertEquals(np.round(check_predictions[2],1), 3.7, 'incorrect result: predicted value is incorrect')
check_predictions = predictions.filter(lambda x: (x[0]==880) & (x[1]==8)).first()
Test.assertEquals(np.round(check_predictions[2],1), 4, 'incorrect result: predicted value is incorrect')
"""
Explanation: Computing predictions
Once the model has been trained, let's make the recommendations. For this purpose, the ALS model has a method model.predictAll(testdata) which estimates the ratings over an RDD of ID pairs (userID, itemID).
So, complete the next cell to estimate the rating over the pairs of (users, items) of our test data.
End of explanation
"""
x = predictions.first()
print 'User ID: ' + str(x[0])
print 'Item ID: ' + str(x[1])
print 'Predicted rating: ' + str(x[2])
"""
Explanation: Note that, although each element of the RDD predictions is an object, you can extract the UserID, ItemID and predicted rating by accessing its first, second, and third elements, respectively. See the example in the next cell...
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
user_id = 10
# Select the outputs of the user_id=10 (hint: filter method)
predictions_userid = # FILL IN
# Sort the outputs according to rating field (hint: sortBy method)
predictions_userid_sorted = # FILL IN
predictions_userid_sorted.take(5)
###########################################################
# TEST CELL
###########################################################
check_output = predictions_userid_sorted.map(lambda x:x[1]).take(5)
Test.assertEquals(check_output, [483, 127, 174, 701, 185], 'incorrect result: recommeded items are incorrect')
"""
Explanation: Advance work
Which are the top 5 ranked items for the user with id=10?
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Compute the MAE error
MAE_als = # FILL IN
# Compute the RMSE error
RMSE_als = # FILL IN
print 'ALS model ... MAE: %2.2f , RMSE: %2.2f ' % (MAE_als, RMSE_als)
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(np.round(MAE_als,2), 0.77, 'incorrect result: MAE value of ALS recommeder is incorrect')
Test.assertEquals(np.round(RMSE_als,2), 1.01, 'incorrect result: RMSE value of ALS recommeder is incorrect')
"""
Explanation: Performance evaluation
Finally, let's evaluate the model performance over the test data using the get_MAE( ) and get_RMSE( ) functions.
End of explanation
"""
def compute_Pearson_correlation(ratings_u1, ratings_u2, n_items_th = 1):
""" Calculate correlation coefficient
Args:
ratings_u1: Iduser, a pyspark iterable with tuples (item, rating) with all the ratings of user 1
ratings_u2: Iduser, a pyspark iterable with tuples (item, rating) with all the ratings of user 2
n_items_th: number of common items that both users have to be rated to compute its similarity.
If the users have less than n_items_th common rated items, its similarity is set to zero.
By default, n_items_th is set to 1.
Returns:
corr_value: correlation coefficient
"""
# Get the items and values rated by user 1
[items_u1, values_u1] = zip(*list(ratings_u1[1]))
# Get the items and values rated by user 2
[items_u2, values_u2] = zip(*list(ratings_u2[1]))
# Get the set of items rated by both users and their values
r_u1 = [values_u1[i] for i, item in enumerate(items_u1) if item in items_u2]
r_u2 = [values_u2[i] for i, item in enumerate(items_u2) if item in items_u1]
    if len(r_u1)>= n_items_th: # If there are enough common rated items...
# Compute the means of the user ratings
m_1 = np.mean(np.array(values_u1))
m_2 = np.mean(np.array(values_u2))
# Remove their means
        r_u1 = np.array(r_u1) - m_1
        r_u2 = np.array(r_u2) - m_2
# Compute the correlation coefficient
corr_value = np.dot(r_u1,r_u2.T)/(np.sqrt(np.dot(r_u1,r_u1.T))*np.sqrt(np.dot(r_u2,r_u2.T)))
# Remove useless dimensions
corr_value =np.squeeze(corr_value)
else: # Else correlation is 0
corr_value = 0
# Checking that the correlation is not NaN (this would happen if the denominatior is 0),
# in this case, set the correlation coefficient to 0
if math.isnan(corr_value):
corr_value = 0
return corr_value
"""
Explanation: Advance work: User based recommendations
In this last section, we are going to implement a user-based collaborative filtering system on Spark.
As you know, the general algorithm has two steps:
1. Computing the similarity of each user to the remaining ones and selecting those with a similarity larger than zero (or larger than a given threshold).
2. Estimating the rating of an item for a given user. In this case, we average the ratings that the neighbors of this user have given to the item.
To make this implementation easier, let's start by precomputing all the similarities; we will use them in the second step.
Step 1. Training the system: Finding similar users
In the next cell, you are given a function to compute the Pearson correlation coefficient defined as:
$$ sim(user_a, user_b) = \frac{\sum_{p \in P} (r_{a,p} -\bar{r}_a)(r_{b,p} -\bar{r}_b)}
{\sqrt{ \sum_{p \in P} (r_{a,p} -\bar{r}_a)^2} \sqrt{ \sum_{p \in P} (r_{b,p} -\bar{r}_b)^2}}$$
where $P$ is set of items rated for both users a and b, $r_{u,p}$ is the rating of the user u to item p, and $\bar{r}_u$ is the mean value of the all the ratings of the user u.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# 1. From trainingRDD, create an RDD where each element is (userID, (ItemID, rating)), i.e,
# the userID is the key and the pair (ItemID, rating) is the value.
RDD_users_as_key = #FILL IN
# 2. Group the elements of RDD_users_as_key by key (see groupByKey() method)
# Each element of this new RDD is (userID, spark-iterable), where the spark iterable has
# a list with all the rated items elements (ItemID, rating)
RDD_users_ratings = #FILL IN
# 3. Extract the spark-iterable element with all the ratings of users 1 and 2
id_u1 = 1
ratings_u1 = #FILL IN
id_u2 = 2
ratings_u2 = #FILL IN
# 4. Compute its similarity
n_items_th = 4
similarity = compute_Pearson_correlation(ratings_u1, ratings_u2, n_items_th)
similarity
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(np.round(similarity,2), 0.80, 'incorrect result: similarity value is incorrect')
"""
Explanation: Now, complete the next cell to be able to evaluate the function compute_Pearson_correlation( ).
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# 1. From trainingRDD, create an new RDD with elements (userID, spark-iterable), where
# spark iterable has a list [(ItemID, rating), ...] with all the items rated by UserID
# (see previous section)
RDD_users_ratings = # FILL IN
# 2. Create all the combinations of pairs with the users (see cartesian method of RDD elements)
# Note that cartesian returns an RDD with elements ((id_1, iterable_ratings 1), (id_2, iterable_ratings 2))
pairs_users = # FILL IN
# 3. Compute correlation values with the function compute_Pearson_correlation()
n_items_th = 4
correlation_values = # FILL IN
# 4. Select correlation values larger than the similarity threshold (filter method)
sim_th = 0.2
correlation_values_sel = # FILL IN
# 5. Let's reorganize each element of the RDD to get user 1 as key and the tuple
# (user2, similarity) as value
all_correlations_with_userid = # FILL IN
# 6. Group the elements of all_correlations_with_userid by key (groupByKey() method)
# Each element of this new RDD is (userID, spark-iterable), where the spark iterable has
# a list with all the similar users (UserID, similarity)
RDD_sim_users = # FILL IN
RDD_sim_users.cache()
###########################################################
# TEST CELL
###########################################################
id_user = 1
sim_user1 = RDD_sim_users.filter(lambda x : x[0]==id_user).first()
sim_check = sc.parallelize(list(sim_user1[1]))
Test.assertEquals(np.round(sim_check.filter(lambda x: x[0] == 22).first()[1],2), 0.34, 'incorrect result: similarity value is incorrect')
Test.assertEquals(np.round(sim_check.filter(lambda x: x[0] == 120).first()[1],2), 0.37, 'incorrect result: similarity value is incorrect')
"""
Explanation: Once we can compute the similarity between two users, let's compute, for each user, its similarity to all the remaining users. The output of this cell will be an RDD of similarities where each element is (UserID, spark-iterable), and the spark-iterable is a list of (UserID, similarity) pairs.
Note that it is enough for this list to keep only the users whose similarity is larger than zero (or larger than a given threshold).
End of explanation
"""
def compute_predictions(med_user, list_sim, list_ratings):
""" Estimate the rating that a user u would assign over a item i
Args:
med_user: average rating of the user u
list_sim: list of tuples (id_user, similarity) with the users who are
similar to the user u and its similarity value
        list_ratings: list of tuples (id_user, rating) with the ratings that the remaining
users have already assigned to the item i. Note that the rating values are normalized
(the average rating of the corresponding user has been previously subtracted so that
this function implements the above expression)
Returns:
pred_value: estimated rating for the user u to the item i
"""
if (list_sim is not None) & (list_ratings is not None):
dict1 = dict(list_sim)
dict2 = dict(list_ratings)
list_intersect = [(k, dict1[k], dict2[k]) for k in sorted(dict1) if k in dict2]
        # We have built a list with: (user_id_similar, sim_value, rating_user_sim)
if len(list_intersect)>0:
aux = [(sim*rat, sim) for (id_user, sim, rat) in list_intersect]
numerator, denominator = zip(*aux)
pred_value = med_user + sum(numerator)/sum(denominator)
else:
pred_value = med_user
else:
pred_value = med_user
return pred_value
"""
Explanation: Step 2. Making predictions
Once you know how a user is similar to other users, you would like to know which items should be recommended for this user.
For this purpose, we have to assign a rating to each item by averaging the ratings that the similar users have given to that item according to this expression:
$$ pred(user_a, item_i) = \bar{r_a} + \frac{\sum_{b \in N} sim(user_a, user_b) * (r_{b,i}- \bar{r_b})}{\sum_{b \in N} sim(user_a, user_b)}$$
where N is the number of neighbors of user a ($sim >sim_th$) which have rated item i.
Next cell contains the necessary code to compute the above expression given the average rating of a user. Review this function, paying special attention to the input parameters.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# 1. From trainingRDD create a new RDD with the fields (user, rating), and convert it to
# (user, list_ratings). Hint: GroupByKey()
RDD_users_ratings = # FILL IN
# Convert this RDD (user, list_ratings) -> (user, mean_user). Use getAverages() function
RDD_users_mean = # FILL IN
###########################################################
# TEST CELL
###########################################################
id_user = 1
mean_user1 = RDD_users_mean.filter(lambda x : x[0]==id_user).first()
Test.assertEquals(np.round(mean_user1[1],2), 3.6, 'incorrect result: mean rating value is incorrect')
"""
Explanation: To obtain the predicted outputs for the test data and evaluate the performance of the user based recommender, we need to compute all the needed input arguments. Follow the steps of the next sections to obtain them.
1. Computing the average rating of each user
Please, review section baseline recommender (Subsection 2).
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# 2.1. Create an RDD with training ratings subtracting the users' mean
# Create an RDD with elements (user, (item, rating))
trainingRDD_aux = # FILL IN
# Combine it with the users_mean -> (user, ((item, rating), user_mean))
# Hint: leftOuterJoin()
trainingRDD_mean = # FILL IN
# Create a new RDD subtracting the mean of each rating and reorganize it -> (user, item, rating_norm)
trainingRDD_norm = # FILL IN
# 2.2. Create an RDD with normalized training ratings with the form (item, list((user, rating)))
RDD_ratings_item = # FILL IN
###########################################################
# TEST CELL
###########################################################
id_item = 22
ratings_item = RDD_ratings_item.filter(lambda x : x[0]==id_item).first()
ratings_check = sc.parallelize(list(ratings_item[1]))
Test.assertEquals(np.round(ratings_check.filter(lambda x: x[0] == 608).first()[1],2), 0.26, 'incorrect result: rating value is incorrect')
Test.assertEquals(np.round(ratings_check.filter(lambda x: x[0] == 184).first()[1],2), -0.66, 'incorrect result: rating value is incorrect')
"""
Explanation: 2. Create a list of ratings
Here, you should create a new RDD with one element per item, where each element is given by (item_id, list_ratings) and list_ratings is a set of tuples (user_id, rating) with the ids of the users who have rated item_id and the ratings they assigned.
Besides, the ratings in the list have to be normalized (subtracting the corresponding user's average rating).
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# 3.1 Create an input RDD, testForPredictingRDD, consisting of (UserID, MovieID) pairs
# that you extract from testRDD (i.e., remove the field rating)
RDD_test_ids = # FILL IN
# 3.2 Combine RDD_test_ids with RDD_users_mean to create an RDD (user, (item, mean_user))
# Hint: leftOuterJoin()
RDD_test_ids_mean = # FILL IN
# 3.3 Combine RDD_test_ids_mean with RDD_sim_users to create an RDD with elements
# (user, ((item, mean_user), list_sim_user)). Hint: leftOuterJoin()
# Next, reformat it to obtain elements (item, (user, mean_user, list_sim_user))
RDD_test_ids_sim = # FILL IN
# 3.4 Combine RDD_test_ids_sim with RDD_ratings_item to create an RDD with elements
# (item, ((user, mean_user , list_sim_user), list_item_rating)). Hint: leftOuterJoin()
# Next, reformat it to obtain elements ((user, item), mean_user, list_sim_user, list_item_rating)
RDD_test_ids_sim_rat = # FILL IN
###########################################################
# TEST CELL
###########################################################
check_out = RDD_test_ids_sim_rat.filter(lambda x: x[0]==(218, 516)).first()
Test.assertEquals(np.round(check_out[1],2), 3.62, 'incorrect result: mean value of the RDD is incorrect')
sim_check = sc.parallelize(list(check_out[2]))
Test.assertEquals(np.round(sim_check.filter(lambda x: x[0] == 24).first()[1],2), 0.31, 'incorrect result: similarity value is incorrect')
rating_check = sc.parallelize(list(check_out[3]))
Test.assertEquals(np.round(rating_check.filter(lambda x: x[0] == 308).first()[1],2), 0.23, 'incorrect result: rating value is incorrect')
"""
Explanation: 3. Combine previous RDDs
Until now, we have these RDDs:
* RDD_sim_users
* RDD_users_mean
* RDD_ratings_item
To make predictions over the test data, for each pair (userID, itemID) of the test ratings we need to build an element containing:
- the average rating of userID
- the similar users of userID
- the list of rating of itemID
so that we can call the compute_predictions function with the corresponding input parameters.
Then, here, we are going to combine the above RDDs to create a new RDD with elements given by:
((userID, itemID), average_rating_userID, list_similar_users_to_userID, list_ratings_itemID)
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# For each element of RDD_test_ids_sim_rat call to compute_predictions and create a new RDD
# with elements ((user, item), predicted value)
RDD_outputs = # FILL IN
RDD_predictions = # FILL IN
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(np.round(RDD_predictions.filter(lambda x: (x[0], x[1]) == (840, 516)).first()[2],2), 4.8, 'incorrect result: predicted value is incorrect')
Test.assertEquals(np.round(RDD_predictions.filter(lambda x: (x[0], x[1]) == (174, 1032)).first()[2],2), 3.28, 'incorrect result: predicted value is incorrect')
Test.assertEquals(np.round(RDD_predictions.filter(lambda x: (x[0], x[1]) == (896, 12)).first()[2],2), 3.83, 'incorrect result: predicted value is incorrect')
Test.assertEquals(np.round(RDD_predictions.filter(lambda x: (x[0], x[1]) == (59, 528)).first()[2],2), 4.18, 'incorrect result: predicted value is incorrect')
"""
Explanation: 4. Compute predictions
Complete the next cell to use RDD_test_ids_sim_rat elements as inputs of the function compute_predictions( ) and obtain the predicted ratings over the test data.
End of explanation
"""
# Compute the error MAE
MAE = get_MAE(RDD_predictions, testRDD)
# Compute the error RMSE
RMSE = get_RMSE(RDD_predictions, testRDD)
print 'User based model ... MAE: %2.2f , RMSE: %2.2f ' % (MAE, RMSE)
###########################################################
# TEST CELL
###########################################################
Test.assertEquals(np.round(MAE,2), 0.80, 'incorrect result: MAE value is incorrect')
Test.assertEquals(np.round(RMSE,2), 1.02, 'incorrect result: RMSE value is incorrect')
"""
Explanation: 5. Evaluate performance
End of explanation
"""
|
amitkaps/hackermath
|
Module_2f_ABTesting.ipynb
|
mit
|
#import the necessary datasets
import pandas as pd
import numpy as np
pd.__version__
!pip install xlrd
#Read the dataset
shoes_before = pd.read_excel("data/shoe_sales_before.xlsx")
shoes_during = pd.read_excel("data/shoe_sales_during.xlsx")
shoes_after = pd.read_excel("data/shoe_sales_after.xlsx")
shoes_before.head()
#What was the mean sales
#before, during and after for the two shoe models
#Hint: use df.mean function
print("Before Campaign:")
print(shoes_before.mean())
print()
print("During Campaign:")
print(shoes_during.mean())
print()
print("After Campaign:")
print(shoes_after.mean())
"""
Explanation: A/B Testing
A shoe company sells two models: UA101 and UA102. They wanted to improve their sales and so, ran an aggressive campaign.
The sales data before the campaign, during the campaign and after the campaign are provided.
Can you help them figure out if the campaign was successful or not? What additional insights will you provide them ?
End of explanation
"""
shoes_before.head()
import matplotlib.pyplot as plt
%matplotlib inline
#Find the standard deviation of the sales
print("Before Campaign:")
print(shoes_before.std())
print()
print("During Campaign:")
print(shoes_during.std())
print()
print("After Campaign:")
print(shoes_after.std())
"""
Explanation: Variance
Two statisticians of heights 4 feet and 5 feet have to cross a river of AVERAGE depth 3 feet. A third person comes along and says, "What are you waiting for? You can easily cross the river."
Variance is the average of the squared distances of the data values from the mean.
<img style="float: left;" src="img/variance.png" height="320" width="320">
<br>
<br>
Standard Deviation
It is the square root of variance. This will have the same units as the data and mean.
End of explanation
"""
#Find the covariance of the sales
#Use the cov function
print("Before Campaign:")
print(shoes_before.cov())
print()
print("During Campaign:")
print(shoes_during.cov())
print()
print("After Campaign:")
print(shoes_after.cov())
"""
Explanation: Co-variance
Covariance is a measure of the (average) co-variation between two variables, say x and y: it describes how much the two variables change together and the direction of their relationship. Compare this to variance, which describes how much a single variable varies on its own.
<img style="float: left;" src="img/covariance.png" height="270" width="270">
<br>
<br>
<br>
<br>
End of explanation
"""
import seaborn as sns
sns.jointplot(x= "UA101", y ="UA102", data=shoes_before)
sns.jointplot(x= "UA101", y ="UA102", data=shoes_during)
#Find correlation between sales
print("Before Campaign:")
print(shoes_before.corr())
print()
print("During Campaign:")
print(shoes_during.corr())
print()
print("After Campaign:")
print(shoes_after.corr())
"""
Explanation: Correlation
Correlation is the extent to which two or more variables fluctuate together. A positive correlation indicates that the variables increase or decrease in parallel; a negative correlation indicates that one variable increases as the other decreases.
<img style="float: left;" src="img/correlation.gif" height="270" width="270">
<br>
<br>
<br>
End of explanation
"""
#Let's do some analysis on UA101 now
#Find difference between mean sales before and after the campaign
np.mean(shoes_after.UA101) - np.mean(shoes_before.UA101)
"""
Explanation: Correlation != Causation
Correlation between two variables does not necessarily imply that one causes the other.
<img style="float: left;" src="img/correlation_not_causation.gif" height="570" width="570">
End of explanation
"""
#Find %increase in mean sales
(np.mean(shoes_after.UA101) - np.mean(shoes_before.UA101))/np.mean(shoes_after.UA101) * 100
"""
Explanation: On average, the sales after the campaign are higher than the sales before the campaign. But is the difference real, or could it be due to chance?
Classical Method : t-test
Hacker's Method : provided in notebook 2c
Effect Size
Because you can't argue with all the fools in the world. It's easier to let them have their way, then trick them when they're not paying attention - Christopher Paolini
End of explanation
"""
from scipy import stats
#Standard error for mean sales before campaign
stats.sem(shoes_before.UA101)
"""
Explanation: Would the business feel comfortable spending millions of dollars if the increase is going to be 25%?
Does it work for the company?
Maybe yes - if margins are good and this increase is considered good. But if the returns from the campaign does not let the company break even, it makes no sense to take that path.
Someone tells you the result is statistically significant. The first question you should ask is:
How large is the effect?
To answer such a question, we will make use of the concept of a confidence interval.
In plain English, a confidence interval is the range of values the measurement metric is likely to take.
An example would be: 90% of the time, the increase in average sales (after the campaign vs. before) would fall between 3.4 and 6.7 (these numbers are illustrative; we will derive them below).
Hacker's way to do this: Bootstrapping
(We will use the library here though)
Where do we go from here?
First of all, there are two points to be made.
Why do we need significance testing if confidence intervals can provide us more information?
How does it relate to the traditional statistical procedure of finding confidence intervals?
For the first one:
What if sales in the first month after the campaign were 80 and in the month before the campaign were 40? The difference is 40, and the confidence interval, as explained above, built by resampling with replacement, would always produce 40. But if we do the significance testing as detailed above - where the labels are shuffled - the sales are equally likely to occur in either group, and so significance testing would answer that there was no difference. And don't we all know that the data is too small to make meaningful inferences?
For the second one:
The traditional statistical derivation assumes a normal distribution. But what if the underlying distribution isn't normal? Also, people relate to resampling much better :-)
Standard Error
It is a measure of how far the estimate is likely to be off, on average. More technically, it is the standard deviation of the sampling distribution of a statistic (usually the mean). Please do not confuse it with the standard deviation: the standard deviation measures the variability of the observed quantity, while the standard error describes the variability of the estimate.
End of explanation
"""
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
%matplotlib inline
import seaborn as sns
sns.set(color_codes=True)
#Mean of sales before campaign
shoes_before.UA101.mean()
#Confidence interval on the mean of sales before campaign
stats.norm.interval(0.95, loc=shoes_before.UA101.mean(),
scale = shoes_before.UA101.std()/np.sqrt(len(shoes_before)))
#Find 80% Confidence interval
#Find mean and 95% CI on mean of sales after campaign
print(shoes_after.UA101.mean())
stats.norm.interval(0.95, loc=shoes_after.UA101.mean(),
scale = shoes_after.UA101.std()/np.sqrt(len(shoes_after)))
#What does confidence interval mean?
#Effect size
print("Effect size:", shoes_after.UA101.mean()
- shoes_before.UA101.mean() )
"""
Explanation: Hypothesis Testing
(We are covering, what is referred to as, frequentist method of Hypothesis testing)
We would like to know if the effects we see in the sample(observed data) are likely to occur in the population.
The way classical hypothesis testing works is by conducting a statistical test to answer the following question:
Given the sample and an effect, what is the probability of seeing that effect just by chance?
Here are the steps on how we would do this
Define null hypothesis
Compute test statistic
Compute p-value
Interpret the result
If the p-value is very low (more often than not, below 0.05), the effect is considered statistically significant. That means the effect is unlikely to have occurred by chance. The inference? The effect is likely to be seen in the population too.
This process is very similar to the proof-by-contradiction paradigm. We first assume that the effect is false - that's the null hypothesis. The next step is to compute the probability of obtaining that effect (the p-value). If the p-value is very low (<0.05 as a rule of thumb), we reject the null hypothesis.
End of explanation
"""
stats.ttest_ind(shoes_before.UA101,
shoes_after.UA101, equal_var=True)
"""
Explanation: Null Hypothesis: Mean sales aren't significantly different
Perform t-test and determine the p-value.
End of explanation
"""
stats.shapiro(shoes_before.UA101)
?stats.shapiro
"""
Explanation: The p-value is the probability of observing an effect at least this large just by chance (under the null hypothesis). And here, the p-value is almost 0.
Conclusion: The sales difference is significant.
Assumption of t-test
One assumption is that the data used came from a normal distribution.
<br>
There's a Shapiro-Wilk test for normality. If the p-value is less than 0.05, we reject the hypothesis that the data come from a normal distribution.
End of explanation
"""
|